diff options
270 files changed, 8204 insertions, 2841 deletions
diff --git a/.gitignore b/.gitignore index efb88c781..6028b8a44 100644 --- a/.gitignore +++ b/.gitignore @@ -37,3 +37,5 @@ nosetests.xml nova/tests/cover/* nova/vcsversion.py tools/conf/nova.conf* +tools/lintstack.head.py +tools/pylint_exceptions diff --git a/HACKING.rst b/HACKING.rst index be894f072..35493e55b 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -9,6 +9,7 @@ Nova Style Commandments General ------- - Put two newlines between top-level code (funcs, classes, etc) +- Use only UNIX style newlines ("\n"), not Windows style ("\r\n") - Put one newline between methods in classes and anywhere else - Long lines should be wrapped in parentheses in preference to using a backslash for line continuation. diff --git a/bin/nova-api b/bin/nova-api index 8457ea43d..16cf33cc5 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -44,13 +44,16 @@ from nova import utils CONF = cfg.CONF CONF.import_opt('enabled_apis', 'nova.service') +CONF.import_opt('enabled_ssl_apis', 'nova.service') if __name__ == '__main__': config.parse_args(sys.argv) logging.setup("nova") utils.monkey_patch() + launcher = service.ProcessLauncher() for api in CONF.enabled_apis: - server = service.WSGIService(api) + should_use_ssl = api in CONF.enabled_ssl_apis + server = service.WSGIService(api, use_ssl=should_use_ssl) launcher.launch_server(server, workers=server.workers or 1) launcher.wait() diff --git a/bin/nova-baremetal-deploy-helper b/bin/nova-baremetal-deploy-helper index f8a487d37..894a42003 100755 --- a/bin/nova-baremetal-deploy-helper +++ b/bin/nova-baremetal-deploy-helper @@ -18,7 +18,10 @@ """Starter script for Bare-Metal Deployment Service.""" import eventlet -eventlet.monkey_patch() + +# Do not monkey_patch in unittest +if __name__ == '__main__': + eventlet.monkey_patch() import os import sys diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 6187e052d..ee7bf2da9 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -111,7 +111,7 @@ CONF.register_cli_opt( def main(): - """Parse 
environment and arguments and call the approproate action.""" + """Parse environment and arguments and call the appropriate action.""" try: config_file = os.environ['CONFIG_FILE'] except KeyError: diff --git a/bin/nova-manage b/bin/nova-manage index 67212a198..90d191eca 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -205,7 +205,7 @@ class ShellCommands(object): @args('--path', dest='path', metavar='<path>', help='Script path') def script(self, path): - """Runs the script from the specifed path with flags set properly. + """Runs the script from the specified path with flags set properly. arguments: path""" exec(compile(open(path).read(), path, 'exec'), locals(), globals()) @@ -1056,11 +1056,11 @@ class CellCommands(object): ctxt = context.get_admin_context() db.cell_create(ctxt, values) - @args('--cell_id', dest='cell_id', metavar='<cell_id>', - help='ID of the cell to delete') - def delete(self, cell_id): + @args('--cell_name', dest='cell_name', metavar='<cell_name>', + help='Name of the cell to delete') + def delete(self, cell_name): ctxt = context.get_admin_context() - db.cell_delete(ctxt, cell_id) + db.cell_delete(ctxt, cell_name) def list(self): ctxt = context.get_admin_context() @@ -1128,8 +1128,13 @@ def add_command_parsers(subparsers): action_kwargs = [] for args, kwargs in getattr(action_fn, 'args', []): - action_kwargs.append(kwargs['dest']) - kwargs['dest'] = 'action_kwarg_' + kwargs['dest'] + if kwargs['dest'].startswith('action_kwarg_'): + action_kwargs.append( + kwargs['dest'][len('action_kwarg_'):]) + else: + action_kwargs.append(kwargs['dest']) + kwargs['dest'] = 'action_kwarg_' + kwargs['dest'] + parser.add_argument(*args, **kwargs) parser.set_defaults(action_fn=action_fn) diff --git a/bin/nova-novncproxy b/bin/nova-novncproxy index beee143f5..477510b99 100755 --- a/bin/nova-novncproxy +++ b/bin/nova-novncproxy @@ -21,20 +21,12 @@ Websocket proxy that is compatible with OpenStack Nova noVNC consoles. 
Leverages websockify.py by Joel Martin ''' -import Cookie import os -import socket import sys -import websockify - from nova import config -from nova.consoleauth import rpcapi as consoleauth_rpcapi -from nova import context +from nova.console import websocketproxy as ws from nova.openstack.common import cfg -from nova.openstack.common import log as logging -from nova.openstack.common import rpc -from nova import utils opts = [ @@ -69,64 +61,7 @@ opts = [ CONF = cfg.CONF CONF.register_cli_opts(opts) -LOG = logging.getLogger(__name__) - - -class NovaWebSocketProxy(websockify.WebSocketProxy): - def __init__(self, *args, **kwargs): - websockify.WebSocketProxy.__init__(self, unix_target=None, - target_cfg=None, - ssl_target=None, *args, **kwargs) - - def new_client(self): - """ - Called after a new WebSocket connection has been established. - """ - cookie = Cookie.SimpleCookie() - cookie.load(self.headers.getheader('cookie')) - token = cookie['token'].value - ctxt = context.get_admin_context() - rpcapi = consoleauth_rpcapi.ConsoleAuthAPI() - connect_info = rpcapi.check_token(ctxt, token=token) - - if not connect_info: - LOG.audit("Invalid Token: %s", token) - raise Exception(_("Invalid Token")) - - host = connect_info['host'] - port = int(connect_info['port']) - - # Connect to the target - self.msg("connecting to: %s:%s" % (host, port)) - LOG.audit("connecting to: %s:%s" % (host, port)) - tsock = self.socket(host, port, connect=True) - - # Handshake as necessary - if connect_info.get('internal_access_path'): - tsock.send("CONNECT %s HTTP/1.1\r\n\r\n" % - connect_info['internal_access_path']) - while True: - data = tsock.recv(4096, socket.MSG_PEEK) - if data.find("\r\n\r\n") != -1: - if not data.split("\r\n")[0].find("200"): - LOG.audit("Invalid Connection Info %s", token) - raise Exception(_("Invalid Connection Info")) - tsock.recv(len(data)) - break - - if self.verbose and not self.daemon: - print(self.traffic_legend) - - # Start proxying - try: - self.do_proxy(tsock) 
- except Exception: - if tsock: - tsock.shutdown(socket.SHUT_RDWR) - tsock.close() - self.vmsg("%s:%s: Target closed" % (host, port)) - LOG.audit("%s:%s: Target closed" % (host, port)) - raise +CONF.import_opt('debug', 'nova.openstack.common.log') if __name__ == '__main__': @@ -142,18 +77,18 @@ if __name__ == '__main__': sys.exit(-1) # Create and start the NovaWebSockets proxy - server = NovaWebSocketProxy(listen_host=CONF.novncproxy_host, - listen_port=CONF.novncproxy_port, - source_is_ipv6=CONF.source_is_ipv6, - verbose=CONF.verbose, - cert=CONF.cert, - key=CONF.key, - ssl_only=CONF.ssl_only, - daemon=CONF.daemon, - record=CONF.record, - web=CONF.web, - target_host='ignore', - target_port='ignore', - wrap_mode='exit', - wrap_cmd=None) + server = ws.NovaWebSocketProxy(listen_host=CONF.novncproxy_host, + listen_port=CONF.novncproxy_port, + source_is_ipv6=CONF.source_is_ipv6, + verbose=CONF.verbose, + cert=CONF.cert, + key=CONF.key, + ssl_only=CONF.ssl_only, + daemon=CONF.daemon, + record=CONF.record, + web=CONF.web, + target_host='ignore', + target_port='ignore', + wrap_mode='exit', + wrap_cmd=None) server.start_server() diff --git a/bin/nova-rootwrap b/bin/nova-rootwrap index c8e880d79..72a8c6309 100755 --- a/bin/nova-rootwrap +++ b/bin/nova-rootwrap @@ -16,20 +16,18 @@ # License for the specific language governing permissions and limitations # under the License. -"""Root wrapper for Nova +"""Root wrapper for OpenStack services - Filters which commands nova is allowed to run as another user. + Filters which commands a service is allowed to run as another user. 
- To use this, you should set the following in nova.conf: + To use this with nova, you should set the following in nova.conf: rootwrap_config=/etc/nova/rootwrap.conf You also need to let the nova user run nova-rootwrap as root in sudoers: nova ALL = (root) NOPASSWD: /usr/bin/nova-rootwrap /etc/nova/rootwrap.conf * - To make allowed commands node-specific, your packaging should only - install {compute,network,volume}.filters respectively on compute, network - and volume nodes (i.e. nova-api nodes should not have any of those files - installed). + Service packaging should deploy .filters files only on nodes where they are + needed, to avoid allowing more than is necessary. """ import ConfigParser @@ -75,7 +73,7 @@ if __name__ == '__main__': if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")): sys.path.insert(0, possible_topdir) - from nova.rootwrap import wrapper + from nova.openstack.common.rootwrap import wrapper # Load configuration try: diff --git a/bin/nova-spicehtml5proxy b/bin/nova-spicehtml5proxy new file mode 100755 index 000000000..089ff9d71 --- /dev/null +++ b/bin/nova-spicehtml5proxy @@ -0,0 +1,94 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +''' +Websocket proxy that is compatible with OpenStack Nova +SPICE HTML5 consoles. 
Leverages websockify.py by Joel Martin +''' + +import os +import sys + +from nova import config +from nova.console import websocketproxy as ws +from nova.openstack.common import cfg + + +opts = [ + cfg.BoolOpt('record', + default=False, + help='Record sessions to FILE.[session_number]'), + cfg.BoolOpt('daemon', + default=False, + help='Become a daemon (background process)'), + cfg.BoolOpt('ssl_only', + default=False, + help='Disallow non-encrypted connections'), + cfg.BoolOpt('source_is_ipv6', + default=False, + help='Source is ipv6'), + cfg.StrOpt('cert', + default='self.pem', + help='SSL certificate file'), + cfg.StrOpt('key', + default=None, + help='SSL key file (if separate from cert)'), + cfg.StrOpt('web', + default='/usr/share/spice-html5', + help='Run webserver on same port. Serve files from DIR.'), + cfg.StrOpt('spicehtml5proxy_host', + default='0.0.0.0', + help='Host on which to listen for incoming requests'), + cfg.IntOpt('spicehtml5proxy_port', + default=6082, + help='Port on which to listen for incoming requests'), + ] + +CONF = cfg.CONF +CONF.register_cli_opts(opts) +CONF.import_opt('debug', 'nova.openstack.common.log') + + +if __name__ == '__main__': + if CONF.ssl_only and not os.path.exists(CONF.cert): + parser.error("SSL only and %s not found" % CONF.cert) + + # Setup flags + config.parse_args(sys.argv) + + # Check to see if spice html/js/css files are present + if not os.path.exists(CONF.web): + print "Can not find spice html/js/css files at %s." 
% CONF.web + sys.exit(-1) + + # Create and start the NovaWebSockets proxy + server = ws.NovaWebSocketProxy(listen_host=CONF.spicehtml5proxy_host, + listen_port=CONF.spicehtml5proxy_port, + source_is_ipv6=CONF.source_is_ipv6, + verbose=CONF.verbose, + cert=CONF.cert, + key=CONF.key, + ssl_only=CONF.ssl_only, + daemon=CONF.daemon, + record=CONF.record, + web=CONF.web, + target_host='ignore', + target_port='ignore', + wrap_mode='exit', + wrap_cmd=None) + server.start_server() diff --git a/contrib/xen/vif-openstack b/contrib/xen/vif-openstack new file mode 100755 index 000000000..1df6ad6ac --- /dev/null +++ b/contrib/xen/vif-openstack @@ -0,0 +1,39 @@ +#!/bin/bash + +## vim: set syn=on ts=4 sw=4 sts=0 noet foldmethod=indent: +## copyright: B1 Systems GmbH <info@b1-systems.de>, 2012. +## author: Christian Berendt <berendt@b1-systems.de>, 2012. +## license: Apache License, Version 2.0 +## +## purpose: +## Creates a new vif device without attaching it to a +## bridge. Quantum Linux Bridge Agent will attach the +## created device to the belonging bridge. +## +## usage: +## place the script in ${XEN_SCRIPT_DIR}/vif-openstack and +## set (vif-script vif-openstack) in /etc/xen/xend-config.sxp. + +dir=$(dirname "$0") +. 
"$dir/vif-common.sh" + +case "$command" in + online) + setup_virtual_bridge_port "$dev" + ip link set $dev up + ;; + + offline) + ip link set $dev down + ;; + + add) + setup_virtual_bridge_port "$dev" + ip link set $dev up + ;; +esac + +if [ "$type_if" = vif -a "$command" = "online" ] +then + success +fi diff --git a/doc/api_samples/all_extensions/extensions-get-resp.json b/doc/api_samples/all_extensions/extensions-get-resp.json index 42e86eadd..bd002c080 100644 --- a/doc/api_samples/all_extensions/extensions-get-resp.json +++ b/doc/api_samples/all_extensions/extensions-get-resp.json @@ -89,6 +89,14 @@ "updated": "2012-08-09T00:00:00+00:00" }, { + "alias": "os-cells", + "description": "Enables cells-related functionality such as adding neighbor cells,\n listing neighbor cells, and getting the capabilities of the local cell.\n ", + "links": [], + "name": "Cells", + "namespace": "http://docs.openstack.org/compute/ext/cells/api/v1.1", + "updated": "2011-09-21T00:00:00+00:00" + }, + { "alias": "os-certificates", "description": "Certificates support.", "links": [], @@ -297,19 +305,19 @@ "updated": "2012-08-07T00:00:00+00:00" }, { - "alias": "os-admin-networks", + "alias": "os-networks", "description": "Admin-only Network Management Extension.", "links": [], - "name": "AdminNetworks", - "namespace": "http://docs.openstack.org/compute/ext/os-admin-networks/api/v1.1", + "name": "Networks", + "namespace": "http://docs.openstack.org/compute/ext/os-networks/api/v1.1", "updated": "2011-12-23T00:00:00+00:00" }, { - "alias": "os-networks", + "alias": "os-tenant-networks", "description": "Tenant-based Network Management Extension.", "links": [], - "name": "OSNetworks", - "namespace": "http://docs.openstack.org/compute/ext/os-networks/api/v1.1", + "name": "OSTenantNetworks", + "namespace": "http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2", "updated": "2011-12-23T00:00:00+00:00" }, { diff --git a/doc/api_samples/all_extensions/extensions-get-resp.xml 
b/doc/api_samples/all_extensions/extensions-get-resp.xml index ea0b45a12..ebb1c4302 100644 --- a/doc/api_samples/all_extensions/extensions-get-resp.xml +++ b/doc/api_samples/all_extensions/extensions-get-resp.xml @@ -37,6 +37,12 @@ <extension alias="os-availability-zone" updated="2012-08-09T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/availabilityzone/api/v1.1" name="AvailabilityZone"> <description>Add availability_zone to the Create Server v1.1 API.</description> </extension> + <extension alias="os-cells" updated="2011-09-21T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/cells/api/v1.1" name="Cells"> + <description>Enables cells-related functionality such as adding child cells, + listing child cells, getting the capabilities of the local cell, + and returning build plans to parent cells' schedulers + </description> + </extension> <extension alias="os-certificates" updated="2012-01-19T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/certificates/api/v1.1" name="Certificates"> <description>Certificates support.</description> </extension> @@ -125,13 +131,13 @@ <extension alias="os-multiple-create" updated="2012-08-07T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/multiplecreate/api/v1.1" name="MultipleCreate"> <description>Allow multiple create in the Create Server v1.1 API.</description> </extension> - <extension alias="os-admin-networks" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-admin-networks/api/v1.1" name="AdminNetworks"> + <extension alias="os-networks" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-networks/api/v1.1" name="Networks"> <description>Admin-only Network Management Extension.</description> </extension> <extension alias="os-networks-associate" updated="2012-11-19T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/networks_associate/api/v2" name="NetworkAssociationSupport"> 
<description>Network association support.</description> </extension> - <extension alias="os-networks" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-networks/api/v1.1" name="OSNetworks"> + <extension alias="os-tenant-networks" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2" name="OSTenantNetworks"> <description>Tenant-based Network Management Extension.</description> </extension> <extension alias="os-quota-class-sets" updated="2012-03-12T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/quota-classes-sets/api/v1.1" name="QuotaClasses"> diff --git a/doc/api_samples/os-cells/cells-get-resp.json b/doc/api_samples/os-cells/cells-get-resp.json new file mode 100644 index 000000000..62eb8ec31 --- /dev/null +++ b/doc/api_samples/os-cells/cells-get-resp.json @@ -0,0 +1,9 @@ +{ + "cell": { + "name": "cell3", + "rpc_host": null, + "rpc_port": null, + "type": "child", + "username": "username3" + } +}
\ No newline at end of file diff --git a/doc/api_samples/os-cells/cells-get-resp.xml b/doc/api_samples/os-cells/cells-get-resp.xml new file mode 100644 index 000000000..12256a5bd --- /dev/null +++ b/doc/api_samples/os-cells/cells-get-resp.xml @@ -0,0 +1,2 @@ +<?xml version='1.0' encoding='UTF-8'?> +<cell xmlns="http://docs.rackspacecloud.com/servers/api/v1.0" username="username3" rpc_host="None" type="child" name="cell3" rpc_port="None"/>
\ No newline at end of file diff --git a/doc/api_samples/os-cells/cells-list-empty-resp.json b/doc/api_samples/os-cells/cells-list-empty-resp.json new file mode 100644 index 000000000..5325a4e85 --- /dev/null +++ b/doc/api_samples/os-cells/cells-list-empty-resp.json @@ -0,0 +1,3 @@ +{ + "cells": [] +}
\ No newline at end of file diff --git a/doc/api_samples/os-cells/cells-list-empty-resp.xml b/doc/api_samples/os-cells/cells-list-empty-resp.xml new file mode 100644 index 000000000..6ac77b4bd --- /dev/null +++ b/doc/api_samples/os-cells/cells-list-empty-resp.xml @@ -0,0 +1,2 @@ +<?xml version='1.0' encoding='UTF-8'?> +<cells xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"/>
\ No newline at end of file diff --git a/doc/api_samples/os-cells/cells-list-resp.json b/doc/api_samples/os-cells/cells-list-resp.json new file mode 100644 index 000000000..97ea4c6dd --- /dev/null +++ b/doc/api_samples/os-cells/cells-list-resp.json @@ -0,0 +1,39 @@ +{ + "cells": [ + { + "name": "cell1", + "rpc_host": null, + "rpc_port": null, + "type": "child", + "username": "username1" + }, + { + "name": "cell3", + "rpc_host": null, + "rpc_port": null, + "type": "child", + "username": "username3" + }, + { + "name": "cell5", + "rpc_host": null, + "rpc_port": null, + "type": "child", + "username": "username5" + }, + { + "name": "cell2", + "rpc_host": null, + "rpc_port": null, + "type": "parent", + "username": "username2" + }, + { + "name": "cell4", + "rpc_host": null, + "rpc_port": null, + "type": "parent", + "username": "username4" + } + ] +}
\ No newline at end of file diff --git a/doc/api_samples/os-cells/cells-list-resp.xml b/doc/api_samples/os-cells/cells-list-resp.xml new file mode 100644 index 000000000..7d697bb91 --- /dev/null +++ b/doc/api_samples/os-cells/cells-list-resp.xml @@ -0,0 +1,8 @@ +<?xml version='1.0' encoding='UTF-8'?> +<cells xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"> + <cell username="username1" rpc_host="None" type="child" name="cell1" rpc_port="None"/> + <cell username="username3" rpc_host="None" type="child" name="cell3" rpc_port="None"/> + <cell username="username5" rpc_host="None" type="child" name="cell5" rpc_port="None"/> + <cell username="username2" rpc_host="None" type="parent" name="cell2" rpc_port="None"/> + <cell username="username4" rpc_host="None" type="parent" name="cell4" rpc_port="None"/> +</cells>
\ No newline at end of file diff --git a/doc/api_samples/os-consoles/get-spice-console-post-req.json b/doc/api_samples/os-consoles/get-spice-console-post-req.json new file mode 100644 index 000000000..d04f7c7ae --- /dev/null +++ b/doc/api_samples/os-consoles/get-spice-console-post-req.json @@ -0,0 +1,5 @@ +{ + "os-getSPICEConsole": { + "type": "spice-html5" + } +} diff --git a/doc/api_samples/os-consoles/get-spice-console-post-req.xml b/doc/api_samples/os-consoles/get-spice-console-post-req.xml new file mode 100644 index 000000000..59052abea --- /dev/null +++ b/doc/api_samples/os-consoles/get-spice-console-post-req.xml @@ -0,0 +1,2 @@ +<?xml version="1.0" encoding="UTF-8"?> +<os-getSPICEConsole type="spice-html5" /> diff --git a/doc/api_samples/os-consoles/get-spice-console-post-resp.json b/doc/api_samples/os-consoles/get-spice-console-post-resp.json new file mode 100644 index 000000000..f4999e1ba --- /dev/null +++ b/doc/api_samples/os-consoles/get-spice-console-post-resp.json @@ -0,0 +1,6 @@ +{ + "console": { + "type": "spice-html5", + "url": "http://example.com:6080/spice_auto.html?token=f9906a48-b71e-4f18-baca-c987da3ebdb3&title=dafa(75ecef58-3b8e-4659-ab3b-5501454188e9)" + } +} diff --git a/doc/api_samples/os-consoles/get-spice-console-post-resp.xml b/doc/api_samples/os-consoles/get-spice-console-post-resp.xml new file mode 100644 index 000000000..acba8b1f0 --- /dev/null +++ b/doc/api_samples/os-consoles/get-spice-console-post-resp.xml @@ -0,0 +1,5 @@ +<?xml version='1.0' encoding='UTF-8'?> +<console> + <type>spice-html5</type> + <url>http://example.com:6080/spice_auto.html?token=f9906a48-b71e-4f18-baca-c987da3ebdb3</url> +</console> diff --git a/doc/api_samples/os-flavor-manage/flavor-create-post-req.json b/doc/api_samples/os-flavor-manage/flavor-create-post-req.json index 8a3830f09..0c5914a01 100644 --- a/doc/api_samples/os-flavor-manage/flavor-create-post-req.json +++ b/doc/api_samples/os-flavor-manage/flavor-create-post-req.json @@ -4,6 +4,6 @@ "ram": 1024, 
"vcpus": 2, "disk": 10, - "id": "10", + "id": "10" } } diff --git a/doc/api_samples/os-hosts/hosts-list-resp.json b/doc/api_samples/os-hosts/hosts-list-resp.json index 5a963c602..0c4126a7e 100644 --- a/doc/api_samples/os-hosts/hosts-list-resp.json +++ b/doc/api_samples/os-hosts/hosts-list-resp.json @@ -24,6 +24,11 @@ "host_name": "6e48bfe1a3304b7b86154326328750ae", "service": "conductor", "zone": "internal" + }, + { + "host_name": "39f55087a1024d1380755951c945ca69", + "service": "cells", + "zone": "internal" } ] } diff --git a/doc/api_samples/os-hosts/hosts-list-resp.xml b/doc/api_samples/os-hosts/hosts-list-resp.xml index 8266a5d49..9a99c577a 100644 --- a/doc/api_samples/os-hosts/hosts-list-resp.xml +++ b/doc/api_samples/os-hosts/hosts-list-resp.xml @@ -5,4 +5,5 @@ <host host_name="2d1bdd671b5d41fd89dec74be5770c63" service="network"/> <host host_name="7c2dd5ecb7494dd1bf4240b7f7f9bf3a" service="scheduler"/> <host host_name="f9c273d8e03141a2a01def0ad18e5be4" service="conductor"/> -</hosts>
\ No newline at end of file + <host host_name="2b893569cd824b979bd80a2c94570a1f" service="cells"/> +</hosts> diff --git a/doc/api_samples/os-networks/networks-list-res.json b/doc/api_samples/os-tenant-networks/networks-list-res.json index b857e8112..b857e8112 100644 --- a/doc/api_samples/os-networks/networks-list-res.json +++ b/doc/api_samples/os-tenant-networks/networks-list-res.json diff --git a/doc/api_samples/os-networks/networks-post-res.json b/doc/api_samples/os-tenant-networks/networks-post-res.json index 536a9a0a4..536a9a0a4 100644 --- a/doc/api_samples/os-networks/networks-post-res.json +++ b/doc/api_samples/os-tenant-networks/networks-post-res.json diff --git a/doc/source/conf.py b/doc/source/conf.py index 804080e79..0bdaeb08e 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -145,6 +145,8 @@ man_pages = [ [u'OpenStack'], 1), ('man/nova-novncproxy', 'nova-novncproxy', u'Cloud controller fabric', [u'OpenStack'], 1), + ('man/nova-spicehtml5proxy', 'nova-spicehtml5proxy', u'Cloud controller fabric', + [u'OpenStack'], 1), ('man/nova-objectstore', 'nova-objectstore', u'Cloud controller fabric', [u'OpenStack'], 1), ('man/nova-rootwrap', 'nova-rootwrap', u'Cloud controller fabric', diff --git a/doc/source/devref/aggregates.rst b/doc/source/devref/aggregates.rst index 979179768..ecc6329ba 100644 --- a/doc/source/devref/aggregates.rst +++ b/doc/source/devref/aggregates.rst @@ -23,7 +23,7 @@ Host aggregates can be regarded as a mechanism to further partition an availabil Xen Pool Host Aggregates =============== -Originally all aggregates were Xen resource pools, now a aggregate can be set up as a resource pool by giving the aggregate the correct key-value pair. +Originally all aggregates were Xen resource pools, now an aggregate can be set up as a resource pool by giving the aggregate the correct key-value pair. 
You can use aggregates for XenServer resource pools when you have multiple compute nodes installed (only XenServer/XCP via xenapi driver is currently supported), and you want to leverage the capabilities of the underlying hypervisor resource pools. For example, you want to enable VM live migration (i.e. VM migration within the pool) or enable host maintenance with zero-downtime for guest instances. Please, note that VM migration across pools (i.e. storage migration) is not yet supported in XenServer/XCP, but will be added when available. Bear in mind that the two migration techniques are not mutually exclusive and can be used in combination for a higher level of flexibility in your cloud management. @@ -65,7 +65,7 @@ Usage * aggregate-add-host <id> <host> Add the host to the specified aggregate. * aggregate-remove-host <id> <host> Remove the specified host from the specfied aggregate. * aggregate-set-metadata <id> <key=value> [<key=value> ...] Update the metadata associated with the aggregate. - * aggregate-update <id> <name> [<availability_zone>] Update the aggregate's name and optionally availablity zone. + * aggregate-update <id> <name> [<availability_zone>] Update the aggregate's name and optionally availability zone. * host-list List all hosts by service * host-update --maintenance [enable | disable] Put/resume host into/from maintenance. 
diff --git a/doc/source/devref/development.environment.rst b/doc/source/devref/development.environment.rst index 4eb695963..a366c4893 100644 --- a/doc/source/devref/development.environment.rst +++ b/doc/source/devref/development.environment.rst @@ -70,7 +70,7 @@ On Ubuntu Precise (12.04) you may also need to add the following packages:: On Fedora-based distributions (e.g., Fedora/RHEL/CentOS/Scientific Linux):: - sudo yum install python-devel openssl-devel python-pip git + sudo yum install python-devel openssl-devel python-pip git gcc libxslt-devel mysql-devel Mac OS X Systems diff --git a/doc/source/man/nova-spicehtml5proxy.rst b/doc/source/man/nova-spicehtml5proxy.rst new file mode 100644 index 000000000..4d0aaa202 --- /dev/null +++ b/doc/source/man/nova-spicehtml5proxy.rst @@ -0,0 +1,48 @@ +==================== +nova-spicehtml5proxy +==================== + +-------------------------------------------------------- +Websocket Proxy for OpenStack Nova SPICE HTML5 consoles. +-------------------------------------------------------- + +:Author: openstack@lists.launchpad.net +:Date: 2012-09-27 +:Copyright: OpenStack LLC +:Version: 2012.1 +:Manual section: 1 +:Manual group: cloud computing + +SYNOPSIS +======== + + nova-spicehtml5proxy [options] + +DESCRIPTION +=========== + +Websocket proxy that is compatible with OpenStack Nova +SPICE HTML5 consoles. 
+ +OPTIONS +======= + + **General options** + +FILES +======== + +* /etc/nova/nova.conf +* /etc/nova/policy.json +* /etc/nova/rootwrap.conf +* /etc/nova/rootwrap.d/ + +SEE ALSO +======== + +* `OpenStack Nova <http://nova.openstack.org>`__ + +BUGS +==== + +* Nova is sourced in Launchpad so you can view current bugs at `OpenStack Nova <http://nova.openstack.org>`__ diff --git a/etc/nova/api-paste.ini b/etc/nova/api-paste.ini index 85603fe59..08d59c521 100644 --- a/etc/nova/api-paste.ini +++ b/etc/nova/api-paste.ini @@ -62,23 +62,12 @@ use = call:nova.api.openstack.urlmap:urlmap_factory /v1.1: openstack_compute_api_v2 /v2: openstack_compute_api_v2 -[composite:osapi_volume] -use = call:nova.api.openstack.urlmap:urlmap_factory -/: osvolumeversions -/v1: openstack_volume_api_v1 - [composite:openstack_compute_api_v2] use = call:nova.api.auth:pipeline_factory noauth = faultwrap sizelimit noauth ratelimit osapi_compute_app_v2 keystone = faultwrap sizelimit authtoken keystonecontext ratelimit osapi_compute_app_v2 keystone_nolimit = faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v2 -[composite:openstack_volume_api_v1] -use = call:nova.api.auth:pipeline_factory -noauth = faultwrap sizelimit noauth ratelimit osapi_volume_app_v1 -keystone = faultwrap sizelimit authtoken keystonecontext ratelimit osapi_volume_app_v1 -keystone_nolimit = faultwrap sizelimit authtoken keystonecontext osapi_volume_app_v1 - [filter:faultwrap] paste.filter_factory = nova.api.openstack:FaultWrapper.factory @@ -97,18 +86,9 @@ paste.app_factory = nova.api.openstack.compute:APIRouter.factory [pipeline:oscomputeversions] pipeline = faultwrap oscomputeversionapp -[app:osapi_volume_app_v1] -paste.app_factory = nova.api.openstack.volume:APIRouter.factory - [app:oscomputeversionapp] paste.app_factory = nova.api.openstack.compute.versions:Versions.factory -[pipeline:osvolumeversions] -pipeline = faultwrap osvolumeversionapp - -[app:osvolumeversionapp] -paste.app_factory = 
nova.api.openstack.volume.versions:Versions.factory - ########## # Shared # ########## diff --git a/etc/nova/nova.conf.sample b/etc/nova/nova.conf.sample index 77133d988..a5f945618 100644 --- a/etc/nova/nova.conf.sample +++ b/etc/nova/nova.conf.sample @@ -1,47 +1,6 @@ [DEFAULT] # -# Options defined in nova.openstack.common.cfg:CommonConfigOpts -# - -# Print debugging output (boolean value) -#debug=false - -# Print more verbose output (boolean value) -#verbose=false - -# If this option is specified, the logging configuration file -# specified is used and overrides any other logging options -# specified. Please see the Python logging module -# documentation for details on logging configuration files. -# (string value) -#log_config=<None> - -# A logging.Formatter log message format string which may use -# any of the available logging.LogRecord attributes. Default: -# %(default)s (string value) -#log_format=%(asctime)s %(levelname)8s [%(name)s] %(message)s - -# Format string for %%(asctime)s in log records. Default: -# %(default)s (string value) -#log_date_format=%Y-%m-%d %H:%M:%S - -# (Optional) Name of log file to output to. If not set, -# logging will go to stdout. (string value) -#log_file=<None> - -# (Optional) The directory to keep log files in (will be -# prepended to --log-file) (string value) -#log_dir=<None> - -# Use syslog for logging. 
(boolean value) -#use_syslog=false - -# syslog facility to receive log lines (string value) -#syslog_log_facility=LOG_USER - - -# # Options defined in nova.availability_zones # @@ -486,6 +445,22 @@ # +# Options defined in nova.api.openstack.compute.contrib.os_tenant_networks +# + +# Enables or disables quotaing of tenant networks (boolean +# value) +#enable_network_quota=false + +# Control for checking for default networks (string value) +#use_quantum_default_nets=False + +# Default tenant id when creating quantum networks (string +# value) +#quantum_default_tenant_id=default + + +# # Options defined in nova.api.openstack.compute.extensions # @@ -1123,10 +1098,6 @@ # Autoassigning floating ip to VM (boolean value) #auto_assign_floating_ip=false -# Network host to use for ip allocation in flat modes (string -# value) -#network_host=nova - # If passed, use fake network devices and addresses (boolean # value) #fake_network=false @@ -1207,6 +1178,10 @@ # (string value) #quantum_auth_strategy=keystone +# Name of Integration Bridge used by Open vSwitch (string +# value) +#quantum_ovs_bridge=br-int + # # Options defined in nova.network.rpcapi @@ -1253,6 +1228,14 @@ # Options defined in nova.openstack.common.log # +# Print debugging output (set logging level to DEBUG instead +# of default WARNING level). (boolean value) +#debug=false + +# Print more verbose output (set logging level to INFO instead +# of default WARNING level). 
(boolean value) +#verbose=false + # Log output to standard error (boolean value) #use_stderr=true @@ -1262,11 +1245,11 @@ # format string to use for log messages with context (string # value) -#logging_context_format_string=%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s] %(instance)s%(message)s +#logging_context_format_string=%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s%(message)s # format string to use for log messages without context # (string value) -#logging_default_format_string=%(asctime)s %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s +#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s # data to append to log format when level is DEBUG (string # value) @@ -1274,7 +1257,7 @@ # prefix each line of exception output with this format # (string value) -#logging_exception_prefix=%(asctime)s %(process)d TRACE %(name)s %(instance)s +#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s # list of logger=LEVEL pairs (list value) #default_log_levels=amqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,keystone=INFO,eventlet.wsgi.server=WARN @@ -1293,6 +1276,36 @@ # it like this (string value) #instance_uuid_format="[instance: %(uuid)s] " +# If this option is specified, the logging configuration file +# specified is used and overrides any other logging options +# specified. Please see the Python logging module +# documentation for details on logging configuration files. +# (string value) +#log_config=<None> + +# A logging.Formatter log message format string which may use +# any of the available logging.LogRecord attributes. Default: +# %(default)s (string value) +#log_format=%(asctime)s %(levelname)8s [%(name)s] %(message)s + +# Format string for %%(asctime)s in log records. 
Default: +# %(default)s (string value) +#log_date_format=%Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to output to. If not set, +# logging will go to stdout. (string value) +#log_file=<None> + +# (Optional) The directory to keep log files in (will be +# prepended to --log-file) (string value) +#log_dir=<None> + +# Use syslog for logging. (boolean value) +#use_syslog=false + +# syslog facility to receive log lines (string value) +#syslog_log_facility=LOG_USER + # # Options defined in nova.openstack.common.notifier.api @@ -1724,13 +1737,18 @@ # -# Options defined in nova.virt.hyperv.vmops +# Options defined in nova.virt.hyperv.vif # -# Default vSwitch Name, if none provided first external is -# used (string value) +# External virtual switch Name, if not provided, the first +# external virtual switch is used (string value) #vswitch_name=<None> + +# +# Options defined in nova.virt.hyperv.vmops +# + # Required for live migration among hosts with different CPU # features (boolean value) #limit_cpu_features=false @@ -1756,7 +1774,7 @@ # value) #hyperv_attaching_volume_retry_count=10 -# The seconds to wait between an volume attachment attempt +# The seconds to wait between a volume attachment attempt # (integer value) #hyperv_wait_between_attach_retry=5 @@ -1985,26 +2003,26 @@ # Options defined in nova.virt.vmwareapi.driver # -# URL for connection to VMWare ESX host.Required if -# compute_driver is vmwareapi.VMWareESXDriver. (string value) +# URL for connection to VMware ESX host.Required if +# compute_driver is vmwareapi.VMwareESXDriver. (string value) #vmwareapi_host_ip=<None> -# Username for connection to VMWare ESX host. Used only if -# compute_driver is vmwareapi.VMWareESXDriver. (string value) +# Username for connection to VMware ESX host. Used only if +# compute_driver is vmwareapi.VMwareESXDriver. (string value) #vmwareapi_host_username=<None> -# Password for connection to VMWare ESX host. Used only if -# compute_driver is vmwareapi.VMWareESXDriver. 
(string value) +# Password for connection to VMware ESX host. Used only if +# compute_driver is vmwareapi.VMwareESXDriver. (string value) #vmwareapi_host_password=<None> # The interval used for polling of remote tasks. Used only if -# compute_driver is vmwareapi.VMWareESXDriver. (floating point +# compute_driver is vmwareapi.VMwareESXDriver. (floating point # value) #vmwareapi_task_poll_interval=5.0 # The number of times we retry on failures, e.g., socket # error, etc. Used only if compute_driver is -# vmwareapi.VMWareESXDriver. (integer value) +# vmwareapi.VMwareESXDriver. (integer value) #vmwareapi_api_retry_count=10 @@ -2278,10 +2296,17 @@ # (string value) #cinder_endpoint_template=<None> +# region name of this node (string value) +#os_region_name=<None> + # Number of cinderclient retries on failed http calls (integer # value) #cinder_http_retries=3 +# Allow to perform insecure SSL requests to cinder (boolean +# value) +#cinder_api_insecure=false + [conductor] @@ -2476,7 +2501,7 @@ # # Do not set this out of dev/test environments. 
If a node does -# not have an fixed PXE IP address, volumes are exported with +# not have a fixed PXE IP address, volumes are exported with # globally opened ACL (boolean value) #use_unsafe_iscsi=false @@ -2518,4 +2543,32 @@ #attestation_auth_blob=<None> -# Total option count: 514 +[spice] + +# +# Options defined in nova.spice +# + +# location of spice html5 console proxy, in the form +# "http://127.0.0.1:6080/spice_auto.html" (string value) +#html5proxy_base_url=http://127.0.0.1:6080/spice_auto.html + +# IP address on which instance spice server should listen +# (string value) +#server_listen=127.0.0.1 + +# the address to which proxy clients (like nova- +# spicehtml5proxy) should connect (string value) +#server_proxyclient_address=127.0.0.1 + +# enable spice related features (boolean value) +#enabled=false + +# enable spice guest agent support (boolean value) +#agent_enabled=true + +# keymap for spice (string value) +#keymap=en-us + + +# Total option count: 525 diff --git a/etc/nova/policy.json b/etc/nova/policy.json index 04766371e..f85ab9758 100644 --- a/etc/nova/policy.json +++ b/etc/nova/policy.json @@ -29,6 +29,7 @@ "compute_extension:admin_actions:migrate": "rule:admin_api", "compute_extension:aggregates": "rule:admin_api", "compute_extension:agents": "rule:admin_api", + "compute_extension:cells": "rule:admin_api", "compute_extension:certificates": "", "compute_extension:cloudpipe": "rule:admin_api", "compute_extension:cloudpipe_update": "rule:admin_api", @@ -82,6 +83,8 @@ "compute_extension:virtual_storage_arrays": "", "compute_extension:volumes": "", "compute_extension:volumetypes": "", + "compute_extension:availability_zone:list": "", + "compute_extension:availability_zone:detail": "rule:admin_api", "volume:create": "", diff --git a/etc/nova/rootwrap.d/baremetal-compute-pxe.filters b/etc/nova/rootwrap.d/baremetal-compute-pxe.filters deleted file mode 100644 index 35fa61723..000000000 --- a/etc/nova/rootwrap.d/baremetal-compute-pxe.filters +++ /dev/null @@ 
-1,11 +0,0 @@ -# nova-rootwrap command filters for compute nodes -# This file should be owned by (and only-writeable by) the root user - -[Filters] - -# nova/virt/baremetal/pxe.py: 'dnsmasq', ... -dnsmasq: CommandFilter, /usr/sbin/dnsmasq, root - -# nova/virt/baremetal/pxe.py: 'kill', '-TERM', str(dnsmasq_pid) -kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -15, -TERM - diff --git a/etc/nova/rootwrap.d/compute.filters b/etc/nova/rootwrap.d/compute.filters index f344a1b1c..e1113a9e7 100644 --- a/etc/nova/rootwrap.d/compute.filters +++ b/etc/nova/rootwrap.d/compute.filters @@ -99,9 +99,11 @@ pygrub: CommandFilter, /usr/bin/pygrub, root fdisk: CommandFilter, /sbin/fdisk, root # nova/virt/xenapi/vm_utils.py: e2fsck, -f, -p, partition_path +# nova/virt/disk/api.py: e2fsck, -f, -p, image e2fsck: CommandFilter, /sbin/e2fsck, root # nova/virt/xenapi/vm_utils.py: resize2fs, partition_path +# nova/virt/disk/api.py: resize2fs, image resize2fs: CommandFilter, /sbin/resize2fs, root # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ... 
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 7cd7e1c7d..85b87e3e5 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -511,7 +511,13 @@ class Executor(wsgi.Application): except exception.KeyPairExists as ex: LOG.debug(_('KeyPairExists raised: %s'), unicode(ex), context=context) - return ec2_error(req, request_id, type(ex).__name__, unicode(ex)) + code = 'InvalidKeyPair.Duplicate' + return ec2_error(req, request_id, code, unicode(ex)) + except exception.InvalidKeypair as ex: + LOG.debug(_('InvalidKeypair raised: %s'), unicode(ex), + context) + code = 'InvalidKeyPair.Format' + return ec2_error(req, request_id, code, unicode(ex)) except exception.InvalidParameterValue as ex: LOG.debug(_('InvalidParameterValue raised: %s'), unicode(ex), context=context) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 73a4a02ae..48b0f632f 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -27,6 +27,7 @@ import time from nova.api.ec2 import ec2utils from nova.api.ec2 import inst_state +from nova.api.metadata import password from nova.api import validator from nova import availability_zones from nova import block_device @@ -148,7 +149,7 @@ def _properties_get_mappings(properties): def _format_block_device_mapping(bdm): - """Contruct BlockDeviceMappingItemType + """Construct BlockDeviceMappingItemType {'device_name': '...', 'snapshot_id': , ...} => BlockDeviceMappingItemType """ @@ -249,32 +250,10 @@ class CloudController(object): else: return self._describe_availability_zones(context, **kwargs) - def _get_zones(self, context): - """Return available and unavailable zones.""" - enabled_services = db.service_get_all(context, False) - disabled_services = db.service_get_all(context, True) - enabled_services = availability_zones.set_availability_zones(context, - enabled_services) - disabled_services = availability_zones.set_availability_zones(context, - disabled_services) - - available_zones = [] - for zone in 
[service['availability_zone'] for service - in enabled_services]: - if not zone in available_zones: - available_zones.append(zone) - - not_available_zones = [] - zones = [service['available_zones'] for service in disabled_services - if service['available_zones'] not in available_zones] - for zone in zones: - if zone not in not_available_zones: - not_available_zones.append(zone) - return (available_zones, not_available_zones) - def _describe_availability_zones(self, context, **kwargs): ctxt = context.elevated() - available_zones, not_available_zones = self._get_zones(ctxt) + available_zones, not_available_zones = \ + availability_zones.get_availability_zones(ctxt) result = [] for zone in available_zones: @@ -290,7 +269,8 @@ class CloudController(object): def _describe_availability_zones_verbose(self, context, **kwargs): ctxt = context.elevated() - available_zones, not_available_zones = self._get_zones(ctxt) + available_zones, not_available_zones = \ + availability_zones.get_availability_zones(ctxt) # Available services enabled_services = db.service_get_all(context, False) @@ -433,7 +413,8 @@ class CloudController(object): #If looking for non existent key pair if key_name is not None and not key_pairs: msg = _('Could not find key pair(s): %s') % ','.join(key_name) - raise exception.EC2APIError(msg) + raise exception.KeypairNotFound(msg, + code="InvalidKeyPair.Duplicate") result = [] for key_pair in key_pairs: @@ -456,13 +437,7 @@ class CloudController(object): key_name) except exception.KeypairLimitExceeded: msg = _("Quota exceeded, too many key pairs.") - raise exception.EC2APIError(msg) - except exception.InvalidKeypair: - msg = _("Keypair data is invalid") - raise exception.EC2APIError(msg) - except exception.KeyPairExists: - msg = _("Key pair '%s' already exists.") % key_name - raise exception.KeyPairExists(msg) + raise exception.EC2APIError(msg, code='ResourceLimitExceeded') return {'keyName': key_name, 'keyFingerprint': keypair['fingerprint'], 'keyMaterial': 
keypair['private_key']} @@ -485,9 +460,6 @@ class CloudController(object): except exception.InvalidKeypair: msg = _("Keypair data is invalid") raise exception.EC2APIError(msg) - except exception.KeyPairExists: - msg = _("Key pair '%s' already exists.") % key_name - raise exception.EC2APIError(msg) return {'keyName': key_name, 'keyFingerprint': keypair['fingerprint']} @@ -758,6 +730,23 @@ class CloudController(object): return True + def get_password_data(self, context, instance_id, **kwargs): + # instance_id may be passed in as a list of instances + if isinstance(instance_id, list): + ec2_id = instance_id[0] + else: + ec2_id = instance_id + validate_ec2_id(ec2_id) + instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id) + instance = self.compute_api.get(context, instance_uuid) + output = password.extract_password(instance) + # NOTE(vish): this should be timestamp from the metadata fields + # but it isn't important enough to implement properly + now = timeutils.utcnow() + return {"InstanceId": ec2_id, + "Timestamp": now, + "passwordData": output} + def get_console_output(self, context, instance_id, **kwargs): LOG.audit(_("Get console output for instance %s"), instance_id, context=context) diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py index cfe0d7879..bc47b3e0d 100644 --- a/nova/api/ec2/ec2utils.py +++ b/nova/api/ec2/ec2utils.py @@ -117,7 +117,8 @@ def get_ip_info_for_instance(context, instance): def get_availability_zone_by_host(services, host): if len(services) > 0: - return availability_zones.get_host_availability_zone(context, host) + return availability_zones.get_host_availability_zone( + context.get_admin_context(), host) return 'unknown zone' @@ -178,7 +179,7 @@ def ec2_vol_id_to_uuid(ec2_id): def is_ec2_timestamp_expired(request, expires=None): - """Checks the timestamp or expiry time included in a EC2 request + """Checks the timestamp or expiry time included in an EC2 request and returns true if the request is expired """ query_time 
= None diff --git a/nova/api/openstack/compute/contrib/admin_actions.py b/nova/api/openstack/compute/contrib/admin_actions.py index f345d9617..1c053ea59 100644 --- a/nova/api/openstack/compute/contrib/admin_actions.py +++ b/nova/api/openstack/compute/contrib/admin_actions.py @@ -130,7 +130,7 @@ class AdminActionsController(wsgi.Controller): @wsgi.action('resetNetwork') def _reset_network(self, req, id, body): - """Permit admins to reset networking on an server.""" + """Permit admins to reset networking on a server.""" context = req.environ['nova.context'] authorize(context, 'resetNetwork') try: @@ -307,9 +307,7 @@ class AdminActionsController(wsgi.Controller): try: instance = self.compute_api.get(context, id) - self.compute_api.update(context, instance, - vm_state=state, - task_state=None) + self.compute_api.update_state(context, instance, state) except exception.InstanceNotFound: raise exc.HTTPNotFound(_("Server not found")) except Exception: diff --git a/nova/api/openstack/compute/contrib/admin_networks.py b/nova/api/openstack/compute/contrib/admin_networks.py deleted file mode 100644 index f5facd601..000000000 --- a/nova/api/openstack/compute/contrib/admin_networks.py +++ /dev/null @@ -1,170 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 Grid Dynamics -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import netaddr -import webob -from webob import exc - -from nova.api.openstack import extensions -from nova.api.openstack import wsgi -from nova import exception -from nova import network -from nova.openstack.common import log as logging - -LOG = logging.getLogger(__name__) -authorize = extensions.extension_authorizer('compute', 'admin_networks') -authorize_view = extensions.extension_authorizer('compute', - 'admin_networks:view') - - -def network_dict(context, network): - fields = ('id', 'cidr', 'netmask', 'gateway', 'broadcast', 'dns1', 'dns2', - 'cidr_v6', 'gateway_v6', 'label', 'netmask_v6') - admin_fields = ('created_at', 'updated_at', 'deleted_at', 'deleted', - 'injected', 'bridge', 'vlan', 'vpn_public_address', - 'vpn_public_port', 'vpn_private_address', 'dhcp_start', - 'project_id', 'host', 'bridge_interface', 'multi_host', - 'priority', 'rxtx_base') - if network: - # NOTE(mnaser): We display a limited set of fields so users can know - # what networks are available, extra system-only fields - # are only visible if they are an admin. 
- if context.is_admin: - fields += admin_fields - result = dict((field, network[field]) for field in fields) - if 'uuid' in network: - result['id'] = network['uuid'] - return result - else: - return {} - - -class AdminNetworkController(wsgi.Controller): - - def __init__(self, network_api=None): - self.network_api = network_api or network.API() - - def index(self, req): - context = req.environ['nova.context'] - authorize_view(context) - networks = self.network_api.get_all(context) - result = [network_dict(context, net_ref) for net_ref in networks] - return {'networks': result} - - @wsgi.action("disassociate") - def _disassociate_host_and_project(self, req, id, body): - context = req.environ['nova.context'] - authorize(context) - LOG.debug(_("Disassociating network with id %s"), id) - - try: - self.network_api.associate(context, id, host=None, project=None) - except exception.NetworkNotFound: - raise exc.HTTPNotFound(_("Network not found")) - return exc.HTTPAccepted() - - def show(self, req, id): - context = req.environ['nova.context'] - authorize_view(context) - LOG.debug(_("Showing network with id %s") % id) - try: - network = self.network_api.get(context, id) - except exception.NetworkNotFound: - raise exc.HTTPNotFound(_("Network not found")) - return {'network': network_dict(context, network)} - - def delete(self, req, id): - context = req.environ['nova.context'] - authorize(context) - LOG.info(_("Deleting network with id %s") % id) - try: - self.network_api.delete(context, id) - except exception.NetworkNotFound: - raise exc.HTTPNotFound(_("Network not found")) - return exc.HTTPAccepted() - - def create(self, req, body): - context = req.environ['nova.context'] - authorize(context) - - def bad(e): - return exc.HTTPUnprocessableEntity(explanation=e) - - if not (body and body.get("network")): - raise bad(_("Missing network in body")) - - params = body["network"] - if not params.get("label"): - raise bad(_("Network label is required")) - - cidr = params.get("cidr") 
or params.get("cidr_v6") - if not cidr: - raise bad(_("Network cidr or cidr_v6 is required")) - - LOG.debug(_("Creating network with label %s") % params["label"]) - - params["num_networks"] = 1 - params["network_size"] = netaddr.IPNetwork(cidr).size - - network = self.network_api.create(context, **params)[0] - return {"network": network_dict(context, network)} - - def add(self, req, body): - context = req.environ['nova.context'] - authorize(context) - if not body: - raise exc.HTTPUnprocessableEntity() - - network_id = body.get('id', None) - project_id = context.project_id - LOG.debug(_("Associating network %(network)s" - " with project %(project)s") % - {"network": network_id or "", - "project": project_id}) - try: - self.network_api.add_network_to_project( - context, project_id, network_id) - except Exception as ex: - msg = (_("Cannot associate network %(network)s" - " with project %(project)s: %(message)s") % - {"network": network_id or "", - "project": project_id, - "message": getattr(ex, "value", str(ex))}) - raise exc.HTTPBadRequest(explanation=msg) - - return webob.Response(status_int=202) - - -class Admin_networks(extensions.ExtensionDescriptor): - """Admin-only Network Management Extension.""" - - name = "AdminNetworks" - alias = "os-admin-networks" - namespace = ("http://docs.openstack.org/compute/" - "ext/os-admin-networks/api/v1.1") - updated = "2011-12-23T00:00:00+00:00" - - def get_resources(self): - member_actions = {'action': 'POST'} - collection_actions = {'add': 'POST'} - res = extensions.ResourceExtension( - 'os-admin-networks', - AdminNetworkController(), - member_actions=member_actions, - collection_actions=collection_actions) - return [res] diff --git a/nova/api/openstack/compute/contrib/availability_zone.py b/nova/api/openstack/compute/contrib/availability_zone.py index 2955b68eb..6cde5ca64 100644 --- a/nova/api/openstack/compute/contrib/availability_zone.py +++ b/nova/api/openstack/compute/contrib/availability_zone.py @@ -14,14 +14,165 @@ # 
License for the specific language governing permissions and limitations # under the License +from nova.api.openstack import common from nova.api.openstack import extensions +from nova.api.openstack import wsgi +from nova.api.openstack import xmlutil +from nova import availability_zones +from nova import db +from nova.openstack.common import cfg +from nova.openstack.common import log as logging +from nova import servicegroup + + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF + +authorize_list = extensions.extension_authorizer('compute', + 'availability_zone:list') +authorize_detail = extensions.extension_authorizer('compute', + 'availability_zone:detail') + + +def make_availability_zone(elem): + elem.set('name', 'zoneName') + + zoneStateElem = xmlutil.SubTemplateElement(elem, 'zoneState', + selector='zoneState') + zoneStateElem.set('available') + + hostsElem = xmlutil.SubTemplateElement(elem, 'hosts', selector='hosts') + hostElem = xmlutil.SubTemplateElement(hostsElem, 'host', + selector=xmlutil.get_items) + hostElem.set('name', 0) + + svcsElem = xmlutil.SubTemplateElement(hostElem, 'services', selector=1) + svcElem = xmlutil.SubTemplateElement(svcsElem, 'service', + selector=xmlutil.get_items) + svcElem.set('name', 0) + + svcStateElem = xmlutil.SubTemplateElement(svcElem, 'serviceState', + selector=1) + svcStateElem.set('available') + svcStateElem.set('active') + svcStateElem.set('updated_at') + + # Attach metadata node + elem.append(common.MetadataTemplate()) + + +class AvailabilityZonesTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('availabilityZones') + zoneElem = xmlutil.SubTemplateElement(root, 'availabilityZone', + selector='availabilityZoneInfo') + make_availability_zone(zoneElem) + return xmlutil.MasterTemplate(root, 1, nsmap={ + Availability_zone.alias: Availability_zone.namespace}) + + +class AvailabilityZoneController(wsgi.Controller): + """The Availability Zone API controller for the OpenStack API.""" + + 
def __init__(self): + super(AvailabilityZoneController, self).__init__() + self.servicegroup_api = servicegroup.API() + + def _describe_availability_zones(self, context, **kwargs): + ctxt = context.elevated() + available_zones, not_available_zones = \ + availability_zones.get_availability_zones(ctxt) + + result = [] + for zone in available_zones: + # Hide internal_service_availability_zone + if zone == CONF.internal_service_availability_zone: + continue + result.append({'zoneName': zone, + 'zoneState': {'available': True}, + "hosts": None}) + for zone in not_available_zones: + result.append({'zoneName': zone, + 'zoneState': {'available': False}, + "hosts": None}) + return {'availabilityZoneInfo': result} + + def _describe_availability_zones_verbose(self, context, **kwargs): + ctxt = context.elevated() + available_zones, not_available_zones = \ + availability_zones.get_availability_zones(ctxt) + + # Available services + enabled_services = db.service_get_all(context, False) + enabled_services = availability_zones.set_availability_zones(context, + enabled_services) + zone_hosts = {} + host_services = {} + for service in enabled_services: + zone_hosts.setdefault(service['availability_zone'], []) + if not service['host'] in zone_hosts[service['availability_zone']]: + zone_hosts[service['availability_zone']].append( + service['host']) + + host_services.setdefault(service['availability_zone'] + + service['host'], []) + host_services[service['availability_zone'] + service['host']].\ + append(service) + + result = [] + for zone in available_zones: + hosts = {} + for host in zone_hosts[zone]: + hosts[host] = {} + for service in host_services[zone + host]: + alive = self.servicegroup_api.service_is_up(service) + hosts[host][service['binary']] = {'available': alive, + 'active': True != service['disabled'], + 'updated_at': service['updated_at']} + result.append({'zoneName': zone, + 'zoneState': {'available': True}, + "hosts": hosts}) + + for zone in not_available_zones: + 
result.append({'zoneName': zone, + 'zoneState': {'available': False}, + "hosts": None}) + return {'availabilityZoneInfo': result} + + @wsgi.serializers(xml=AvailabilityZonesTemplate) + def index(self, req): + """Returns a summary list of availability zone.""" + context = req.environ['nova.context'] + authorize_list(context) + + return self._describe_availability_zones(context) + + @wsgi.serializers(xml=AvailabilityZonesTemplate) + def detail(self, req): + """Returns a detailed list of availability zone.""" + context = req.environ['nova.context'] + authorize_detail(context) + + return self._describe_availability_zones_verbose(context) class Availability_zone(extensions.ExtensionDescriptor): - """Add availability_zone to the Create Server v1.1 API.""" + """1. Add availability_zone to the Create Server v1.1 API. + 2. Add availability zones describing. + """ name = "AvailabilityZone" alias = "os-availability-zone" namespace = ("http://docs.openstack.org/compute/ext/" "availabilityzone/api/v1.1") - updated = "2012-08-09T00:00:00+00:00" + updated = "2012-12-21T00:00:00+00:00" + + def get_resources(self): + resources = [] + + res = extensions.ResourceExtension('os-availability-zone', + AvailabilityZoneController(), + collection_actions={'detail': 'GET'}) + resources.append(res) + + return resources diff --git a/nova/api/openstack/compute/contrib/cells.py b/nova/api/openstack/compute/contrib/cells.py new file mode 100644 index 000000000..03e2e4ca2 --- /dev/null +++ b/nova/api/openstack/compute/contrib/cells.py @@ -0,0 +1,303 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011-2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The cells extension.""" +from xml.dom import minidom +from xml.parsers import expat + +from webob import exc + +from nova.api.openstack import common +from nova.api.openstack import extensions +from nova.api.openstack import wsgi +from nova.api.openstack import xmlutil +from nova.cells import rpcapi as cells_rpcapi +from nova.compute import api as compute +from nova import db +from nova import exception +from nova.openstack.common import cfg +from nova.openstack.common import log as logging +from nova.openstack.common import timeutils + + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF +CONF.import_opt('name', 'nova.cells.opts', group='cells') +CONF.import_opt('capabilities', 'nova.cells.opts', group='cells') + +authorize = extensions.extension_authorizer('compute', 'cells') + + +def make_cell(elem): + elem.set('name') + elem.set('username') + elem.set('type') + elem.set('rpc_host') + elem.set('rpc_port') + + caps = xmlutil.SubTemplateElement(elem, 'capabilities', + selector='capabilities') + cap = xmlutil.SubTemplateElement(caps, xmlutil.Selector(0), + selector=xmlutil.get_items) + cap.text = 1 + + +cell_nsmap = {None: wsgi.XMLNS_V10} + + +class CellTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('cell', selector='cell') + make_cell(root) + return xmlutil.MasterTemplate(root, 1, nsmap=cell_nsmap) + + +class CellsTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('cells') + elem = xmlutil.SubTemplateElement(root, 'cell', selector='cells') + make_cell(elem) + return 
xmlutil.MasterTemplate(root, 1, nsmap=cell_nsmap) + + +class CellDeserializer(wsgi.XMLDeserializer): + """Deserializer to handle xml-formatted cell create requests.""" + + def _extract_capabilities(self, cap_node): + caps = {} + for cap in cap_node.childNodes: + cap_name = cap.tagName + caps[cap_name] = self.extract_text(cap) + return caps + + def _extract_cell(self, node): + cell = {} + cell_node = self.find_first_child_named(node, 'cell') + + extract_fns = {'capabilities': self._extract_capabilities} + + for child in cell_node.childNodes: + name = child.tagName + extract_fn = extract_fns.get(name, self.extract_text) + cell[name] = extract_fn(child) + return cell + + def default(self, string): + """Deserialize an xml-formatted cell create request.""" + try: + node = minidom.parseString(string) + except expat.ExpatError: + msg = _("cannot understand XML") + raise exception.MalformedRequestBody(reason=msg) + + return {'body': {'cell': self._extract_cell(node)}} + + +def _filter_keys(item, keys): + """ + Filters all model attributes except for keys + item is a dict + + """ + return dict((k, v) for k, v in item.iteritems() if k in keys) + + +def _scrub_cell(cell, detail=False): + keys = ['name', 'username', 'rpc_host', 'rpc_port'] + if detail: + keys.append('capabilities') + + cell_info = _filter_keys(cell, keys) + cell_info['type'] = 'parent' if cell['is_parent'] else 'child' + return cell_info + + +class Controller(object): + """Controller for Cell resources.""" + + def __init__(self): + self.compute_api = compute.API() + self.cells_rpcapi = cells_rpcapi.CellsAPI() + + def _get_cells(self, ctxt, req, detail=False): + """Return all cells.""" + # Ask the CellsManager for the most recent data + items = self.cells_rpcapi.get_cell_info_for_neighbors(ctxt) + items = common.limited(items, req) + items = [_scrub_cell(item, detail=detail) for item in items] + return dict(cells=items) + + @wsgi.serializers(xml=CellsTemplate) + def index(self, req): + """Return all cells in 
brief.""" + ctxt = req.environ['nova.context'] + authorize(ctxt) + return self._get_cells(ctxt, req) + + @wsgi.serializers(xml=CellsTemplate) + def detail(self, req): + """Return all cells in detail.""" + ctxt = req.environ['nova.context'] + authorize(ctxt) + return self._get_cells(ctxt, req, detail=True) + + @wsgi.serializers(xml=CellTemplate) + def info(self, req): + """Return name and capabilities for this cell.""" + context = req.environ['nova.context'] + authorize(context) + cell_capabs = {} + my_caps = CONF.cells.capabilities + for cap in my_caps: + key, value = cap.split('=') + cell_capabs[key] = value + cell = {'name': CONF.cells.name, + 'type': 'self', + 'rpc_host': None, + 'rpc_port': 0, + 'username': None, + 'capabilities': cell_capabs} + return dict(cell=cell) + + @wsgi.serializers(xml=CellTemplate) + def show(self, req, id): + """Return data about the given cell name. 'id' is a cell name.""" + context = req.environ['nova.context'] + authorize(context) + try: + cell = db.cell_get(context, id) + except exception.CellNotFound: + raise exc.HTTPNotFound() + return dict(cell=_scrub_cell(cell)) + + def delete(self, req, id): + """Delete a child or parent cell entry. 'id' is a cell name.""" + context = req.environ['nova.context'] + authorize(context) + num_deleted = db.cell_delete(context, id) + if num_deleted == 0: + raise exc.HTTPNotFound() + return {} + + def _validate_cell_name(self, cell_name): + """Validate cell name is not empty and doesn't contain '!' or '.'.""" + if not cell_name: + msg = _("Cell name cannot be empty") + LOG.error(msg) + raise exc.HTTPBadRequest(explanation=msg) + if '!' in cell_name or '.' in cell_name: + msg = _("Cell name cannot contain '!' 
or '.'") + LOG.error(msg) + raise exc.HTTPBadRequest(explanation=msg) + + def _validate_cell_type(self, cell_type): + """Validate cell_type is 'parent' or 'child'.""" + if cell_type not in ['parent', 'child']: + msg = _("Cell type must be 'parent' or 'child'") + LOG.error(msg) + raise exc.HTTPBadRequest(explanation=msg) + + def _convert_cell_type(self, cell): + """Convert cell['type'] to is_parent boolean.""" + if 'type' in cell: + self._validate_cell_type(cell['type']) + cell['is_parent'] = cell['type'] == 'parent' + del cell['type'] + else: + cell['is_parent'] = False + + @wsgi.serializers(xml=CellTemplate) + @wsgi.deserializers(xml=CellDeserializer) + def create(self, req, body): + """Create a child cell entry.""" + context = req.environ['nova.context'] + authorize(context) + if 'cell' not in body: + msg = _("No cell information in request") + LOG.error(msg) + raise exc.HTTPBadRequest(explanation=msg) + cell = body['cell'] + if 'name' not in cell: + msg = _("No cell name in request") + LOG.error(msg) + raise exc.HTTPBadRequest(explanation=msg) + self._validate_cell_name(cell['name']) + self._convert_cell_type(cell) + cell = db.cell_create(context, cell) + return dict(cell=_scrub_cell(cell)) + + @wsgi.serializers(xml=CellTemplate) + @wsgi.deserializers(xml=CellDeserializer) + def update(self, req, id, body): + """Update a child cell entry. 
'id' is the cell name to update.""" + context = req.environ['nova.context'] + authorize(context) + if 'cell' not in body: + msg = _("No cell information in request") + LOG.error(msg) + raise exc.HTTPBadRequest(explanation=msg) + cell = body['cell'] + cell.pop('id', None) + if 'name' in cell: + self._validate_cell_name(cell['name']) + self._convert_cell_type(cell) + try: + cell = db.cell_update(context, id, cell) + except exception.CellNotFound: + raise exc.HTTPNotFound() + return dict(cell=_scrub_cell(cell)) + + def sync_instances(self, req, body): + """Tell all cells to sync instance info.""" + context = req.environ['nova.context'] + authorize(context) + project_id = body.pop('project_id', None) + deleted = body.pop('deleted', False) + updated_since = body.pop('updated_since', None) + if body: + msg = _("Only 'updated_since' and 'project_id' are understood.") + raise exc.HTTPBadRequest(explanation=msg) + if updated_since: + try: + timeutils.parse_isotime(updated_since) + except ValueError: + msg = _('Invalid changes-since value') + raise exc.HTTPBadRequest(explanation=msg) + self.cells_rpcapi.sync_instances(context, project_id=project_id, + updated_since=updated_since, deleted=deleted) + + +class Cells(extensions.ExtensionDescriptor): + """Enables cells-related functionality such as adding neighbor cells, + listing neighbor cells, and getting the capabilities of the local cell. 
+ """ + + name = "Cells" + alias = "os-cells" + namespace = "http://docs.openstack.org/compute/ext/cells/api/v1.1" + updated = "2011-09-21T00:00:00+00:00" + + def get_resources(self): + coll_actions = { + 'detail': 'GET', + 'info': 'GET', + 'sync_instances': 'POST', + } + + res = extensions.ResourceExtension('os-cells', + Controller(), collection_actions=coll_actions) + return [res] diff --git a/nova/api/openstack/compute/contrib/consoles.py b/nova/api/openstack/compute/contrib/consoles.py index 4f88d033c..4895a9e7b 100644 --- a/nova/api/openstack/compute/contrib/consoles.py +++ b/nova/api/openstack/compute/contrib/consoles.py @@ -53,10 +53,33 @@ class ConsolesController(wsgi.Controller): return {'console': {'type': console_type, 'url': output['url']}} + @wsgi.action('os-getSPICEConsole') + def get_spice_console(self, req, id, body): + """Get text console output.""" + context = req.environ['nova.context'] + authorize(context) + + # If type is not supplied or unknown, get_spice_console below will cope + console_type = body['os-getSPICEConsole'].get('type') + + try: + instance = self.compute_api.get(context, id) + output = self.compute_api.get_spice_console(context, + instance, + console_type) + except exception.InstanceNotFound as e: + raise webob.exc.HTTPNotFound(explanation=unicode(e)) + except exception.InstanceNotReady as e: + raise webob.exc.HTTPConflict(explanation=unicode(e)) + + return {'console': {'type': console_type, 'url': output['url']}} + def get_actions(self): """Return the actions the extension adds, as required by contract.""" actions = [extensions.ActionExtension("servers", "os-getVNCConsole", - self.get_vnc_console)] + self.get_vnc_console), + extensions.ActionExtension("servers", "os-getSPICEConsole", + self.get_spice_console)] return actions diff --git a/nova/api/openstack/compute/contrib/hosts.py b/nova/api/openstack/compute/contrib/hosts.py index 52487c305..d1b39d6db 100644 --- a/nova/api/openstack/compute/contrib/hosts.py +++ 
b/nova/api/openstack/compute/contrib/hosts.py @@ -124,10 +124,17 @@ class HostController(object): """ context = req.environ['nova.context'] authorize(context) + filters = {} zone = req.GET.get('zone', None) - data = self.api.list_hosts(context, zone) - - return {'hosts': data} + if zone: + filters['availability_zone'] = zone + services = self.api.service_get_all(context, filters=filters) + hosts = [] + for service in services: + hosts.append({'host_name': service['host'], + 'service': service['topic'], + 'zone': service['availability_zone']}) + return {'hosts': hosts} @wsgi.serializers(xml=HostUpdateTemplate) @wsgi.deserializers(xml=HostUpdateDeserializer) @@ -243,6 +250,55 @@ class HostController(object): def reboot(self, req, id): return self._host_power_action(req, host_name=id, action="reboot") + @staticmethod + def _get_total_resources(host_name, compute_node): + return {'resource': {'host': host_name, + 'project': '(total)', + 'cpu': compute_node['vcpus'], + 'memory_mb': compute_node['memory_mb'], + 'disk_gb': compute_node['local_gb']}} + + @staticmethod + def _get_used_now_resources(host_name, compute_node): + return {'resource': {'host': host_name, + 'project': '(used_now)', + 'cpu': compute_node['vcpus_used'], + 'memory_mb': compute_node['memory_mb_used'], + 'disk_gb': compute_node['local_gb_used']}} + + @staticmethod + def _get_resource_totals_from_instances(host_name, instances): + cpu_sum = 0 + mem_sum = 0 + hdd_sum = 0 + for instance in instances: + cpu_sum += instance['vcpus'] + mem_sum += instance['memory_mb'] + hdd_sum += instance['root_gb'] + instance['ephemeral_gb'] + + return {'resource': {'host': host_name, + 'project': '(used_max)', + 'cpu': cpu_sum, + 'memory_mb': mem_sum, + 'disk_gb': hdd_sum}} + + @staticmethod + def _get_resources_by_project(host_name, instances): + # Getting usage resource per project + project_map = {} + for instance in instances: + resource = project_map.setdefault(instance['project_id'], + {'host': host_name, + 
'project': instance['project_id'], + 'cpu': 0, + 'memory_mb': 0, + 'disk_gb': 0}) + resource['cpu'] += instance['vcpus'] + resource['memory_mb'] += instance['memory_mb'] + resource['disk_gb'] += (instance['root_gb'] + + instance['ephemeral_gb']) + return project_map + @wsgi.serializers(xml=HostShowTemplate) def show(self, req, id): """Shows the physical/usage resource given by hosts. @@ -256,14 +312,26 @@ class HostController(object): 'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30} """ context = req.environ['nova.context'] + host_name = id try: - data = self.api.describe_host(context, id) + service = self.api.service_get_by_compute_host(context, host_name) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.message) except exception.AdminRequired: msg = _("Describe-resource is admin only functionality") raise webob.exc.HTTPForbidden(explanation=msg) - return {'host': data} + compute_node = service['compute_node'][0] + instances = self.api.instance_get_all_by_host(context, host_name) + resources = [self._get_total_resources(host_name, compute_node)] + resources.append(self._get_used_now_resources(host_name, + compute_node)) + resources.append(self._get_resource_totals_from_instances(host_name, + instances)) + by_proj_resources = self._get_resources_by_project(host_name, + instances) + for resource in by_proj_resources.itervalues(): + resources.append({'resource': resource}) + return {'host': resources} class Hosts(extensions.ExtensionDescriptor): diff --git a/nova/api/openstack/compute/contrib/networks_associate.py b/nova/api/openstack/compute/contrib/networks_associate.py index 4990c1b5e..3cdda1d76 100644 --- a/nova/api/openstack/compute/contrib/networks_associate.py +++ b/nova/api/openstack/compute/contrib/networks_associate.py @@ -62,6 +62,6 @@ class Networks_associate(extensions.ExtensionDescriptor): def get_controller_extensions(self): extension = extensions.ControllerExtension( - self, 'os-admin-networks', NetworkAssociateActionController()) 
+ self, 'os-networks', NetworkAssociateActionController()) return [extension] diff --git a/nova/api/openstack/compute/contrib/os_networks.py b/nova/api/openstack/compute/contrib/os_networks.py index 4be0bd100..d1d172686 100644 --- a/nova/api/openstack/compute/contrib/os_networks.py +++ b/nova/api/openstack/compute/contrib/os_networks.py @@ -1,6 +1,7 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2013 OpenStack LLC. +# Copyright 2011 Grid Dynamics +# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -15,199 +16,155 @@ # License for the specific language governing permissions and limitations # under the License. - import netaddr -import netaddr.core as netexc +import webob from webob import exc from nova.api.openstack import extensions -from nova import context as nova_context +from nova.api.openstack import wsgi from nova import exception -import nova.network -from nova.openstack.common import cfg +from nova import network from nova.openstack.common import log as logging -from nova import quota - - -CONF = cfg.CONF - -try: - os_network_opts = [ - cfg.BoolOpt("enable_network_quota", - default=False, - help="Enables or disables quotaing of tenant networks"), - cfg.StrOpt('use_quantum_default_nets', - default="False", - help=('Control for checking for default networks')), - cfg.StrOpt('quantum_default_tenant_id', - default="default", - help=('Default tenant id when creating quantum ' - 'networks')) - ] - CONF.register_opts(os_network_opts) -except cfg.DuplicateOptError: - # NOTE(jkoelker) These options are verbatim elsewhere this is here - # to make sure they are registered for our use. 
- pass - -if CONF.enable_network_quota: - opts = [ - cfg.IntOpt('quota_networks', - default=3, - help='number of private networks allowed per project'), - ] - CONF.register_opts(opts) - -QUOTAS = quota.QUOTAS -LOG = logging.getLogger(__name__) -authorize = extensions.extension_authorizer('compute', 'os-networks') - - -def network_dict(network): - return {"id": network.get("uuid") or network["id"], - "cidr": network["cidr"], - "label": network["label"]} +LOG = logging.getLogger(__name__) +authorize = extensions.extension_authorizer('compute', 'networks') +authorize_view = extensions.extension_authorizer('compute', + 'networks:view') + + +def network_dict(context, network): + fields = ('id', 'cidr', 'netmask', 'gateway', 'broadcast', 'dns1', 'dns2', + 'cidr_v6', 'gateway_v6', 'label', 'netmask_v6') + admin_fields = ('created_at', 'updated_at', 'deleted_at', 'deleted', + 'injected', 'bridge', 'vlan', 'vpn_public_address', + 'vpn_public_port', 'vpn_private_address', 'dhcp_start', + 'project_id', 'host', 'bridge_interface', 'multi_host', + 'priority', 'rxtx_base') + if network: + # NOTE(mnaser): We display a limited set of fields so users can know + # what networks are available, extra system-only fields + # are only visible if they are an admin. 
+ if context.is_admin: + fields += admin_fields + result = dict((field, network[field]) for field in fields) + if 'uuid' in network: + result['id'] = network['uuid'] + return result + else: + return {} + + +class NetworkController(wsgi.Controller): -class NetworkController(object): def __init__(self, network_api=None): - self.network_api = nova.network.API() - self._default_networks = [] - - def _refresh_default_networks(self): - self._default_networks = [] - if CONF.use_quantum_default_nets == "True": - try: - self._default_networks = self._get_default_networks() - except Exception: - LOG.exception("Failed to get default networks") - - def _get_default_networks(self): - project_id = CONF.quantum_default_tenant_id - ctx = nova_context.RequestContext(user_id=None, - project_id=project_id) - networks = {} - for n in self.network_api.get_all(ctx): - networks[n['id']] = n['label'] - return [{'id': k, 'label': v} for k, v in networks.iteritems()] + self.network_api = network_api or network.API() def index(self, req): context = req.environ['nova.context'] - authorize(context) + authorize_view(context) networks = self.network_api.get_all(context) - if not self._default_networks: - self._refresh_default_networks() - networks.extend(self._default_networks) - return {'networks': [network_dict(n) for n in networks]} + result = [network_dict(context, net_ref) for net_ref in networks] + return {'networks': result} - def show(self, req, id): + @wsgi.action("disassociate") + def _disassociate_host_and_project(self, req, id, body): context = req.environ['nova.context'] authorize(context) + LOG.debug(_("Disassociating network with id %s"), id) + + try: + self.network_api.associate(context, id, host=None, project=None) + except exception.NetworkNotFound: + raise exc.HTTPNotFound(_("Network not found")) + return exc.HTTPAccepted() + + def show(self, req, id): + context = req.environ['nova.context'] + authorize_view(context) LOG.debug(_("Showing network with id %s") % id) try: network 
= self.network_api.get(context, id) except exception.NetworkNotFound: raise exc.HTTPNotFound(_("Network not found")) - return network_dict(network) + return {'network': network_dict(context, network)} def delete(self, req, id): context = req.environ['nova.context'] authorize(context) - try: - if CONF.enable_network_quota: - reservation = QUOTAS.reserve(context, networks=-1) - except Exception: - reservation = None - LOG.exception(_("Failed to update usages deallocating " - "network.")) - LOG.info(_("Deleting network with id %s") % id) - try: self.network_api.delete(context, id) - if CONF.enable_network_quota and reservation: - QUOTAS.commit(context, reservation) - response = exc.HTTPAccepted() except exception.NetworkNotFound: - response = exc.HTTPNotFound(_("Network not found")) - - return response + raise exc.HTTPNotFound(_("Network not found")) + return exc.HTTPAccepted() def create(self, req, body): - if not body: - raise exc.HTTPUnprocessableEntity() - - context = req.environ["nova.context"] + context = req.environ['nova.context'] authorize(context) - network = body["network"] - keys = ["cidr", "cidr_v6", "ipam", "vlan_start", "network_size", - "num_networks"] - kwargs = dict((k, network.get(k)) for k in keys) + def bad(e): + return exc.HTTPUnprocessableEntity(explanation=e) - label = network["label"] + if not (body and body.get("network")): + raise bad(_("Missing network in body")) - if not (kwargs["cidr"] or kwargs["cidr_v6"]): - msg = _("No CIDR requested") - raise exc.HTTPBadRequest(explanation=msg) - if kwargs["cidr"]: - try: - net = netaddr.IPNetwork(kwargs["cidr"]) - if net.size < 4: - msg = _("Requested network does not contain " - "enough (2+) usable hosts") - raise exc.HTTPBadRequest(explanation=msg) - except netexc.AddrFormatError: - msg = _("CIDR is malformed.") - raise exc.HTTPBadRequest(explanation=msg) - except netexc.AddrConversionError: - msg = _("Address could not be converted.") - raise exc.HTTPBadRequest(explanation=msg) - - networks = [] + 
params = body["network"] + if not params.get("label"): + raise bad(_("Network label is required")) + + cidr = params.get("cidr") or params.get("cidr_v6") + if not cidr: + raise bad(_("Network cidr or cidr_v6 is required")) + + LOG.debug(_("Creating network with label %s") % params["label"]) + + params["num_networks"] = 1 + params["network_size"] = netaddr.IPNetwork(cidr).size + + network = self.network_api.create(context, **params)[0] + return {"network": network_dict(context, network)} + + def add(self, req, body): + context = req.environ['nova.context'] + authorize(context) + if not body: + raise exc.HTTPUnprocessableEntity() + + network_id = body.get('id', None) + project_id = context.project_id + LOG.debug(_("Associating network %(network)s" + " with project %(project)s") % + {"network": network_id or "", + "project": project_id}) try: - if CONF.enable_network_quota: - reservation = QUOTAS.reserve(context, networks=1) - except exception.OverQuota: - msg = _("Quota exceeded, too many networks.") + self.network_api.add_network_to_project( + context, project_id, network_id) + except Exception as ex: + msg = (_("Cannot associate network %(network)s" + " with project %(project)s: %(message)s") % + {"network": network_id or "", + "project": project_id, + "message": getattr(ex, "value", str(ex))}) raise exc.HTTPBadRequest(explanation=msg) - try: - networks = self.network_api.create(context, - label=label, **kwargs) - if CONF.enable_network_quota: - QUOTAS.commit(context, reservation) - except Exception: - if CONF.enable_network_quota: - QUOTAS.rollback(context, reservation) - msg = _("Create networks failed") - LOG.exception(msg, extra=network) - raise exc.HTTPServiceUnavailable(explanation=msg) - return {"network": network_dict(networks[0])} + return webob.Response(status_int=202) class Os_networks(extensions.ExtensionDescriptor): - """Tenant-based Network Management Extension.""" + """Admin-only Network Management Extension.""" - name = "OSNetworks" + name = 
"Networks" alias = "os-networks" - namespace = "http://docs.openstack.org/compute/ext/os-networks/api/v1.1" - updated = "2012-03-07T09:46:43-05:00" + namespace = ("http://docs.openstack.org/compute/" + "ext/os-networks/api/v1.1") + updated = "2011-12-23T00:00:00+00:00" def get_resources(self): - ext = extensions.ResourceExtension('os-networks', - NetworkController()) - return [ext] - - -def _sync_networks(context, project_id, session): - ctx = nova_context.RequestContext(user_id=None, project_id=project_id) - ctx = ctx.elevated() - networks = nova.network.api.API().get_all(ctx) - return dict(networks=len(networks)) - - -if CONF.enable_network_quota: - QUOTAS.register_resource(quota.ReservableResource('networks', - _sync_networks, - 'quota_networks')) + member_actions = {'action': 'POST'} + collection_actions = {'add': 'POST'} + res = extensions.ResourceExtension( + 'os-networks', + NetworkController(), + member_actions=member_actions, + collection_actions=collection_actions) + return [res] diff --git a/nova/api/openstack/compute/contrib/os_tenant_networks.py b/nova/api/openstack/compute/contrib/os_tenant_networks.py new file mode 100644 index 000000000..03178ab65 --- /dev/null +++ b/nova/api/openstack/compute/contrib/os_tenant_networks.py @@ -0,0 +1,214 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +import netaddr +import netaddr.core as netexc +from webob import exc + +from nova.api.openstack import extensions +from nova import context as nova_context +from nova import exception +import nova.network +from nova.openstack.common import cfg +from nova.openstack.common import log as logging +from nova import quota + + +CONF = cfg.CONF + +try: + os_network_opts = [ + cfg.BoolOpt("enable_network_quota", + default=False, + help="Enables or disables quotaing of tenant networks"), + cfg.StrOpt('use_quantum_default_nets', + default="False", + help=('Control for checking for default networks')), + cfg.StrOpt('quantum_default_tenant_id', + default="default", + help=('Default tenant id when creating quantum ' + 'networks')) + ] + CONF.register_opts(os_network_opts) +except cfg.DuplicateOptError: + # NOTE(jkoelker) These options are verbatim elsewhere this is here + # to make sure they are registered for our use. + pass + +if CONF.enable_network_quota: + opts = [ + cfg.IntOpt('quota_networks', + default=3, + help='number of private networks allowed per project'), + ] + CONF.register_opts(opts) + +QUOTAS = quota.QUOTAS +LOG = logging.getLogger(__name__) +authorize = extensions.extension_authorizer('compute', 'os-tenant-networks') + + +def network_dict(network): + return {"id": network.get("uuid") or network["id"], + "cidr": network["cidr"], + "label": network["label"]} + + +class NetworkController(object): + def __init__(self, network_api=None): + self.network_api = nova.network.API() + self._default_networks = [] + + def _refresh_default_networks(self): + self._default_networks = [] + if CONF.use_quantum_default_nets == "True": + try: + self._default_networks = self._get_default_networks() + except Exception: + LOG.exception("Failed to get default networks") + + def _get_default_networks(self): + project_id = CONF.quantum_default_tenant_id + ctx = nova_context.RequestContext(user_id=None, + project_id=project_id) + networks = {} + for n in 
self.network_api.get_all(ctx): + networks[n['id']] = n['label'] + return [{'id': k, 'label': v} for k, v in networks.iteritems()] + + def index(self, req): + context = req.environ['nova.context'] + authorize(context) + networks = self.network_api.get_all(context) + if not self._default_networks: + self._refresh_default_networks() + networks.extend(self._default_networks) + return {'networks': [network_dict(n) for n in networks]} + + def show(self, req, id): + context = req.environ['nova.context'] + authorize(context) + LOG.debug(_("Showing network with id %s") % id) + try: + network = self.network_api.get(context, id) + except exception.NetworkNotFound: + raise exc.HTTPNotFound(_("Network not found")) + return network_dict(network) + + def delete(self, req, id): + context = req.environ['nova.context'] + authorize(context) + try: + if CONF.enable_network_quota: + reservation = QUOTAS.reserve(context, networks=-1) + except Exception: + reservation = None + LOG.exception(_("Failed to update usages deallocating " + "network.")) + + LOG.info(_("Deleting network with id %s") % id) + + try: + self.network_api.delete(context, id) + if CONF.enable_network_quota and reservation: + QUOTAS.commit(context, reservation) + response = exc.HTTPAccepted() + except exception.NetworkNotFound: + response = exc.HTTPNotFound(_("Network not found")) + + return response + + def create(self, req, body): + if not body: + raise exc.HTTPUnprocessableEntity() + + context = req.environ["nova.context"] + authorize(context) + + network = body["network"] + keys = ["cidr", "cidr_v6", "ipam", "vlan_start", "network_size", + "num_networks"] + kwargs = dict((k, network.get(k)) for k in keys) + + label = network["label"] + + if not (kwargs["cidr"] or kwargs["cidr_v6"]): + msg = _("No CIDR requested") + raise exc.HTTPBadRequest(explanation=msg) + if kwargs["cidr"]: + try: + net = netaddr.IPNetwork(kwargs["cidr"]) + if net.size < 4: + msg = _("Requested network does not contain " + "enough (2+) usable 
hosts") + raise exc.HTTPBadRequest(explanation=msg) + except netexc.AddrFormatError: + msg = _("CIDR is malformed.") + raise exc.HTTPBadRequest(explanation=msg) + except netexc.AddrConversionError: + msg = _("Address could not be converted.") + raise exc.HTTPBadRequest(explanation=msg) + + networks = [] + try: + if CONF.enable_network_quota: + reservation = QUOTAS.reserve(context, networks=1) + except exception.OverQuota: + msg = _("Quota exceeded, too many networks.") + raise exc.HTTPBadRequest(explanation=msg) + + try: + networks = self.network_api.create(context, + label=label, **kwargs) + if CONF.enable_network_quota: + QUOTAS.commit(context, reservation) + except Exception: + if CONF.enable_network_quota: + QUOTAS.rollback(context, reservation) + msg = _("Create networks failed") + LOG.exception(msg, extra=network) + raise exc.HTTPServiceUnavailable(explanation=msg) + return {"network": network_dict(networks[0])} + + +class Os_tenant_networks(extensions.ExtensionDescriptor): + """Tenant-based Network Management Extension.""" + + name = "OSTenantNetworks" + alias = "os-tenant-networks" + namespace = ("http://docs.openstack.org/compute/" + "ext/os-tenant-networks/api/v2") + updated = "2012-03-07T09:46:43-05:00" + + def get_resources(self): + ext = extensions.ResourceExtension('os-tenant-networks', + NetworkController()) + return [ext] + + +def _sync_networks(context, project_id, session): + ctx = nova_context.RequestContext(user_id=None, project_id=project_id) + ctx = ctx.elevated() + networks = nova.network.api.API().get_all(ctx) + return dict(networks=len(networks)) + + +if CONF.enable_network_quota: + QUOTAS.register_resource(quota.ReservableResource('networks', + _sync_networks, + 'quota_networks')) diff --git a/nova/api/openstack/compute/contrib/services.py b/nova/api/openstack/compute/contrib/services.py index c792c72da..2786ad814 100644 --- a/nova/api/openstack/compute/contrib/services.py +++ b/nova/api/openstack/compute/contrib/services.py @@ -21,6 +21,7 
@@ import webob.exc from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova.api.openstack import xmlutil +from nova import availability_zones from nova import db from nova import exception from nova.openstack.common import cfg @@ -69,6 +70,7 @@ class ServiceController(object): authorize(context) now = timeutils.utcnow() services = db.service_get_all(context) + services = availability_zones.set_availability_zones(context, services) host = '' if 'host' in req.GET: diff --git a/nova/api/openstack/compute/server_metadata.py b/nova/api/openstack/compute/server_metadata.py index 023a054d0..0de5d536f 100644 --- a/nova/api/openstack/compute/server_metadata.py +++ b/nova/api/openstack/compute/server_metadata.py @@ -136,6 +136,10 @@ class Controller(object): raise exc.HTTPRequestEntityTooLarge(explanation=unicode(error), headers={'Retry-After': 0}) + except exception.InstanceInvalidState as state_error: + common.raise_http_conflict_for_instance_invalid_state(state_error, + 'update metadata') + @wsgi.serializers(xml=common.MetaItemTemplate) def show(self, req, server_id, id): """Return a single metadata item.""" @@ -162,10 +166,15 @@ class Controller(object): try: server = self.compute_api.get(context, server_id) self.compute_api.delete_instance_metadata(context, server, id) + except exception.InstanceNotFound: msg = _('Server does not exist') raise exc.HTTPNotFound(explanation=msg) + except exception.InstanceInvalidState as state_error: + common.raise_http_conflict_for_instance_invalid_state(state_error, + 'delete metadata') + def create_resource(): return wsgi.Resource(Controller()) diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py index 82eae442c..93a07ec3f 100644 --- a/nova/api/openstack/compute/servers.py +++ b/nova/api/openstack/compute/servers.py @@ -538,10 +538,11 @@ class Controller(wsgi.Controller): marker=marker) except exception.MarkerNotFound as e: msg = _('marker [%s] not found') % marker - 
raise webob.exc.HTTPBadRequest(explanation=msg) + raise exc.HTTPBadRequest(explanation=msg) except exception.FlavorNotFound as e: - msg = _("Flavor could not be found") - raise webob.exc.HTTPUnprocessableEntity(explanation=msg) + log_msg = _("Flavor '%s' could not be found ") + LOG.debug(log_msg, search_opts['flavor']) + instance_list = [] if is_detail: self._add_instance_faults(context, instance_list) @@ -561,17 +562,28 @@ class Controller(wsgi.Controller): req.cache_db_instance(instance) return instance - def _validate_server_name(self, value): + def _check_string_length(self, value, name, max_length=None): if not isinstance(value, basestring): - msg = _("Server name is not a string or unicode") + msg = _("%s is not a string or unicode") % name raise exc.HTTPBadRequest(explanation=msg) if not value.strip(): - msg = _("Server name is an empty string") + msg = _("%s is an empty string") % name + raise exc.HTTPBadRequest(explanation=msg) + + if max_length and len(value) > max_length: + msg = _("%(name)s can be at most %(max_length)s " + "characters.") % locals() raise exc.HTTPBadRequest(explanation=msg) - if not len(value) < 256: - msg = _("Server name must be less than 256 characters.") + def _validate_server_name(self, value): + self._check_string_length(value, 'Server name', max_length=255) + + def _validate_device_name(self, value): + self._check_string_length(value, 'Device name', max_length=255) + + if ' ' in value: + msg = _("Device name cannot include spaces.") raise exc.HTTPBadRequest(explanation=msg) def _get_injected_files(self, personality): @@ -809,6 +821,7 @@ class Controller(wsgi.Controller): if self.ext_mgr.is_loaded('os-volumes'): block_device_mapping = server_dict.get('block_device_mapping', []) for bdm in block_device_mapping: + self._validate_device_name(bdm["device_name"]) if 'delete_on_termination' in bdm: bdm['delete_on_termination'] = utils.bool_from_str( bdm['delete_on_termination']) @@ -828,21 +841,24 @@ class Controller(wsgi.Controller): 
try: min_count = int(min_count) except ValueError: - raise webob.exc.HTTPBadRequest(_('min_count must be an ' - 'integer value')) + msg = _('min_count must be an integer value') + raise exc.HTTPBadRequest(explanation=msg) if min_count < 1: - raise webob.exc.HTTPBadRequest(_('min_count must be > 0')) + msg = _('min_count must be > 0') + raise exc.HTTPBadRequest(explanation=msg) try: max_count = int(max_count) except ValueError: - raise webob.exc.HTTPBadRequest(_('max_count must be an ' - 'integer value')) + msg = _('max_count must be an integer value') + raise exc.HTTPBadRequest(explanation=msg) if max_count < 1: - raise webob.exc.HTTPBadRequest(_('max_count must be > 0')) + msg = _('max_count must be > 0') + raise exc.HTTPBadRequest(explanation=msg) if min_count > max_count: - raise webob.exc.HTTPBadRequest(_('min_count must be <= max_count')) + msg = _('min_count must be <= max_count') + raise exc.HTTPBadRequest(explanation=msg) auto_disk_config = False if self.ext_mgr.is_loaded('OS-DCF'): @@ -1204,7 +1220,8 @@ class Controller(wsgi.Controller): try: body = body['rebuild'] except (KeyError, TypeError): - raise exc.HTTPBadRequest(_("Invalid request body")) + msg = _('Invalid request body') + raise exc.HTTPBadRequest(explanation=msg) try: image_href = body["imageRef"] diff --git a/nova/api/openstack/compute/views/servers.py b/nova/api/openstack/compute/views/servers.py index d281f6a61..939515468 100644 --- a/nova/api/openstack/compute/views/servers.py +++ b/nova/api/openstack/compute/views/servers.py @@ -211,9 +211,9 @@ class ViewBuilder(common.ViewBuilder): if fault.get('details', None): is_admin = False - context = getattr(request, 'context', None) + context = request.environ["nova.context"] if context: - is_admin = getattr(request.context, 'is_admin', False) + is_admin = getattr(context, 'is_admin', False) if is_admin or fault['code'] != 500: fault_dict['details'] = fault["details"] diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index 
519669134..733685b14 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -919,6 +919,10 @@ class Resource(wsgi.Application): msg = _("Malformed request body") return Fault(webob.exc.HTTPBadRequest(explanation=msg)) + if body: + LOG.info(_("Action: '%(action)s', body: %(body)s") % locals()) + LOG.debug(_("Calling method %s") % meth) + # Now, deserialize the request body... try: if content_type: diff --git a/nova/api/sizelimit.py b/nova/api/sizelimit.py index 70ff73b2b..77ab4415c 100644 --- a/nova/api/sizelimit.py +++ b/nova/api/sizelimit.py @@ -38,7 +38,7 @@ LOG = logging.getLogger(__name__) class RequestBodySizeLimiter(wsgi.Middleware): - """Add a 'nova.context' to WSGI environ.""" + """Limit the size of incoming requests.""" def __init__(self, *args, **kwargs): super(RequestBodySizeLimiter, self).__init__(*args, **kwargs) diff --git a/nova/availability_zones.py b/nova/availability_zones.py index cb5cce591..09cbd98b8 100644 --- a/nova/availability_zones.py +++ b/nova/availability_zones.py @@ -13,7 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-"""utilities for multiple APIs.""" +"""Availability zone helper functions.""" from nova import db from nova.openstack.common import cfg @@ -46,7 +46,7 @@ def set_availability_zones(context, services): az = CONF.internal_service_availability_zone if service['topic'] == "compute": if metadata.get(service['host']): - az = str(metadata[service['host']])[5:-2] + az = u','.join(list(metadata[service['host']])) else: az = CONF.default_availability_zone service['availability_zone'] = az @@ -55,8 +55,30 @@ def set_availability_zones(context, services): def get_host_availability_zone(context, host): metadata = db.aggregate_metadata_get_by_host( - context.get_admin_context(), host, key='availability_zone') + context, host, key='availability_zone') if 'availability_zone' in metadata: return list(metadata['availability_zone'])[0] else: return CONF.default_availability_zone + + +def get_availability_zones(context): + """Return available and unavailable zones.""" + enabled_services = db.service_get_all(context, False) + disabled_services = db.service_get_all(context, True) + enabled_services = set_availability_zones(context, enabled_services) + disabled_services = set_availability_zones(context, disabled_services) + + available_zones = [] + for zone in [service['availability_zone'] for service + in enabled_services]: + if not zone in available_zones: + available_zones.append(zone) + + not_available_zones = [] + zones = [service['available_zones'] for service in disabled_services + if service['available_zones'] not in available_zones] + for zone in zones: + if zone not in not_available_zones: + not_available_zones.append(zone) + return (available_zones, not_available_zones) diff --git a/nova/cells/manager.py b/nova/cells/manager.py index 0942bae28..133946794 100644 --- a/nova/cells/manager.py +++ b/nova/cells/manager.py @@ -65,7 +65,7 @@ class CellsManager(manager.Manager): Scheduling requests get passed to the scheduler class. 
""" - RPC_API_VERSION = '1.0' + RPC_API_VERSION = '1.1' def __init__(self, *args, **kwargs): # Mostly for tests. @@ -186,6 +186,10 @@ class CellsManager(manager.Manager): self.msg_runner.schedule_run_instance(ctxt, our_cell, host_sched_kwargs) + def get_cell_info_for_neighbors(self, _ctxt): + """Return cell information for our neighbor cells.""" + return self.state_manager.get_cell_info_for_neighbors() + def run_compute_api_method(self, ctxt, cell_name, method_info, call): """Call a compute API method in a specific cell.""" response = self.msg_runner.run_compute_api_method(ctxt, @@ -218,3 +222,10 @@ class CellsManager(manager.Manager): def bw_usage_update_at_top(self, ctxt, bw_update_info): """Update bandwidth usage at top level cell.""" self.msg_runner.bw_usage_update_at_top(ctxt, bw_update_info) + + def sync_instances(self, ctxt, project_id, updated_since, deleted): + """Force a sync of all instances, potentially by project_id, + and potentially since a certain date/time. + """ + self.msg_runner.sync_instances(ctxt, project_id, updated_since, + deleted) diff --git a/nova/cells/messaging.py b/nova/cells/messaging.py index 56d521892..34ca74855 100644 --- a/nova/cells/messaging.py +++ b/nova/cells/messaging.py @@ -27,6 +27,7 @@ import sys from eventlet import queue from nova.cells import state as cells_state +from nova.cells import utils as cells_utils from nova import compute from nova import context from nova.db import base @@ -37,6 +38,7 @@ from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common.rpc import common as rpc_common +from nova.openstack.common import timeutils from nova.openstack.common import uuidutils from nova import utils @@ -778,6 +780,26 @@ class _BroadcastMessageMethods(_BaseMessageMethods): return self.db.bw_usage_update(message.ctxt, **bw_update_info) + def _sync_instance(self, ctxt, instance): + if instance['deleted']: + 
self.msg_runner.instance_destroy_at_top(ctxt, instance) + else: + self.msg_runner.instance_update_at_top(ctxt, instance) + + def sync_instances(self, message, project_id, updated_since, deleted, + **kwargs): + projid_str = project_id is None and "<all>" or project_id + since_str = updated_since is None and "<all>" or updated_since + LOG.info(_("Forcing a sync of instances, project_id=" + "%(projid_str)s, updated_since=%(since_str)s"), locals()) + if updated_since is not None: + updated_since = timeutils.parse_isotime(updated_since) + instances = cells_utils.get_instances_to_sync(message.ctxt, + updated_since=updated_since, project_id=project_id, + deleted=deleted) + for instance in instances: + self._sync_instance(message.ctxt, instance) + _CELL_MESSAGE_TYPE_TO_MESSAGE_CLS = {'targeted': _TargetedMessage, 'broadcast': _BroadcastMessage, @@ -1004,6 +1026,18 @@ class MessageRunner(object): 'up', run_locally=False) message.process() + def sync_instances(self, ctxt, project_id, updated_since, deleted): + """Force a sync of all instances, potentially by project_id, + and potentially since a certain date/time. + """ + method_kwargs = dict(project_id=project_id, + updated_since=updated_since, + deleted=deleted) + message = _BroadcastMessage(self, ctxt, 'sync_instances', + method_kwargs, 'down', + run_locally=False) + message.process() + @staticmethod def get_message_types(): return _CELL_MESSAGE_TYPE_TO_MESSAGE_CLS.keys() diff --git a/nova/cells/rpcapi.py b/nova/cells/rpcapi.py index 8ce298829..0ab4fc352 100644 --- a/nova/cells/rpcapi.py +++ b/nova/cells/rpcapi.py @@ -40,6 +40,7 @@ class CellsAPI(rpc_proxy.RpcProxy): API version history: 1.0 - Initial version. 
+ 1.1 - Adds get_cell_info_for_neighbors() and sync_instances() ''' BASE_RPC_API_VERSION = '1.0' @@ -136,3 +137,21 @@ class CellsAPI(rpc_proxy.RpcProxy): 'info_cache': iicache} self.cast(ctxt, self.make_msg('instance_update_at_top', instance=instance)) + + def get_cell_info_for_neighbors(self, ctxt): + """Get information about our neighbor cells from the manager.""" + if not CONF.cells.enable: + return [] + return self.call(ctxt, self.make_msg('get_cell_info_for_neighbors'), + version='1.1') + + def sync_instances(self, ctxt, project_id=None, updated_since=None, + deleted=False): + """Ask all cells to sync instance data.""" + if not CONF.cells.enable: + return + return self.cast(ctxt, self.make_msg('sync_instances', + project_id=project_id, + updated_since=updated_since, + deleted=deleted), + version='1.1') diff --git a/nova/cells/state.py b/nova/cells/state.py index 345c44ca9..e3886bedb 100644 --- a/nova/cells/state.py +++ b/nova/cells/state.py @@ -75,8 +75,8 @@ class CellState(object): def get_cell_info(self): """Return subset of cell information for OS API use.""" - db_fields_to_return = ['id', 'is_parent', 'weight_scale', - 'weight_offset', 'username', 'rpc_host', 'rpc_port'] + db_fields_to_return = ['is_parent', 'weight_scale', 'weight_offset', + 'username', 'rpc_host', 'rpc_port'] cell_info = dict(name=self.name, capabilities=self.capabilities) if self.db_info: for field in db_fields_to_return: @@ -267,6 +267,15 @@ class CellStateManager(base.Base): self._update_our_capacity(ctxt) @sync_from_db + def get_cell_info_for_neighbors(self): + """Return cell information for all neighbor cells.""" + cell_list = [cell.get_cell_info() + for cell in self.child_cells.itervalues()] + cell_list.extend([cell.get_cell_info() + for cell in self.parent_cells.itervalues()]) + return cell_list + + @sync_from_db def get_my_state(self): """Return information for my (this) cell.""" return self.my_cell_state diff --git a/nova/compute/api.py b/nova/compute/api.py index 
9b51ba13e..765aeeef5 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -92,6 +92,7 @@ CONF = cfg.CONF CONF.register_opts(compute_opts) CONF.import_opt('compute_topic', 'nova.compute.rpcapi') CONF.import_opt('consoleauth_topic', 'nova.consoleauth') +CONF.import_opt('enable', 'nova.cells.opts', group='cells') MAX_USERDATA_SIZE = 65535 QUOTAS = quota.QUOTAS @@ -404,20 +405,20 @@ class API(base.Base): options_from_image['auto_disk_config'] = auto_disk_config return options_from_image - def _create_instance(self, context, instance_type, - image_href, kernel_id, ramdisk_id, - min_count, max_count, - display_name, display_description, - key_name, key_data, security_group, - availability_zone, user_data, metadata, - injected_files, admin_password, - access_ip_v4, access_ip_v6, - requested_networks, config_drive, - block_device_mapping, auto_disk_config, - reservation_id=None, scheduler_hints=None): + def _validate_and_provision_instance(self, context, instance_type, + image_href, kernel_id, ramdisk_id, + min_count, max_count, + display_name, display_description, + key_name, key_data, security_group, + availability_zone, user_data, + metadata, injected_files, + access_ip_v4, access_ip_v6, + requested_networks, config_drive, + block_device_mapping, + auto_disk_config, reservation_id, + scheduler_hints): """Verify all the input parameters regardless of the provisioning - strategy being performed and schedule the instance(s) for - creation.""" + strategy being performed.""" if not metadata: metadata = {} @@ -441,6 +442,19 @@ class API(base.Base): raise exception.InstanceTypeNotFound( instance_type_id=instance_type['id']) + if user_data: + l = len(user_data) + if l > MAX_USERDATA_SIZE: + # NOTE(mikal): user_data is stored in a text column, and + # the database might silently truncate if its over length. 
+ raise exception.InstanceUserDataTooLarge( + length=l, maxsize=MAX_USERDATA_SIZE) + + try: + base64.decodestring(user_data) + except base64.binascii.Error: + raise exception.InstanceUserDataMalformed() + # Reserve quotas num_instances, quota_reservations = self._check_num_instances_quota( context, instance_type, min_count, max_count) @@ -488,9 +502,6 @@ class API(base.Base): key_name) key_data = key_pair['public_key'] - if reservation_id is None: - reservation_id = utils.generate_uid('r') - root_device_name = block_device.properties_root_device_name( image.get('properties', {})) @@ -528,19 +539,6 @@ class API(base.Base): 'root_device_name': root_device_name, 'progress': 0} - if user_data: - l = len(user_data) - if l > MAX_USERDATA_SIZE: - # NOTE(mikal): user_data is stored in a text column, and - # the database might silently truncate if its over length. - raise exception.InstanceUserDataTooLarge( - length=l, maxsize=MAX_USERDATA_SIZE) - - try: - base64.decodestring(user_data) - except base64.binascii.Error: - raise exception.InstanceUserDataMalformed() - options_from_image = self._inherit_properties_from_image( image, auto_disk_config) @@ -583,6 +581,36 @@ class API(base.Base): 'security_group': security_group, } + return (instances, request_spec, filter_properties) + + def _create_instance(self, context, instance_type, + image_href, kernel_id, ramdisk_id, + min_count, max_count, + display_name, display_description, + key_name, key_data, security_group, + availability_zone, user_data, metadata, + injected_files, admin_password, + access_ip_v4, access_ip_v6, + requested_networks, config_drive, + block_device_mapping, auto_disk_config, + reservation_id=None, scheduler_hints=None): + """Verify all the input parameters regardless of the provisioning + strategy being performed and schedule the instance(s) for + creation.""" + + if reservation_id is None: + reservation_id = utils.generate_uid('r') + + (instances, request_spec, filter_properties) = \ + 
self._validate_and_provision_instance(context, instance_type, + image_href, kernel_id, ramdisk_id, min_count, + max_count, display_name, display_description, + key_name, key_data, security_group, availability_zone, + user_data, metadata, injected_files, access_ip_v4, + access_ip_v6, requested_networks, config_drive, + block_device_mapping, auto_disk_config, + reservation_id, scheduler_hints) + self.scheduler_rpcapi.run_instance(context, request_spec=request_spec, admin_password=admin_password, injected_files=injected_files, @@ -854,6 +882,20 @@ class API(base.Base): for host_name in host_names: self.compute_rpcapi.refresh_provider_fw_rules(context, host_name) + def update_state(self, context, instance, new_state): + """Updates the state of a compute instance. + For example to 'active' or 'error'. + Also sets 'task_state' to None. + Used by admin_actions api + + :param context: The security context + :param instance: The instance to update + :param new_state: A member of vm_state, eg. 'active' + """ + self.update(context, instance, + vm_state=new_state, + task_state=None) + @wrap_check_policy def update(self, context, instance, **kwargs): """Updates the instance in the datastore. 
@@ -908,11 +950,10 @@ class API(base.Base): if (old['vm_state'] != vm_states.SOFT_DELETED and old['task_state'] not in (task_states.DELETING, task_states.SOFT_DELETING)): - reservations = QUOTAS.reserve(context, - project_id=project_id, - instances=-1, - cores=-instance['vcpus'], - ram=-instance['memory_mb']) + reservations = self._create_reservations(context, + old, + updated, + project_id) if not host: # Just update database, nothing else we can do @@ -958,19 +999,16 @@ class API(base.Base): host=src_host, cast=False, reservations=downsize_reservations) - # NOTE(jogo): db allows for multiple compute services per host + is_up = False try: - services = self.db.service_get_all_compute_by_host( + service = self.db.service_get_by_compute_host( context.elevated(), instance['host']) - except exception.ComputeHostNotFound: - services = [] - - is_up = False - for service in services: if self.servicegroup_api.service_is_up(service): is_up = True cb(context, instance, bdms) - break + except exception.ComputeHostNotFound: + pass + if not is_up: # If compute node isn't up, just delete from DB self._local_delete(context, instance, bdms) @@ -991,6 +1029,45 @@ class API(base.Base): reservations, project_id=project_id) + def _create_reservations(self, context, old_instance, new_instance, + project_id): + instance_vcpus = old_instance['vcpus'] + instance_memory_mb = old_instance['memory_mb'] + # NOTE(wangpan): if the instance is resizing, and the resources + # are updated to new instance type, we should use + # the old instance type to create reservation. 
+ # see https://bugs.launchpad.net/nova/+bug/1099729 for more details + if old_instance['task_state'] in (task_states.RESIZE_MIGRATED, + task_states.RESIZE_FINISH): + get_migration = self.db.migration_get_by_instance_and_status + try: + migration_ref = get_migration(context.elevated(), + old_instance['uuid'], 'post-migrating') + except exception.MigrationNotFoundByStatus: + migration_ref = None + if (migration_ref and + new_instance['instance_type_id'] == + migration_ref['new_instance_type_id']): + old_inst_type_id = migration_ref['old_instance_type_id'] + get_inst_type_by_id = instance_types.get_instance_type + try: + old_inst_type = get_inst_type_by_id(old_inst_type_id) + except exception.InstanceTypeNotFound: + LOG.warning(_("instance type %(old_inst_type_id)d " + "not found") % locals()) + pass + else: + instance_vcpus = old_inst_type['vcpus'] + instance_memory_mb = old_inst_type['memory_mb'] + LOG.debug(_("going to delete a resizing instance")) + + reservations = QUOTAS.reserve(context, + project_id=project_id, + instances=-1, + cores=-instance_vcpus, + ram=-instance_memory_mb) + return reservations + def _local_delete(self, context, instance, bdms): LOG.warning(_("instance's host %s is down, deleting from " "database") % instance['host'], instance=instance) @@ -1147,8 +1224,10 @@ class API(base.Base): # NOTE(ameade): we still need to support integer ids for ec2 if uuidutils.is_uuid_like(instance_id): instance = self.db.instance_get_by_uuid(context, instance_id) - else: + elif utils.is_int_like(instance_id): instance = self.db.instance_get(context, instance_id) + else: + raise exception.InstanceNotFound(instance_id=instance_id) check_policy(context, 'get', instance) @@ -1268,7 +1347,7 @@ class API(base.Base): @wrap_check_policy @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED]) def backup(self, context, instance, name, backup_type, rotation, - extra_properties=None): + extra_properties=None, image_id=None): """Backup the given instance 
:param instance: nova.db.sqlalchemy.models.Instance @@ -1278,14 +1357,27 @@ class API(base.Base): None if rotation shouldn't be used (as in the case of snapshots) :param extra_properties: dict of extra image properties to include """ - recv_meta = self._create_image(context, instance, name, 'backup', - backup_type=backup_type, rotation=rotation, - extra_properties=extra_properties) - return recv_meta + instance = self.update(context, instance, + task_state=task_states.IMAGE_BACKUP, + expected_task_state=None) + if image_id: + # The image entry has already been created, so just pull the + # metadata. + image_meta = self.image_service.show(context, image_id) + else: + image_meta = self._create_image(context, instance, name, + 'backup', backup_type=backup_type, + rotation=rotation, extra_properties=extra_properties) + self.compute_rpcapi.snapshot_instance(context, instance=instance, + image_id=image_meta['id'], image_type='backup', + backup_type=backup_type, rotation=rotation) + return image_meta @wrap_check_policy - @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED]) - def snapshot(self, context, instance, name, extra_properties=None): + @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED, + vm_states.PAUSED, vm_states.SUSPENDED]) + def snapshot(self, context, instance, name, extra_properties=None, + image_id=None): """Snapshot the given instance. :param instance: nova.db.sqlalchemy.models.Instance @@ -1294,12 +1386,25 @@ class API(base.Base): :returns: A dict containing image metadata """ - return self._create_image(context, instance, name, 'snapshot', - extra_properties=extra_properties) + instance = self.update(context, instance, + task_state=task_states.IMAGE_SNAPSHOT, + expected_task_state=None) + if image_id: + # The image entry has already been created, so just pull the + # metadata. 
+ image_meta = self.image_service.show(context, image_id) + else: + image_meta = self._create_image(context, instance, name, + 'snapshot', extra_properties=extra_properties) + self.compute_rpcapi.snapshot_instance(context, instance=instance, + image_id=image_meta['id'], image_type='snapshot') + return image_meta def _create_image(self, context, instance, name, image_type, backup_type=None, rotation=None, extra_properties=None): - """Create snapshot or backup for an instance on this host. + """Create new image entry in the image service. This new image + will be reserved for the compute manager to upload a snapshot + or backup. :param context: security context :param instance: nova.db.sqlalchemy.models.Instance @@ -1313,29 +1418,6 @@ class API(base.Base): """ instance_uuid = instance['uuid'] - if image_type == "snapshot": - task_state = task_states.IMAGE_SNAPSHOT - elif image_type == "backup": - task_state = task_states.IMAGE_BACKUP - else: - raise Exception(_('Image type not recognized %s') % image_type) - - # change instance state and notify - old_vm_state = instance["vm_state"] - old_task_state = instance["task_state"] - - self.db.instance_test_and_set( - context, instance_uuid, 'task_state', [None], task_state) - - # NOTE(sirp): `instance_test_and_set` only sets the task-state in the - # DB, but we also need to set it on the current instance so that the - # correct value is passed down to the compute manager. 
- instance['task_state'] = task_state - - notifications.send_update_with_states(context, instance, old_vm_state, - instance["vm_state"], old_task_state, instance["task_state"], - service="api", verify_states=True) - properties = { 'instance_uuid': instance_uuid, 'user_id': str(context.user_id), @@ -1382,11 +1464,7 @@ class API(base.Base): # up above will not be overwritten by inherited values properties.setdefault(key, value) - recv_meta = self.image_service.create(context, sent_meta) - self.compute_rpcapi.snapshot_instance(context, instance=instance, - image_id=recv_meta['id'], image_type=image_type, - backup_type=backup_type, rotation=rotation) - return recv_meta + return self.image_service.create(context, sent_meta) @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED]) def snapshot_volume_backed(self, context, instance, image_meta, name, @@ -1529,12 +1607,9 @@ class API(base.Base): elevated = context.elevated() block_info = self._get_block_device_info(elevated, instance['uuid']) - network_info = self.network_api.get_instance_nw_info(elevated, - instance) self.compute_rpcapi.reboot_instance(context, instance=instance, block_device_info=block_info, - network_info=network_info, reboot_type=reboot_type) def _get_image(self, context, image_href): @@ -1647,6 +1722,11 @@ class API(base.Base): self.db.migration_update(elevated, migration_ref['id'], {'status': 'reverting'}) + # With cells, the best we can do right now is commit the reservations + # immediately... + if CONF.cells.enable and reservations: + QUOTAS.commit(context, reservations) + reservations = [] self.compute_rpcapi.revert_resize(context, instance=instance, migration=migration_ref, @@ -1671,6 +1751,11 @@ class API(base.Base): self.db.migration_update(elevated, migration_ref['id'], {'status': 'confirming'}) + # With cells, the best we can do right now is commit the reservations + # immediately... 
+ if CONF.cells.enable and reservations: + QUOTAS.commit(context, reservations) + reservations = [] self.compute_rpcapi.confirm_resize(context, instance=instance, migration=migration_ref, @@ -1833,6 +1918,12 @@ class API(base.Base): if not CONF.allow_resize_to_same_host: filter_properties['ignore_hosts'].append(instance['host']) + # With cells, the best we can do right now is commit the reservations + # immediately... + if CONF.cells.enable and reservations: + QUOTAS.commit(context, reservations) + reservations = [] + args = { "instance": instance, "instance_type": new_instance_type, @@ -1985,6 +2076,29 @@ class API(base.Base): return connect_info @wrap_check_policy + def get_spice_console(self, context, instance, console_type): + """Get a url to an instance Console.""" + if not instance['host']: + raise exception.InstanceNotReady(instance_id=instance['uuid']) + + connect_info = self.compute_rpcapi.get_spice_console(context, + instance=instance, console_type=console_type) + + self.consoleauth_rpcapi.authorize_console(context, + connect_info['token'], console_type, connect_info['host'], + connect_info['port'], connect_info['internal_access_path']) + + return {'url': connect_info['access_url']} + + def get_spice_connect_info(self, context, instance, console_type): + """Used in a child cell to get console info.""" + if not instance['host']: + raise exception.InstanceNotReady(instance_id=instance['uuid']) + connect_info = self.compute_rpcapi.get_spice_console(context, + instance=instance, console_type=console_type) + return connect_info + + @wrap_check_policy def get_console_output(self, context, instance, tail_length=None): """Get console output for an instance.""" return self.compute_rpcapi.get_console_output(context, @@ -2089,6 +2203,9 @@ class API(base.Base): @wrap_check_policy @check_instance_lock + @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED, + vm_states.SUSPENDED, vm_states.STOPPED], + task_state=None) def delete_instance_metadata(self, 
context, instance, key): """Delete the given metadata item from an instance.""" self.db.instance_metadata_delete(context, instance['uuid'], key) @@ -2100,6 +2217,9 @@ class API(base.Base): @wrap_check_policy @check_instance_lock + @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED, + vm_states.SUSPENDED, vm_states.STOPPED], + task_state=None) def update_instance_metadata(self, context, instance, metadata, delete=False): """Updates or creates instance metadata. @@ -2167,140 +2287,76 @@ class API(base.Base): disk_over_commit, instance, host_name) -def check_host(fn): - """Decorator that makes sure that the host exists.""" - def wrapped(self, context, host_name, *args, **kwargs): - if self.does_host_exist(context, host_name): - return fn(self, context, host_name, *args, **kwargs) - else: - raise exception.HostNotFound(host=host_name) - return wrapped - - class HostAPI(base.Base): """Sub-set of the Compute Manager API for managing host operations.""" - def __init__(self): - self.compute_rpcapi = compute_rpcapi.ComputeAPI() + def __init__(self, rpcapi=None): + self.rpcapi = rpcapi or compute_rpcapi.ComputeAPI() super(HostAPI, self).__init__() - @check_host + def _assert_host_exists(self, context, host_name): + """Raise HostNotFound if compute host doesn't exist.""" + if not self.db.service_get_by_host_and_topic(context, host_name, + CONF.compute_topic): + raise exception.HostNotFound(host=host_name) + def set_host_enabled(self, context, host_name, enabled): """Sets the specified host's ability to accept new instances.""" # NOTE(comstud): No instance_uuid argument to this compute manager # call - return self.compute_rpcapi.set_host_enabled(context, enabled=enabled, + self._assert_host_exists(context, host_name) + return self.rpcapi.set_host_enabled(context, enabled=enabled, host=host_name) - @check_host def get_host_uptime(self, context, host_name): """Returns the result of calling "uptime" on the target host.""" # NOTE(comstud): No instance_uuid argument 
to this compute manager # call - return self.compute_rpcapi.get_host_uptime(context, host=host_name) + self._assert_host_exists(context, host_name) + return self.rpcapi.get_host_uptime(context, host=host_name) - @check_host def host_power_action(self, context, host_name, action): """Reboots, shuts down or powers up the host.""" - return self.compute_rpcapi.host_power_action(context, action=action, + self._assert_host_exists(context, host_name) + return self.rpcapi.host_power_action(context, action=action, host=host_name) - def list_hosts(self, context, zone=None, service=None): - """Returns a summary list of enabled hosts, optionally filtering - by zone and/or service type. + def set_host_maintenance(self, context, host_name, mode): + """Start/Stop host maintenance window. On start, it triggers + guest VMs evacuation.""" + self._assert_host_exists(context, host_name) + return self.rpcapi.host_maintenance_mode(context, + host_param=host_name, mode=mode, host=host_name) + + def service_get_all(self, context, filters=None): + """Returns a list of services, optionally filtering the results. + + If specified, 'filters' should be a dictionary containing services + attributes and matching values. Ie, to get a list of services for + the 'compute' topic, use filters={'topic': 'compute'}. 
""" - LOG.debug(_("Listing hosts")) + if filters is None: + filters = {} services = self.db.service_get_all(context, False) - services = availability_zones.set_availability_zones(context, services) - if zone: - services = [s for s in services if s['availability_zone'] == zone] - hosts = [] - for host in services: - hosts.append({'host_name': host['host'], 'service': host['topic'], - 'zone': host['availability_zone']}) - if service: - hosts = [host for host in hosts - if host["service"] == service] - return hosts - - def does_host_exist(self, context, host_name): - """ - Returns True if the host with host_name exists, False otherwise - """ - return self.db.service_does_host_exist(context, host_name) + services = availability_zones.set_availability_zones(context, + services) + ret_services = [] + for service in services: + for key, val in filters.iteritems(): + if service[key] != val: + break + else: + # All filters matched. + ret_services.append(service) + return ret_services - def describe_host(self, context, host_name): - """ - Returns information about a host in this kind of format: - :returns: - ex.:: - {'host': 'hostname', - 'project': 'admin', - 'cpu': 1, - 'memory_mb': 2048, - 'disk_gb': 30} - """ - # Getting compute node info and related instances info - try: - compute_ref = self.db.service_get_all_compute_by_host(context, - host_name) - compute_ref = compute_ref[0] - except exception.ComputeHostNotFound: - raise exception.HostNotFound(host=host_name) - instance_refs = self.db.instance_get_all_by_host(context, - compute_ref['host']) - - # Getting total available/used resource - compute_ref = compute_ref['compute_node'][0] - resources = [{'resource': {'host': host_name, 'project': '(total)', - 'cpu': compute_ref['vcpus'], - 'memory_mb': compute_ref['memory_mb'], - 'disk_gb': compute_ref['local_gb']}}, - {'resource': {'host': host_name, 'project': '(used_now)', - 'cpu': compute_ref['vcpus_used'], - 'memory_mb': compute_ref['memory_mb_used'], - 'disk_gb': 
compute_ref['local_gb_used']}}] - - cpu_sum = 0 - mem_sum = 0 - hdd_sum = 0 - for i in instance_refs: - cpu_sum += i['vcpus'] - mem_sum += i['memory_mb'] - hdd_sum += i['root_gb'] + i['ephemeral_gb'] - - resources.append({'resource': {'host': host_name, - 'project': '(used_max)', - 'cpu': cpu_sum, - 'memory_mb': mem_sum, - 'disk_gb': hdd_sum}}) - - # Getting usage resource per project - project_ids = [i['project_id'] for i in instance_refs] - project_ids = list(set(project_ids)) - for project_id in project_ids: - vcpus = [i['vcpus'] for i in instance_refs - if i['project_id'] == project_id] - - mem = [i['memory_mb'] for i in instance_refs - if i['project_id'] == project_id] - - disk = [i['root_gb'] + i['ephemeral_gb'] for i in instance_refs - if i['project_id'] == project_id] - - resources.append({'resource': {'host': host_name, - 'project': project_id, - 'cpu': sum(vcpus), - 'memory_mb': sum(mem), - 'disk_gb': sum(disk)}}) - return resources - - @check_host - def set_host_maintenance(self, context, host, mode): - """Start/Stop host maintenance window. 
On start, it triggers - guest VMs evacuation.""" - return self.compute_rpcapi.host_maintenance_mode(context, - host_param=host, mode=mode, host=host) + def service_get_by_compute_host(self, context, host_name): + """Get service entry for the given compute hostname.""" + return self.db.service_get_by_compute_host(context, host_name) + + def instance_get_all_by_host(self, context, host_name): + """Return all instances on the given host.""" + return self.db.instance_get_all_by_host(context, host_name) class AggregateAPI(base.Base): @@ -2364,8 +2420,7 @@ class AggregateAPI(base.Base): def add_host_to_aggregate(self, context, aggregate_id, host_name): """Adds the host to an aggregate.""" # validates the host; ComputeHostNotFound is raised if invalid - service = self.db.service_get_all_compute_by_host( - context, host_name)[0] + self.db.service_get_by_compute_host(context, host_name) aggregate = self.db.aggregate_get(context, aggregate_id) self.db.aggregate_host_add(context, aggregate_id, host_name) #NOTE(jogo): Send message to host to support resource pools @@ -2376,8 +2431,7 @@ class AggregateAPI(base.Base): def remove_host_from_aggregate(self, context, aggregate_id, host_name): """Removes host from the aggregate.""" # validates the host; ComputeHostNotFound is raised if invalid - service = self.db.service_get_all_compute_by_host( - context, host_name)[0] + self.db.service_get_by_compute_host(context, host_name) aggregate = self.db.aggregate_get(context, aggregate_id) self.db.aggregate_host_delete(context, aggregate_id, host_name) self.compute_rpcapi.remove_aggregate_host(context, diff --git a/nova/compute/cells_api.py b/nova/compute/cells_api.py index 698c6eed0..d5427a04b 100644 --- a/nova/compute/cells_api.py +++ b/nova/compute/cells_api.py @@ -18,7 +18,7 @@ from nova import block_device from nova.cells import rpcapi as cells_rpcapi from nova.compute import api as compute_api -from nova.compute import task_states +from nova.compute import instance_types from 
nova.compute import vm_states from nova import exception from nova.openstack.common import excutils @@ -115,15 +115,28 @@ class ComputeCellsAPI(compute_api.API): """ return - def _create_image(self, context, instance, name, image_type, - backup_type=None, rotation=None, extra_properties=None): - if backup_type: - return self._call_to_cells(context, instance, 'backup', - name, backup_type, rotation, - extra_properties=extra_properties) - else: - return self._call_to_cells(context, instance, 'snapshot', - name, extra_properties=extra_properties) + def backup(self, context, instance, name, backup_type, rotation, + extra_properties=None, image_id=None): + """Backup the given instance.""" + image_meta = super(ComputeCellsAPI, self).backup(context, + instance, name, backup_type, rotation, + extra_properties=extra_properties, image_id=image_id) + image_id = image_meta['id'] + self._cast_to_cells(context, instance, 'backup', name, + backup_type=backup_type, rotation=rotation, + extra_properties=extra_properties, image_id=image_id) + return image_meta + + def snapshot(self, context, instance, name, extra_properties=None, + image_id=None): + """Snapshot the given instance.""" + image_meta = super(ComputeCellsAPI, self).snapshot(context, + instance, name, extra_properties=extra_properties, + image_id=image_id) + image_id = image_meta['id'] + self._cast_to_cells(context, instance, 'snapshot', + name, extra_properties=extra_properties, image_id=image_id) + return image_meta def create(self, *args, **kwargs): """We can use the base functionality, but I left this here just @@ -131,17 +144,45 @@ class ComputeCellsAPI(compute_api.API): """ return super(ComputeCellsAPI, self).create(*args, **kwargs) - @validate_cell - def update(self, context, instance, **kwargs): - """Update an instance.""" + def update_state(self, context, instance, new_state): + """Updates the state of a compute instance. + For example to 'active' or 'error'. + Also sets 'task_state' to None. 
+ Used by admin_actions api + + :param context: The security context + :param instance: The instance to update + :param new_state: A member of vm_state to change + the instance's state to, + eg. 'active' + """ + self.update(context, instance, + pass_on_state_change=True, + vm_state=new_state, + task_state=None) + + def update(self, context, instance, pass_on_state_change=False, **kwargs): + """ + Update an instance. + :param pass_on_state_change: if true, the state change will be passed + on to child cells + """ + cell_name = instance['cell_name'] + if cell_name and self._cell_read_only(cell_name): + raise exception.InstanceInvalidState( + attr="vm_state", + instance_uuid=instance['uuid'], + state="temporary_readonly", + method='update') rv = super(ComputeCellsAPI, self).update(context, instance, **kwargs) - # We need to skip vm_state/task_state updates... those will - # happen when via a a _cast_to_cells for running a different - # compute api method kwargs_copy = kwargs.copy() - kwargs_copy.pop('vm_state', None) - kwargs_copy.pop('task_state', None) + if not pass_on_state_change: + # We need to skip vm_state/task_state updates... those will + # happen via a _cast_to_cells when running a different + # compute api method + kwargs_copy.pop('vm_state', None) + kwargs_copy.pop('task_state', None) if kwargs_copy: try: self._cast_to_cells(context, instance, 'update', @@ -241,22 +282,14 @@ class ComputeCellsAPI(compute_api.API): @validate_cell def revert_resize(self, context, instance): """Reverts a resize, deleting the 'new' instance in the process.""" - # NOTE(markwash): regular api manipulates the migration here, but we - # don't have access to it. So to preserve the interface just update the - # vm and task state. 
- self.update(context, instance, - task_state=task_states.RESIZE_REVERTING) + super(ComputeCellsAPI, self).revert_resize(context, instance) self._cast_to_cells(context, instance, 'revert_resize') @check_instance_state(vm_state=[vm_states.RESIZED]) @validate_cell def confirm_resize(self, context, instance): """Confirms a migration/resize and deletes the 'old' instance.""" - # NOTE(markwash): regular api manipulates migration here, but we don't - # have the migration in the api database. So to preserve the interface - # just update the vm and task state without calling super() - self.update(context, instance, task_state=None, - vm_state=vm_states.ACTIVE) + super(ComputeCellsAPI, self).confirm_resize(context, instance) self._cast_to_cells(context, instance, 'confirm_resize') @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED], @@ -269,8 +302,36 @@ class ComputeCellsAPI(compute_api.API): the original flavor_id. If flavor_id is not None, the instance should be migrated to a new host and resized to the new flavor_id. """ - super(ComputeCellsAPI, self).resize(context, instance, *args, - **kwargs) + super(ComputeCellsAPI, self).resize(context, instance, *args, **kwargs) + + # NOTE(johannes): If we get to this point, then we know the + # specified flavor_id is valid and exists. We'll need to load + # it again, but that should be safe. + + old_instance_type_id = instance['instance_type_id'] + old_instance_type = instance_types.get_instance_type( + old_instance_type_id) + + flavor_id = kwargs.get('flavor_id') + + if not flavor_id: + new_instance_type = old_instance_type + else: + new_instance_type = instance_types.get_instance_type_by_flavor_id( + flavor_id) + + # NOTE(johannes): Later, when the resize is confirmed or reverted, + # the superclass implementations of those methods will need access + # to a local migration record for quota reasons. We don't need + # source and/or destination information, just the old and new + # instance_types. 
Status is set to 'finished' since nothing else + # will update the status along the way. + self.db.migration_create(context.elevated(), + {'instance_uuid': instance['uuid'], + 'old_instance_type_id': old_instance_type['id'], + 'new_instance_type_id': new_instance_type['id'], + 'status': 'finished'}) + # FIXME(comstud): pass new instance_type object down to a method # that'll unfold it self._cast_to_cells(context, instance, 'resize', *args, **kwargs) @@ -378,6 +439,21 @@ class ComputeCellsAPI(compute_api.API): connect_info['port'], connect_info['internal_access_path']) return {'url': connect_info['access_url']} + @wrap_check_policy + @validate_cell + def get_spice_console(self, context, instance, console_type): + """Get a url to a SPICE Console.""" + if not instance['host']: + raise exception.InstanceNotReady(instance_id=instance['uuid']) + + connect_info = self._call_to_cells(context, instance, + 'get_spice_connect_info', console_type) + + self.consoleauth_rpcapi.authorize_console(context, + connect_info['token'], console_type, connect_info['host'], + connect_info['port'], connect_info['internal_access_path']) + return {'url': connect_info['access_url']} + @validate_cell def get_console_output(self, context, instance, *args, **kwargs): """Get console output for an an instance.""" diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 85942541f..d1cffea7d 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -25,10 +25,6 @@ building a disk image, launching it via the underlying virtualization driver, responding to calls to check its state, attaching persistent storage, and terminating it. 
-**Related Flags** - -:instances_path: Where instances are kept on disk - """ import contextlib @@ -176,7 +172,6 @@ CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api') CONF.import_opt('console_topic', 'nova.console.rpcapi') CONF.import_opt('host', 'nova.netconf') CONF.import_opt('my_ip', 'nova.netconf') -CONF.import_opt('network_manager', 'nova.service') QUOTAS = quota.QUOTAS @@ -234,7 +229,7 @@ def wrap_instance_fault(function): with excutils.save_and_reraise_exception(): compute_utils.add_instance_fault_from_exc(context, - kwargs['instance']['uuid'], e, sys.exc_info()) + kwargs['instance'], e, sys.exc_info()) return decorated_function @@ -297,7 +292,7 @@ class ComputeVirtAPI(virtapi.VirtAPI): class ComputeManager(manager.SchedulerDependentManager): """Manages the running instances from creation to destruction.""" - RPC_API_VERSION = '2.22' + RPC_API_VERSION = '2.24' def __init__(self, compute_driver=None, *args, **kwargs): """Load configuration options and connect to the hypervisor.""" @@ -305,8 +300,6 @@ class ComputeManager(manager.SchedulerDependentManager): self.driver = driver.load_compute_driver(self.virtapi, compute_driver) self.network_api = network.API() self.volume_api = volume.API() - self.network_manager = importutils.import_object( - CONF.network_manager, host=kwargs.get('host', None)) self._last_host_check = 0 self._last_bw_usage_poll = 0 self._last_vol_usage_poll = 0 @@ -467,9 +460,14 @@ class ComputeManager(manager.SchedulerDependentManager): except NotImplementedError: LOG.warning(_('Hypervisor driver does not support ' 'resume guests'), instance=instance) + except Exception: + # NOTE(vish): The instance failed to resume, so we set the + # instance to error and attempt to continue. 
+ LOG.warning(_('Failed to resume instance'), instance=instance) + self._set_instance_error_state(context, instance['uuid']) elif drv_state == power_state.RUNNING: - # VMWareAPI drivers will raise an exception + # VMwareAPI drivers will raise an exception try: self.driver.ensure_filtering_rules_for_instance( instance, @@ -682,9 +680,9 @@ class ComputeManager(manager.SchedulerDependentManager): try: limits = filter_properties.get('limits', {}) with rt.instance_claim(context, instance, limits): - + macs = self.driver.macs_for_instance(instance) network_info = self._allocate_network(context, instance, - requested_networks) + requested_networks, macs) block_device_info = self._prep_block_device(context, instance, bdms) instance = self._spawn(context, instance, image_meta, @@ -734,8 +732,8 @@ class ComputeManager(manager.SchedulerDependentManager): instance_uuid = instance['uuid'] rescheduled = False - compute_utils.add_instance_fault_from_exc(context, instance_uuid, - exc_info[0], exc_info=exc_info) + compute_utils.add_instance_fault_from_exc(context, instance, + exc_info[1], exc_info=exc_info) try: self._deallocate_network(context, instance) @@ -915,7 +913,7 @@ class ComputeManager(manager.SchedulerDependentManager): expected_task_state=(task_states.SCHEDULING, None)) - def _allocate_network(self, context, instance, requested_networks): + def _allocate_network(self, context, instance, requested_networks, macs): """Allocate networks for an instance and return the network info.""" instance = self._instance_update(context, instance['uuid'], vm_state=vm_states.BUILDING, @@ -926,7 +924,8 @@ class ComputeManager(manager.SchedulerDependentManager): # allocate and get network info network_info = self.network_api.allocate_for_instance( context, instance, vpn=is_vpn, - requested_networks=requested_networks) + requested_networks=requested_networks, + macs=macs) except Exception: LOG.exception(_('Instance failed network setup'), instance=instance) @@ -1443,19 +1442,14 @@ class 
ComputeManager(manager.SchedulerDependentManager): if block_device_info is None: block_device_info = self._get_instance_volume_block_device_info( context, instance) - # NOTE(danms): remove this when RPC API < 2.5 compatibility - # is no longer needed - if network_info is None: - network_info = self._get_instance_nw_info(context, instance) - else: - network_info = network_model.NetworkInfo.hydrate(network_info) + network_info = self._get_instance_nw_info(context, instance) self._notify_about_instance_usage(context, instance, "reboot.start") current_power_state = self._get_power_state(context, instance) - self._instance_update(context, instance['uuid'], - power_state=current_power_state, - vm_state=vm_states.ACTIVE) + instance = self._instance_update(context, instance['uuid'], + power_state=current_power_state, + vm_state=vm_states.ACTIVE) if instance['power_state'] != power_state.RUNNING: state = instance['power_state'] @@ -1472,14 +1466,14 @@ class ComputeManager(manager.SchedulerDependentManager): LOG.error(_('Cannot reboot instance: %(exc)s'), locals(), context=context, instance=instance) compute_utils.add_instance_fault_from_exc(context, - instance['uuid'], exc, sys.exc_info()) + instance, exc, sys.exc_info()) # Fall through and reset task_state to None current_power_state = self._get_power_state(context, instance) - self._instance_update(context, instance['uuid'], - power_state=current_power_state, - vm_state=vm_states.ACTIVE, - task_state=None) + instance = self._instance_update(context, instance['uuid'], + power_state=current_power_state, + vm_state=vm_states.ACTIVE, + task_state=None) self._notify_about_instance_usage(context, instance, "reboot.end") @@ -2003,7 +1997,7 @@ class ComputeManager(manager.SchedulerDependentManager): rescheduled = False instance_uuid = instance['uuid'] - compute_utils.add_instance_fault_from_exc(context, instance_uuid, + compute_utils.add_instance_fault_from_exc(context, instance, exc_info[0], exc_info=exc_info) try: @@ -2395,6 
+2389,9 @@ class ComputeManager(manager.SchedulerDependentManager): LOG.debug(_("Getting vnc console"), instance=instance) token = str(uuid.uuid4()) + if not CONF.vnc_enabled: + raise exception.ConsoleTypeInvalid(console_type=console_type) + if console_type == 'novnc': # For essex, novncproxy_base_url must include the full path # including the html file (like http://myhost/vnc_auto.html) @@ -2412,6 +2409,33 @@ class ComputeManager(manager.SchedulerDependentManager): return connect_info + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) + @wrap_instance_fault + def get_spice_console(self, context, console_type, instance): + """Return connection information for a spice console.""" + context = context.elevated() + LOG.debug(_("Getting spice console"), instance=instance) + token = str(uuid.uuid4()) + + if not CONF.spice.enabled: + raise exception.ConsoleTypeInvalid(console_type=console_type) + + if console_type == 'spice-html5': + # For essex, spicehtml5proxy_base_url must include the full path + # including the html file (like http://myhost/spice_auto.html) + access_url = '%s?token=%s' % (CONF.spice.html5proxy_base_url, + token) + else: + raise exception.ConsoleTypeInvalid(console_type=console_type) + + # Retrieve connect info from driver, and then decorate with our + # access info token + connect_info = self.driver.get_spice_console(instance) + connect_info['token'] = token + connect_info['access_url'] = access_url + + return connect_info + def _attach_volume_boot(self, context, instance, volume, mountpoint): """Attach a volume to an instance at boot time. 
So actual attach is done by instance creation""" @@ -2437,8 +2461,11 @@ class ComputeManager(manager.SchedulerDependentManager): @lockutils.synchronized(instance['uuid'], 'nova-') def do_reserve(): + bdms = self.conductor_api.block_device_mapping_get_all_by_instance( + context, instance) result = compute_utils.get_device_name_for_instance(context, instance, + bdms, device) # NOTE(vish): create bdm here to avoid race condition values = {'instance_uuid': instance['uuid'], @@ -2536,7 +2563,7 @@ class ComputeManager(manager.SchedulerDependentManager): mp) except Exception: # pylint: disable=W0702 with excutils.save_and_reraise_exception(): - msg = _("Faild to detach volume %(volume_id)s from %(mp)s") + msg = _("Failed to detach volume %(volume_id)s from %(mp)s") LOG.exception(msg % locals(), context=context, instance=instance) volume = self.volume_api.get(context, volume_id) @@ -2592,10 +2619,10 @@ class ComputeManager(manager.SchedulerDependentManager): pass def _get_compute_info(self, context, host): - compute_node_ref = self.conductor_api.service_get_all_compute_by_host( + compute_node_ref = self.conductor_api.service_get_by_compute_host( context, host) try: - return compute_node_ref[0]['compute_node'][0] + return compute_node_ref['compute_node'][0] except IndexError: raise exception.NotFound(_("Host %(host)s not found") % locals()) @@ -2842,9 +2869,12 @@ class ComputeManager(manager.SchedulerDependentManager): self.network_api.migrate_instance_finish(context, instance, migration) network_info = self._get_instance_nw_info(context, instance) + block_device_info = self._get_instance_volume_block_device_info( + context, instance) + self.driver.post_live_migration_at_destination(context, instance, self._legacy_nw_info(network_info), - block_migration) + block_migration, block_device_info) # Restore instance state current_power_state = self._get_power_state(context, instance) instance = self._instance_update(context, instance['uuid'], @@ -3352,10 +3382,8 @@ class 
ComputeManager(manager.SchedulerDependentManager): LOG.exception(_("error during stop() in " "sync_power_state."), instance=db_instance) - elif vm_power_state in (power_state.PAUSED, - power_state.SUSPENDED): - LOG.warn(_("Instance is paused or suspended " - "unexpectedly. Calling " + elif vm_power_state == power_state.SUSPENDED: + LOG.warn(_("Instance is suspended unexpectedly. Calling " "the stop API."), instance=db_instance) try: self.compute_api.stop(context, db_instance) @@ -3363,6 +3391,16 @@ class ComputeManager(manager.SchedulerDependentManager): LOG.exception(_("error during stop() in " "sync_power_state."), instance=db_instance) + elif vm_power_state == power_state.PAUSED: + # Note(maoy): a VM may get into the paused state not only + # because the user request via API calls, but also + # due to (temporary) external instrumentations. + # Before the virt layer can reliably report the reason, + # we simply ignore the state discrepancy. In many cases, + # the VM state will go back to running after the external + # instrumentation is done. See bug 1097806 for details. + LOG.warn(_("Instance is paused unexpectedly. 
Ignore."), + instance=db_instance) elif vm_state == vm_states.STOPPED: if vm_power_state not in (power_state.NOSTATE, power_state.SHUTDOWN, diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py index 075d59ec8..be0360185 100644 --- a/nova/compute/resource_tracker.py +++ b/nova/compute/resource_tracker.py @@ -25,7 +25,6 @@ from nova.compute import task_states from nova.compute import vm_states from nova import conductor from nova import context -from nova import db from nova import exception from nova.openstack.common import cfg from nova.openstack.common import importutils @@ -252,14 +251,15 @@ class ResourceTracker(object): self._report_hypervisor_resource_view(resources) # Grab all instances assigned to this node: - instances = db.instance_get_all_by_host_and_node(context, self.host, - self.nodename) + instances = self.conductor_api.instance_get_all_by_host_and_node( + context, self.host, self.nodename) # Now calculate usage based on instance utilization: self._update_usage_from_instances(resources, instances) # Grab all in-progress migrations: - migrations = db.migration_get_in_progress_by_host_and_node(context, + capi = self.conductor_api + migrations = capi.migration_get_in_progress_by_host_and_node(context, self.host, self.nodename) self._update_usage_from_migrations(resources, migrations) @@ -303,13 +303,13 @@ class ResourceTracker(object): def _create(self, context, values): """Create the compute node in the DB.""" # initialize load stats from existing instances: - compute_node = db.compute_node_create(context, values) - self.compute_node = dict(compute_node) + self.compute_node = self.conductor_api.compute_node_create(context, + values) def _get_service(self, context): try: - return db.service_get_all_compute_by_host(context, - self.host)[0] + return self.conductor_api.service_get_by_compute_host(context, + self.host) except exception.NotFound: LOG.warn(_("No service record for host %s"), self.host) @@ -348,15 +348,15 @@ class 
ResourceTracker(object): def _update(self, context, values, prune_stats=False): """Persist the compute node updates to the DB.""" - compute_node = db.compute_node_update(context, - self.compute_node['id'], values, prune_stats) - self.compute_node = dict(compute_node) + if "service" in self.compute_node: + del self.compute_node['service'] + self.compute_node = self.conductor_api.compute_node_update( + context, self.compute_node, values, prune_stats) def confirm_resize(self, context, migration, status='confirmed'): """Cleanup usage for a confirmed resize.""" elevated = context.elevated() - db.migration_update(elevated, migration['id'], - {'status': status}) + self.conductor_api.migration_update(elevated, migration, status) self.update_available_resource(elevated) def revert_resize(self, context, migration, status='reverted'): diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py index ae283283b..525d1adc7 100644 --- a/nova/compute/rpcapi.py +++ b/nova/compute/rpcapi.py @@ -157,6 +157,8 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy): 2.21 - Add migrate_data dict param to pre_live_migration() 2.22 - Add recreate, on_shared_storage and host arguments to rebuild_instance() + 2.23 - Remove network_info from reboot_instance + 2.24 - Added get_spice_console method ''' # @@ -294,6 +296,13 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy): instance=instance_p, console_type=console_type), topic=_compute_topic(self.topic, ctxt, None, instance)) + def get_spice_console(self, ctxt, instance, console_type): + instance_p = jsonutils.to_primitive(instance) + return self.call(ctxt, self.make_msg('get_spice_console', + instance=instance_p, console_type=console_type), + topic=_compute_topic(self.topic, ctxt, None, instance), + version='2.24') + def host_maintenance_mode(self, ctxt, host_param, mode, host): '''Set host maintenance mode @@ -383,16 +392,15 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy): _compute_topic(self.topic, ctxt, 
host, None), version='2.20') - def reboot_instance(self, ctxt, instance, - block_device_info, network_info, reboot_type): + def reboot_instance(self, ctxt, instance, block_device_info, + reboot_type): instance_p = jsonutils.to_primitive(instance) self.cast(ctxt, self.make_msg('reboot_instance', instance=instance_p, block_device_info=block_device_info, - network_info=network_info, reboot_type=reboot_type), topic=_compute_topic(self.topic, ctxt, None, instance), - version='2.5') + version='2.23') def rebuild_instance(self, ctxt, instance, new_pass, injected_files, image_ref, orig_image_ref, orig_sys_metadata, bdms, @@ -525,7 +533,7 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy): version='2.3') def snapshot_instance(self, ctxt, instance, image_id, image_type, - backup_type, rotation): + backup_type=None, rotation=None): instance_p = jsonutils.to_primitive(instance) self.cast(ctxt, self.make_msg('snapshot_instance', instance=instance_p, image_id=image_id, diff --git a/nova/compute/utils.py b/nova/compute/utils.py index 0c475d082..2b1286e16 100644 --- a/nova/compute/utils.py +++ b/nova/compute/utils.py @@ -44,7 +44,7 @@ def metadata_to_dict(metadata): return result -def add_instance_fault_from_exc(context, instance_uuid, fault, exc_info=None): +def add_instance_fault_from_exc(context, instance, fault, exc_info=None): """Adds the specified fault to the database.""" code = 500 @@ -62,15 +62,16 @@ def add_instance_fault_from_exc(context, instance_uuid, fault, exc_info=None): details += '\n' + ''.join(traceback.format_tb(tb)) values = { - 'instance_uuid': instance_uuid, + 'instance_uuid': instance['uuid'], 'code': code, 'message': unicode(message), 'details': unicode(details), + 'host': CONF.host } db.instance_fault_create(context, values) -def get_device_name_for_instance(context, instance, device): +def get_device_name_for_instance(context, instance, bdms, device): """Validates (or generates) a device name for instance. 
If device is not set, it will generate a unique device appropriate @@ -87,8 +88,6 @@ def get_device_name_for_instance(context, instance, device): req_prefix, req_letters = block_device.match_device(device) except (TypeError, AttributeError, ValueError): raise exception.InvalidDevicePath(path=device) - bdms = db.block_device_mapping_get_all_by_instance(context, - instance['uuid']) mappings = block_device.instance_block_mapping(instance, bdms) try: prefix = block_device.match_device(mappings['root'])[0] diff --git a/nova/conductor/api.py b/nova/conductor/api.py index 4cc10604b..d05c94877 100644 --- a/nova/conductor/api.py +++ b/nova/conductor/api.py @@ -97,6 +97,9 @@ class LocalAPI(object): def instance_get_all_by_host(self, context, host): return self._manager.instance_get_all_by_host(context, host) + def instance_get_all_by_host_and_node(self, context, host, node): + return self._manager.instance_get_all_by_host(context, host, node) + def instance_get_all_by_filters(self, context, filters, sort_key='created_at', sort_dir='desc'): @@ -114,6 +117,11 @@ class LocalAPI(object): return self._manager.instance_get_active_by_window( context, begin, end, project_id, host) + def instance_get_active_by_window_joined(self, context, begin, end=None, + project_id=None, host=None): + return self._manager.instance_get_active_by_window_joined( + context, begin, end, project_id, host) + def instance_info_cache_update(self, context, instance, values): return self._manager.instance_info_cache_update(context, instance, @@ -134,6 +142,10 @@ class LocalAPI(object): return self._manager.migration_get_unconfirmed_by_dest_compute( context, confirm_window, dest_compute) + def migration_get_in_progress_by_host_and_node(self, context, host, node): + return self._manager.migration_get_in_progress_by_host_and_node( + context, host, node) + def migration_create(self, context, instance, values): return self._manager.migration_create(context, instance, values) @@ -249,8 +261,11 @@ class 
LocalAPI(object): def service_get_by_host_and_topic(self, context, host, topic): return self._manager.service_get_all_by(context, topic, host) - def service_get_all_compute_by_host(self, context, host): - return self._manager.service_get_all_by(context, 'compute', host) + def service_get_by_compute_host(self, context, host): + result = self._manager.service_get_all_by(context, 'compute', host) + # FIXME(comstud): A major revision bump to 2.0 should return a + # single entry, so we should just return 'result' at that point. + return result[0] def service_get_by_args(self, context, host, binary): return self._manager.service_get_all_by(context, host=host, @@ -268,6 +283,16 @@ class LocalAPI(object): def service_destroy(self, context, service_id): return self._manager.service_destroy(context, service_id) + def compute_node_create(self, context, values): + return self._manager.compute_node_create(context, values) + + def compute_node_update(self, context, node, values, prune_stats=False): + return self._manager.compute_node_update(context, node, values, + prune_stats) + + def service_update(self, context, service, values): + return self._manager.service_update(context, service, values) + class API(object): """Conductor API that does updates via RPC to the ConductorManager.""" @@ -328,6 +353,10 @@ class API(object): def instance_get_all_by_host(self, context, host): return self.conductor_rpcapi.instance_get_all_by_host(context, host) + def instance_get_all_by_host_and_node(self, context, host, node): + return self.conductor_rpcapi.instance_get_all_by_host(context, + host, node) + def instance_get_all_by_filters(self, context, filters, sort_key='created_at', sort_dir='desc'): @@ -345,6 +374,11 @@ class API(object): return self.conductor_rpcapi.instance_get_active_by_window( context, begin, end, project_id, host) + def instance_get_active_by_window_joined(self, context, begin, end=None, + project_id=None, host=None): + return 
self.conductor_rpcapi.instance_get_active_by_window_joined( + context, begin, end, project_id, host) + def instance_info_cache_update(self, context, instance, values): return self.conductor_rpcapi.instance_info_cache_update(context, instance, values) @@ -367,6 +401,11 @@ class API(object): return crpcapi.migration_get_unconfirmed_by_dest_compute( context, confirm_window, dest_compute) + def migration_get_in_progress_by_host_and_node(self, context, host, node): + crpcapi = self.conductor_rpcapi + return crpcapi.migration_get_in_progress_by_host_and_node(context, + host, node) + def migration_create(self, context, instance, values): return self.conductor_rpcapi.migration_create(context, instance, values) @@ -493,9 +532,12 @@ class API(object): def service_get_by_host_and_topic(self, context, host, topic): return self.conductor_rpcapi.service_get_all_by(context, topic, host) - def service_get_all_compute_by_host(self, context, host): - return self.conductor_rpcapi.service_get_all_by(context, 'compute', - host) + def service_get_by_compute_host(self, context, host): + result = self.conductor_rpcapi.service_get_all_by(context, 'compute', + host) + # FIXME(comstud): A major revision bump to 2.0 should return a + # single entry, so we should just return 'result' at that point. 
+ return result[0] def service_get_by_args(self, context, host, binary): return self.conductor_rpcapi.service_get_all_by(context, host=host, @@ -512,3 +554,13 @@ class API(object): def service_destroy(self, context, service_id): return self.conductor_rpcapi.service_destroy(context, service_id) + + def compute_node_create(self, context, values): + return self.conductor_rpcapi.compute_node_create(context, values) + + def compute_node_update(self, context, node, values, prune_stats=False): + return self.conductor_rpcapi.compute_node_update(context, node, + values, prune_stats) + + def service_update(self, context, service, values): + return self.conductor_rpcapi.service_update(context, service, values) diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py index 8c6f39f02..87b143912 100644 --- a/nova/conductor/manager.py +++ b/nova/conductor/manager.py @@ -43,7 +43,7 @@ datetime_fields = ['launched_at', 'terminated_at'] class ConductorManager(manager.SchedulerDependentManager): """Mission: TBD.""" - RPC_API_VERSION = '1.30' + RPC_API_VERSION = '1.35' def __init__(self, *args, **kwargs): super(ConductorManager, self).__init__(service_name='conductor', @@ -83,9 +83,13 @@ class ConductorManager(manager.SchedulerDependentManager): def instance_get_all(self, context): return jsonutils.to_primitive(self.db.instance_get_all(context)) - def instance_get_all_by_host(self, context, host): - return jsonutils.to_primitive( - self.db.instance_get_all_by_host(context.elevated(), host)) + def instance_get_all_by_host(self, context, host, node=None): + if node is not None: + result = self.db.instance_get_all_by_host_and_node( + context.elevated(), host, node) + else: + result = self.db.instance_get_all_by_host(context.elevated(), host) + return jsonutils.to_primitive(result) @rpc_common.client_exceptions(exception.MigrationNotFound) def migration_get(self, context, migration_id): @@ -100,6 +104,12 @@ class ConductorManager(manager.SchedulerDependentManager): context, 
confirm_window, dest_compute) return jsonutils.to_primitive(migrations) + def migration_get_in_progress_by_host_and_node(self, context, + host, node): + migrations = self.db.migration_get_in_progress_by_host_and_node( + context, host, node) + return jsonutils.to_primitive(migrations) + def migration_create(self, context, instance, values): values.update({'instance_uuid': instance['uuid'], 'source_compute': instance['host'], @@ -224,10 +234,14 @@ class ConductorManager(manager.SchedulerDependentManager): def instance_get_active_by_window(self, context, begin, end=None, project_id=None, host=None): - result = self.db.instance_get_active_by_window_joined(context, - begin, end, - project_id, - host) + result = self.db.instance_get_active_by_window(context, begin, end, + project_id, host) + return jsonutils.to_primitive(result) + + def instance_get_active_by_window_joined(self, context, begin, end=None, + project_id=None, host=None): + result = self.db.instance_get_active_by_window_joined( + context, begin, end, project_id, host) return jsonutils.to_primitive(result) def instance_destroy(self, context, instance): @@ -261,8 +275,9 @@ class ConductorManager(manager.SchedulerDependentManager): result = self.db.service_get_all(context) elif all((topic, host)): if topic == 'compute': - result = self.db.service_get_all_compute_by_host(context, - host) + result = self.db.service_get_by_compute_host(context, host) + # FIXME(comstud) Potentially remove this on bump to v2.0 + result = [result] else: result = self.db.service_get_by_host_and_topic(context, host, topic) @@ -290,3 +305,17 @@ class ConductorManager(manager.SchedulerDependentManager): @rpc_common.client_exceptions(exception.ServiceNotFound) def service_destroy(self, context, service_id): self.db.service_destroy(context, service_id) + + def compute_node_create(self, context, values): + result = self.db.compute_node_create(context, values) + return jsonutils.to_primitive(result) + + def compute_node_update(self, context, 
node, values, prune_stats=False): + result = self.db.compute_node_update(context, node['id'], values, + prune_stats) + return jsonutils.to_primitive(result) + + @rpc_common.client_exceptions(exception.ServiceNotFound) + def service_update(self, context, service, values): + svc = self.db.service_update(context, service['id'], values) + return jsonutils.to_primitive(svc) diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py index b7f760cf5..1699c85ed 100644 --- a/nova/conductor/rpcapi.py +++ b/nova/conductor/rpcapi.py @@ -63,6 +63,11 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy): 1.28 - Added binary arg to service_get_all_by 1.29 - Added service_destroy 1.30 - Added migration_create + 1.31 - Added migration_get_in_progress_by_host_and_node + 1.32 - Added optional node to instance_get_all_by_host + 1.33 - Added compute_node_create and compute_node_update + 1.34 - Added service_update + 1.35 - Added instance_get_active_by_window_joined """ BASE_RPC_API_VERSION = '1.0' @@ -106,6 +111,12 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy): dest_compute=dest_compute) return self.call(context, msg, version='1.20') + def migration_get_in_progress_by_host_and_node(self, context, + host, node): + msg = self.make_msg('migration_get_in_progress_by_host_and_node', + host=host, node=node) + return self.call(context, msg, version='1.31') + def migration_create(self, context, instance, values): instance_p = jsonutils.to_primitive(instance) msg = self.make_msg('migration_create', instance=instance_p, @@ -231,6 +242,13 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy): host=host) return self.call(context, msg, version='1.15') + def instance_get_active_by_window_joined(self, context, begin, end=None, + project_id=None, host=None): + msg = self.make_msg('instance_get_active_by_window_joined', + begin=begin, end=end, project_id=project_id, + host=host) + return self.call(context, msg, version='1.35') + def instance_destroy(self, 
context, instance): instance_p = jsonutils.to_primitive(instance) msg = self.make_msg('instance_destroy', instance=instance_p) @@ -271,9 +289,9 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy): msg = self.make_msg('instance_get_all') return self.call(context, msg, version='1.23') - def instance_get_all_by_host(self, context, host): - msg = self.make_msg('instance_get_all_by_host', host=host) - return self.call(context, msg, version='1.23') + def instance_get_all_by_host(self, context, host, node=None): + msg = self.make_msg('instance_get_all_by_host', host=host, node=node) + return self.call(context, msg, version='1.32') def action_event_start(self, context, values): msg = self.make_msg('action_event_start', values=values) @@ -297,3 +315,18 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy): def service_destroy(self, context, service_id): msg = self.make_msg('service_destroy', service_id=service_id) return self.call(context, msg, version='1.29') + + def compute_node_create(self, context, values): + msg = self.make_msg('compute_node_create', values=values) + return self.call(context, msg, version='1.33') + + def compute_node_update(self, context, node, values, prune_stats=False): + node_p = jsonutils.to_primitive(node) + msg = self.make_msg('compute_node_update', node=node_p, values=values, + prune_stats=prune_stats) + return self.call(context, msg, version='1.33') + + def service_update(self, context, service, values): + service_p = jsonutils.to_primitive(service) + msg = self.make_msg('service_update', service=service_p, values=values) + return self.call(context, msg, version='1.34') diff --git a/nova/console/manager.py b/nova/console/manager.py index 243c028d9..2045f824d 100644 --- a/nova/console/manager.py +++ b/nova/console/manager.py @@ -65,7 +65,6 @@ class ConsoleProxyManager(manager.Manager): def init_host(self): self.driver.init_host() - @exception.wrap_exception() def add_console(self, context, instance_id, password=None, 
port=None, **kwargs): instance = self.db.instance_get(context, instance_id) @@ -93,7 +92,6 @@ class ConsoleProxyManager(manager.Manager): return console['id'] - @exception.wrap_exception() def remove_console(self, context, console_id, **_kwargs): try: console = self.db.console_get(context, console_id) diff --git a/nova/console/vmrc_manager.py b/nova/console/vmrc_manager.py index e8eab4db2..bb1818943 100644 --- a/nova/console/vmrc_manager.py +++ b/nova/console/vmrc_manager.py @@ -49,7 +49,7 @@ class ConsoleVMRCManager(manager.Manager): """Get VIM session for the pool specified.""" vim_session = None if pool['id'] not in self.sessions.keys(): - vim_session = vmwareapi_conn.VMWareAPISession( + vim_session = vmwareapi_conn.VMwareAPISession( pool['address'], pool['username'], pool['password'], @@ -75,7 +75,6 @@ class ConsoleVMRCManager(manager.Manager): self.driver.setup_console(context, console) return console - @exception.wrap_exception() def add_console(self, context, instance_id, password=None, port=None, **kwargs): """Adds a console for the instance. @@ -105,7 +104,6 @@ class ConsoleVMRCManager(manager.Manager): instance) return console['id'] - @exception.wrap_exception() def remove_console(self, context, console_id, **_kwargs): """Removes a console entry.""" try: diff --git a/nova/console/websocketproxy.py b/nova/console/websocketproxy.py new file mode 100644 index 000000000..ce9243d46 --- /dev/null +++ b/nova/console/websocketproxy.py @@ -0,0 +1,89 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +''' +Websocket proxy that is compatible with OpenStack Nova. +Leverages websockify.py by Joel Martin +''' + +import Cookie +import socket + +import websockify + +from nova.consoleauth import rpcapi as consoleauth_rpcapi +from nova import context +from nova.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class NovaWebSocketProxy(websockify.WebSocketProxy): + def __init__(self, *args, **kwargs): + websockify.WebSocketProxy.__init__(self, unix_target=None, + target_cfg=None, + ssl_target=None, *args, **kwargs) + + def new_client(self): + """ + Called after a new WebSocket connection has been established. 
+ """ + cookie = Cookie.SimpleCookie() + cookie.load(self.headers.getheader('cookie')) + token = cookie['token'].value + ctxt = context.get_admin_context() + rpcapi = consoleauth_rpcapi.ConsoleAuthAPI() + connect_info = rpcapi.check_token(ctxt, token=token) + + if not connect_info: + LOG.audit("Invalid Token: %s", token) + raise Exception(_("Invalid Token")) + + host = connect_info['host'] + port = int(connect_info['port']) + + # Connect to the target + self.msg("connecting to: %s:%s" % (host, port)) + LOG.audit("connecting to: %s:%s" % (host, port)) + tsock = self.socket(host, port, connect=True) + + # Handshake as necessary + if connect_info.get('internal_access_path'): + tsock.send("CONNECT %s HTTP/1.1\r\n\r\n" % + connect_info['internal_access_path']) + while True: + data = tsock.recv(4096, socket.MSG_PEEK) + if data.find("\r\n\r\n") != -1: + if not data.split("\r\n")[0].find("200"): + LOG.audit("Invalid Connection Info %s", token) + raise Exception(_("Invalid Connection Info")) + tsock.recv(len(data)) + break + + if self.verbose and not self.daemon: + print(self.traffic_legend) + + # Start proxying + try: + self.do_proxy(tsock) + except Exception: + if tsock: + tsock.shutdown(socket.SHUT_RDWR) + tsock.close() + self.vmsg("%s:%s: Target closed" % (host, port)) + LOG.audit("%s:%s: Target closed" % (host, port)) + raise diff --git a/nova/crypto.py b/nova/crypto.py index ff76a54d0..5c48c60b6 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -135,13 +135,14 @@ def generate_fingerprint(public_key): raise exception.InvalidKeypair() -def generate_key_pair(bits=1024): - # what is the magic 65537? 
- +def generate_key_pair(bits=None): with utils.tempdir() as tmpdir: keyfile = os.path.join(tmpdir, 'temp') - utils.execute('ssh-keygen', '-q', '-b', bits, '-N', '', - '-t', 'rsa', '-f', keyfile, '-C', 'Generated by Nova') + args = ['ssh-keygen', '-q', '-N', '', '-t', 'rsa', + '-f', keyfile, '-C', 'Generated by Nova'] + if bits is not None: + args.extend(['-b', bits]) + utils.execute(*args) fingerprint = _generate_fingerprint('%s.pub' % (keyfile)) if not os.path.exists(keyfile): raise exception.FileNotFound(keyfile) @@ -171,13 +172,44 @@ def decrypt_text(project_id, text): raise exception.ProjectNotFound(project_id=project_id) try: dec, _err = utils.execute('openssl', - 'rsautl', - '-decrypt', - '-inkey', '%s' % private_key, - process_input=text) + 'rsautl', + '-decrypt', + '-inkey', '%s' % private_key, + process_input=text) return dec - except exception.ProcessExecutionError: - raise exception.DecryptionFailure() + except exception.ProcessExecutionError as exc: + raise exception.DecryptionFailure(reason=exc.stderr) + + +def ssh_encrypt_text(ssh_public_key, text): + """Encrypt text with an ssh public key. + + Requires recent ssh-keygen binary in addition to openssl binary. 
+ """ + with utils.tempdir() as tmpdir: + sshkey = os.path.abspath(os.path.join(tmpdir, 'ssh.key')) + with open(sshkey, 'w') as f: + f.write(ssh_public_key) + sslkey = os.path.abspath(os.path.join(tmpdir, 'ssl.key')) + try: + # NOTE(vish): -P is to skip prompt on bad keys + out, _err = utils.execute('ssh-keygen', + '-P', '', + '-e', + '-f', sshkey, + '-m', 'PKCS8') + with open(sslkey, 'w') as f: + f.write(out) + enc, _err = utils.execute('openssl', + 'rsautl', + '-encrypt', + '-pubin', + '-inkey', sslkey, + '-keyform', 'PEM', + process_input=text) + return enc + except exception.ProcessExecutionError as exc: + raise exception.EncryptionFailure(reason=exc.stderr) def revoke_cert(project_id, file_name): diff --git a/nova/db/api.py b/nova/db/api.py index b1552b480..d8a16c52d 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -151,9 +151,12 @@ def service_get_all_by_host(context, host): return IMPL.service_get_all_by_host(context, host) -def service_get_all_compute_by_host(context, host): - """Get all compute services for a given host.""" - return IMPL.service_get_all_compute_by_host(context, host) +def service_get_by_compute_host(context, host): + """Get the service entry for a given compute host. + + Returns the service entry joined with the compute_node entry. 
+ """ + return IMPL.service_get_by_compute_host(context, host) def service_get_all_compute_sorted(context): @@ -494,6 +497,11 @@ def fixed_ip_get_by_address_detailed(context, address): return IMPL.fixed_ip_get_by_address_detailed(context, address) +def fixed_ip_get_by_floating_address(context, floating_address): + """Get a fixed ip by a floating address.""" + return IMPL.fixed_ip_get_by_floating_address(context, floating_address) + + def fixed_ip_get_by_instance(context, instance_uuid): """Get fixed ips by instance or raise if none exist.""" return IMPL.fixed_ip_get_by_instance(context, instance_uuid) @@ -751,12 +759,13 @@ def instance_info_cache_update(context, instance_uuid, values, :param values: = dict containing column values to update """ rv = IMPL.instance_info_cache_update(context, instance_uuid, values) - try: - cells_rpcapi.CellsAPI().instance_info_cache_update_at_top(context, - rv) - except Exception: - LOG.exception(_("Failed to notify cells of instance info cache " - "update")) + if update_cells: + try: + cells_rpcapi.CellsAPI().instance_info_cache_update_at_top( + context, rv) + except Exception: + LOG.exception(_("Failed to notify cells of instance info " + "cache update")) return rv @@ -1357,19 +1366,19 @@ def cell_create(context, values): return IMPL.cell_create(context, values) -def cell_update(context, cell_id, values): +def cell_update(context, cell_name, values): """Update a child Cell entry.""" - return IMPL.cell_update(context, cell_id, values) + return IMPL.cell_update(context, cell_name, values) -def cell_delete(context, cell_id): +def cell_delete(context, cell_name): """Delete a child Cell.""" - return IMPL.cell_delete(context, cell_id) + return IMPL.cell_delete(context, cell_name) -def cell_get(context, cell_id): +def cell_get(context, cell_name): """Get a specific child Cell.""" - return IMPL.cell_get(context, cell_id) + return IMPL.cell_get(context, cell_name) def cell_get_all(context): diff --git a/nova/db/sqlalchemy/api.py 
b/nova/db/sqlalchemy/api.py index 8930f6ccc..5317487cd 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -253,6 +253,12 @@ def exact_filter(query, model, filters, legal_keys): return query +def convert_datetimes(values, *datetime_keys): + for key in values: + if key in datetime_keys and isinstance(values[key], basestring): + values[key] = timeutils.parse_strtime(values[key]) + return values + ################### @@ -337,15 +343,6 @@ def service_get_all(context, disabled=None): @require_admin_context -def service_does_host_exist(context, host_name, include_disabled): - query = get_session().query(func.count(models.Service.host)).\ - filter_by(host=host_name) - if not include_disabled: - query = query.filter_by(disabled=False) - return query.scalar() > 0 - - -@require_admin_context def service_get_all_by_topic(context, topic): return model_query(context, models.Service, read_deleted="no").\ filter_by(disabled=False).\ @@ -370,12 +367,12 @@ def service_get_all_by_host(context, host): @require_admin_context -def service_get_all_compute_by_host(context, host): +def service_get_by_compute_host(context, host): result = model_query(context, models.Service, read_deleted="no").\ options(joinedload('compute_node')).\ filter_by(host=host).\ filter_by(topic=CONF.compute_topic).\ - all() + first() if not result: raise exception.ComputeHostNotFound(host=host) @@ -451,6 +448,7 @@ def service_update(context, service_id, values): service_ref = service_get(context, service_id, session=session) service_ref.update(values) service_ref.save(session=session) + return service_ref ################### @@ -506,6 +504,7 @@ def compute_node_create(context, values): """Creates a new ComputeNode and populates the capacity fields with the most recent data.""" _prep_stats_dict(values) + convert_datetimes(values, 'created_at', 'deleted_at', 'updated_at') compute_node_ref = models.ComputeNode() compute_node_ref.update(values) @@ -554,9 +553,10 @@ def 
compute_node_update(context, compute_id, values, prune_stats=False): stats = values.pop('stats', {}) session = get_session() - with session.begin(subtransactions=True): + with session.begin(): _update_stats(context, stats, compute_id, session, prune_stats) compute_ref = _compute_node_get(context, compute_id, session=session) + convert_datetimes(values, 'created_at', 'deleted_at', 'updated_at') compute_ref.update(values) return compute_ref @@ -891,15 +891,12 @@ def _floating_ip_get_by_address(context, address, session=None): @require_context def floating_ip_get_by_fixed_address(context, fixed_address): - subq = model_query(context, models.FixedIp.id).\ - filter_by(address=fixed_address).\ - limit(1).\ - subquery() return model_query(context, models.FloatingIp).\ - filter_by(fixed_ip_id=subq.as_scalar()).\ - all() - - # NOTE(tr3buchet) please don't invent an exception here, empty list is fine + outerjoin(models.FixedIp, + models.FixedIp.id == + models.FloatingIp.fixed_ip_id).\ + filter(models.FixedIp.address == fixed_address).\ + all() @require_context @@ -1196,6 +1193,17 @@ def fixed_ip_get_by_address_detailed(context, address, session=None): @require_context +def fixed_ip_get_by_floating_address(context, floating_address): + return model_query(context, models.FixedIp).\ + outerjoin(models.FloatingIp, + models.FloatingIp.fixed_ip_id == + models.FixedIp.id).\ + filter(models.FloatingIp.address == floating_address).\ + first() + # NOTE(tr3buchet) please don't invent an exception here, empty list is fine + + +@require_context def fixed_ip_get_by_instance(context, instance_uuid): if not uuidutils.is_uuid_like(instance_uuid): raise exception.InvalidUUID(uuid=instance_uuid) @@ -1787,42 +1795,6 @@ def instance_get_all_hung_in_rebooting(context, reboot_window): @require_context -def instance_test_and_set(context, instance_uuid, attr, ok_states, new_state): - """Atomically check if an instance is in a valid state, and if it is, set - the instance into a new state. 
- """ - if not uuidutils.is_uuid_like(instance_uuid): - raise exception.InvalidUUID(instance_uuid) - - session = get_session() - with session.begin(): - query = model_query(context, models.Instance, session=session, - project_only=True).\ - filter_by(uuid=instance_uuid) - - attr_column = getattr(models.Instance, attr) - filter_op = None - # NOTE(boris-42): `SELECT IN` doesn't work with None values because - # they are incomparable. - if None in ok_states: - filter_op = or_(attr_column == None, - attr_column.in_(filter(lambda x: x is not None, - ok_states))) - else: - filter_op = attr_column.in_(ok_states) - - count = query.filter(filter_op).\ - update({attr: new_state}, synchronize_session=False) - if count == 0: - instance_ref = query.first() - raise exception.InstanceInvalidState( - attr=attr, - instance_uuid=instance_ref['uuid'], - state=instance_ref[attr], - method='instance_test_and_set') - - -@require_context def instance_update(context, instance_uuid, values): instance_ref = _instance_update(context, instance_uuid, values)[1] return instance_ref @@ -3694,7 +3666,7 @@ def instance_type_destroy(context, name): @require_context def _instance_type_access_query(context, session=None): return model_query(context, models.InstanceTypeProjects, session=session, - read_deleted="yes") + read_deleted="no") @require_admin_context @@ -3710,6 +3682,8 @@ def instance_type_access_get_by_flavor_id(context, flavor_id): @require_admin_context def instance_type_access_add(context, flavor_id, project_id): """Add given tenant to the flavor access list.""" + # NOTE(boris-42): There is a race condition in this method and it will be + # rewritten after bp/db-unique-keys implementation. 
session = get_session() with session.begin(): instance_type_ref = instance_type_get_by_flavor_id(context, flavor_id, @@ -3717,21 +3691,16 @@ def instance_type_access_add(context, flavor_id, project_id): instance_type_id = instance_type_ref['id'] access_ref = _instance_type_access_query(context, session=session).\ filter_by(instance_type_id=instance_type_id).\ - filter_by(project_id=project_id).first() - - if not access_ref: - access_ref = models.InstanceTypeProjects() - access_ref.instance_type_id = instance_type_id - access_ref.project_id = project_id - access_ref.save(session=session) - elif access_ref.deleted: - access_ref.update({'deleted': False, - 'deleted_at': None}) - access_ref.save(session=session) - else: + filter_by(project_id=project_id).\ + first() + if access_ref: raise exception.FlavorAccessExists(flavor_id=flavor_id, project_id=project_id) + access_ref = models.InstanceTypeProjects() + access_ref.update({"instance_type_id": instance_type_id, + "project_id": project_id}) + access_ref.save(session=session) return access_ref @@ -3747,7 +3716,6 @@ def instance_type_access_remove(context, flavor_id, project_id): filter_by(instance_type_id=instance_type_id).\ filter_by(project_id=project_id).\ soft_delete() - if count == 0: raise exception.FlavorAccessNotFound(flavor_id=flavor_id, project_id=project_id) @@ -3764,34 +3732,30 @@ def cell_create(context, values): return cell -def _cell_get_by_id_query(context, cell_id, session=None): - return model_query(context, models.Cell, session=session).\ - filter_by(id=cell_id) +def _cell_get_by_name_query(context, cell_name, session=None): + return model_query(context, models.Cell, + session=session).filter_by(name=cell_name) @require_admin_context -def cell_update(context, cell_id, values): - cell = cell_get(context, cell_id) - cell.update(values) - cell.save() +def cell_update(context, cell_name, values): + session = get_session() + with session.begin(): + cell = _cell_get_by_name_query(context, cell_name, 
session=session) + cell.update(values) return cell @require_admin_context -def cell_delete(context, cell_id): - session = get_session() - with session.begin(): - return _cell_get_by_id_query(context, cell_id, session=session).\ - delete() +def cell_delete(context, cell_name): + return _cell_get_by_name_query(context, cell_name).soft_delete() @require_admin_context -def cell_get(context, cell_id): - result = _cell_get_by_id_query(context, cell_id).first() - +def cell_get(context, cell_name): + result = _cell_get_by_name_query(context, cell_name).first() if not result: - raise exception.CellNotFound(cell_id=cell_id) - + raise exception.CellNotFound(cell_name=cell_name) return result @@ -4119,21 +4083,42 @@ def instance_type_extra_specs_get_item(context, flavor_id, key, @require_context -def instance_type_extra_specs_update_or_create(context, flavor_id, - specs): +def instance_type_extra_specs_update_or_create(context, flavor_id, specs): + # NOTE(boris-42): There is a race condition in this method. We should add + # UniqueConstraint on (instance_type_id, key, deleted) to + # avoid duplicated instance_type_extra_specs. This will be + # possible after bp/db-unique-keys implementation. 
session = get_session() - spec_ref = None - instance_type = instance_type_get_by_flavor_id(context, flavor_id) - for key, value in specs.iteritems(): - try: - spec_ref = instance_type_extra_specs_get_item( - context, flavor_id, key, session) - except exception.InstanceTypeExtraSpecsNotFound: + with session.begin(): + instance_type_id = model_query(context, models.InstanceTypes.id, + session=session, read_deleted="no").\ + filter(models.InstanceTypes.flavorid == flavor_id).\ + first() + if not instance_type_id: + raise exception.FlavorNotFound(flavor_id=flavor_id) + + instance_type_id = instance_type_id.id + + spec_refs = model_query(context, models.InstanceTypeExtraSpecs, + session=session, read_deleted="no").\ + filter_by(instance_type_id=instance_type_id).\ + filter(models.InstanceTypeExtraSpecs.key.in_(specs.keys())).\ + all() + + existing_keys = set() + for spec_ref in spec_refs: + key = spec_ref["key"] + existing_keys.add(key) + spec_ref.update({"value": specs[key]}) + + for key, value in specs.iteritems(): + if key in existing_keys: + continue spec_ref = models.InstanceTypeExtraSpecs() - spec_ref.update({"key": key, "value": value, - "instance_type_id": instance_type["id"], - "deleted": False}) - spec_ref.save(session=session) + spec_ref.update({"key": key, "value": value, + "instance_type_id": instance_type_id}) + session.add(spec_ref) + return specs @@ -4463,28 +4448,33 @@ def aggregate_metadata_get_item(context, aggregate_id, key, session=None): @require_admin_context @require_aggregate_exists def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False): + # NOTE(boris-42): There is a race condition in this method. We should add + # UniqueConstraint on (start_period, uuid, mac, deleted) to + # avoid duplicated aggregate_metadata. This will be + # possible after bp/db-unique-keys implementation. 
session = get_session() all_keys = metadata.keys() with session.begin(): query = aggregate_metadata_get_query(context, aggregate_id, + read_deleted='no', session=session) if set_delete: query.filter(~models.AggregateMetadata.key.in_(all_keys)).\ soft_delete(synchronize_session=False) query = query.filter(models.AggregateMetadata.key.in_(all_keys)) - already_existing_keys = [] + already_existing_keys = set() for meta_ref in query.all(): key = meta_ref.key - meta_ref.update({"value": metadata[key], - "deleted": False, - "deleted_at": None}) - already_existing_keys.append(key) + meta_ref.update({"value": metadata[key]}) + already_existing_keys.add(key) - for key in set(all_keys) - set(already_existing_keys): + for key, value in metadata.iteritems(): + if key in already_existing_keys: + continue meta_ref = models.AggregateMetadata() meta_ref.update({"key": key, - "value": metadata[key], + "value": value, "aggregate_id": aggregate_id}) session.add(meta_ref) @@ -4518,25 +4508,24 @@ def aggregate_host_delete(context, aggregate_id, host): @require_admin_context @require_aggregate_exists def aggregate_host_add(context, aggregate_id, host): + # NOTE(boris-42): There is a race condition in this method and it will be + # rewritten after bp/db-unique-keys implementation. 
session = get_session() - host_ref = _aggregate_get_query(context, - models.AggregateHost, - models.AggregateHost.aggregate_id, - aggregate_id, - session=session, - read_deleted='yes').\ - filter_by(host=host).first() - if not host_ref: + with session.begin(): + host_ref = _aggregate_get_query(context, + models.AggregateHost, + models.AggregateHost.aggregate_id, + aggregate_id, + session=session, + read_deleted='no').\ + filter_by(host=host).\ + first() + if host_ref: + raise exception.AggregateHostExists(host=host, + aggregate_id=aggregate_id) host_ref = models.AggregateHost() - values = {"host": host, "aggregate_id": aggregate_id, } - host_ref.update(values) + host_ref.update({"host": host, "aggregate_id": aggregate_id}) host_ref.save(session=session) - elif host_ref.deleted: - host_ref.update({'deleted': False, 'deleted_at': None}) - host_ref.save(session=session) - else: - raise exception.AggregateHostExists(host=host, - aggregate_id=aggregate_id) return host_ref @@ -4740,49 +4729,44 @@ def _ec2_instance_get_query(context, session=None): @require_admin_context -def task_log_get(context, task_name, period_beginning, - period_ending, host, state=None, session=None): +def _task_log_get_query(context, task_name, period_beginning, + period_ending, host=None, state=None, session=None): query = model_query(context, models.TaskLog, session=session).\ filter_by(task_name=task_name).\ filter_by(period_beginning=period_beginning).\ - filter_by(period_ending=period_ending).\ - filter_by(host=host) + filter_by(period_ending=period_ending) + if host is not None: + query = query.filter_by(host=host) if state is not None: query = query.filter_by(state=state) + return query - return query.first() + +@require_admin_context +def task_log_get(context, task_name, period_beginning, period_ending, host, + state=None): + return _task_log_get_query(task_name, period_beginning, period_ending, + host, state).first() @require_admin_context -def task_log_get_all(context, task_name, 
period_beginning, - period_ending, host=None, state=None, session=None): - query = model_query(context, models.TaskLog, session=session).\ - filter_by(task_name=task_name).\ - filter_by(period_beginning=period_beginning).\ - filter_by(period_ending=period_ending) - if host is not None: - query = query.filter_by(host=host) - if state is not None: - query = query.filter_by(state=state) - return query.all() +def task_log_get_all(context, task_name, period_beginning, period_ending, + host=None, state=None): + return _task_log_get_query(task_name, period_beginning, period_ending, + host, state).all() @require_admin_context -def task_log_begin_task(context, task_name, - period_beginning, - period_ending, - host, - task_items=None, - message=None, - session=None): - session = session or get_session() +def task_log_begin_task(context, task_name, period_beginning, period_ending, + host, task_items=None, message=None): + # NOTE(boris-42): This method has a race condition and will be rewritten + # after bp/db-unique-keys implementation. + session = get_session() with session.begin(): - task = task_log_get(context, task_name, - period_beginning, - period_ending, - host, - session=session) - if task: + task_ref = _task_log_get_query(context, task_name, period_beginning, + period_ending, host, session=session).\ + first() + if task_ref: #It's already run(ning)! 
raise exception.TaskAlreadyRunning(task_name=task_name, host=host) task = models.TaskLog() @@ -4796,30 +4780,20 @@ def task_log_begin_task(context, task_name, if task_items: task.task_items = task_items task.save(session=session) - return task @require_admin_context -def task_log_end_task(context, task_name, - period_beginning, - period_ending, - host, - errors, - message=None, - session=None): - session = session or get_session() +def task_log_end_task(context, task_name, period_beginning, period_ending, + host, errors, message=None): + values = dict(state="DONE", errors=errors) + if message: + values["message"] = message + + session = get_session() with session.begin(): - task = task_log_get(context, task_name, - period_beginning, - period_ending, - host, - session=session) - if not task: + rows = _task_log_get_query(context, task_name, period_beginning, + period_ending, host, session=session).\ + update(values) + if rows == 0: #It's not running! raise exception.TaskNotRunning(task_name=task_name, host=host) - task.state = "DONE" - if message: - task.message = message - task.errors = errors - task.save(session=session) - return task diff --git a/nova/db/sqlalchemy/migrate_repo/versions/149_inet_datatype_for_postgres.py b/nova/db/sqlalchemy/migrate_repo/versions/149_inet_datatype_for_postgres.py new file mode 100644 index 000000000..fe9889e35 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/149_inet_datatype_for_postgres.py @@ -0,0 +1,70 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, String, Table +from sqlalchemy.dialects import postgresql + + +TABLE_COLUMNS = [ + # table name, column name + ('instances', 'access_ip_v4'), + ('instances', 'access_ip_v6'), + ('security_group_rules', 'cidr'), + ('provider_fw_rules', 'cidr'), + ('networks', 'cidr'), + ('networks', 'cidr_v6'), + ('networks', 'gateway'), + ('networks', 'gateway_v6'), + ('networks', 'netmask'), + ('networks', 'netmask_v6'), + ('networks', 'broadcast'), + ('networks', 'dns1'), + ('networks', 'dns2'), + ('networks', 'vpn_public_address'), + ('networks', 'vpn_private_address'), + ('networks', 'dhcp_start'), + ('fixed_ips', 'address'), + ('floating_ips', 'address'), + ('console_pools', 'address')] + + +def upgrade(migrate_engine): + """Convert String columns holding IP addresses to INET for postgresql.""" + meta = MetaData() + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect() + if dialect is postgresql.dialect: + for table, column in TABLE_COLUMNS: + # can't use migrate's alter() because it does not support + # explicit casting + migrate_engine.execute( + "ALTER TABLE %(table)s " + "ALTER COLUMN %(column)s TYPE INET USING %(column)s::INET" + % locals()) + else: + for table, column in TABLE_COLUMNS: + t = Table(table, meta, autoload=True) + getattr(t.c, column).alter(type=String(39)) + + +def downgrade(migrate_engine): + """Convert columns back to the larger String(255).""" + meta = MetaData() + meta.bind = migrate_engine + for table, column in TABLE_COLUMNS: + t = Table(table, meta, autoload=True) + getattr(t.c, 
column).alter(type=String(255)) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/150_add_host_to_instance_faults.py b/nova/db/sqlalchemy/migrate_repo/versions/150_add_host_to_instance_faults.py new file mode 100644 index 000000000..3fd87e1e1 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/150_add_host_to_instance_faults.py @@ -0,0 +1,36 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Index, MetaData, String, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instance_faults = Table('instance_faults', meta, autoload=True) + host = Column('host', String(length=255)) + instance_faults.create_column(host) + Index('instance_faults_host_idx', instance_faults.c.host).create( + migrate_engine) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instance_faults = Table('instance_faults', meta, autoload=True) + instance_faults.drop_column('host') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/151_change_task_log_column_type.py b/nova/db/sqlalchemy/migrate_repo/versions/151_change_task_log_column_type.py new file mode 100644 index 000000000..44c3aa41f --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/151_change_task_log_column_type.py @@ -0,0 +1,52 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2013 Wenhao Xu 
<xuwenhao2008@gmail.com>. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, String, Table, DateTime +from sqlalchemy.dialects import postgresql + + +def upgrade(migrate_engine): + """Convert period_beginning and period_ending to DateTime.""" + meta = MetaData() + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect() + + if dialect is postgresql.dialect: + # We need to handle postresql specially. + # Can't use migrate's alter() because it does not support + # explicit casting + for column in ('period_beginning', 'period_ending'): + migrate_engine.execute( + "ALTER TABLE task_log " + "ALTER COLUMN %s TYPE TIMESTAMP WITHOUT TIME ZONE " + "USING %s::TIMESTAMP WITHOUT TIME ZONE" + % (column, column)) + else: + migrations = Table('task_log', meta, autoload=True) + migrations.c.period_beginning.alter(DateTime) + migrations.c.period_ending.alter(DateTime) + + +def downgrade(migrate_engine): + """Convert columns back to String(255).""" + meta = MetaData() + meta.bind = migrate_engine + + # don't need to handle postgresql here. 
+ migrations = Table('task_log', meta, autoload=True) + migrations.c.period_beginning.alter(String(255)) + migrations.c.period_ending.alter(String(255)) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 52985a3eb..baa966dbc 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -27,6 +27,7 @@ from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float from sqlalchemy.orm import relationship, backref, object_mapper from nova.db.sqlalchemy.session import get_session +from nova.db.sqlalchemy.types import IPAddress from nova.openstack.common import cfg from nova.openstack.common import timeutils @@ -290,8 +291,8 @@ class Instance(BASE, NovaBase): # User editable field meant to represent what ip should be used # to connect to the instance - access_ip_v4 = Column(String(255)) - access_ip_v6 = Column(String(255)) + access_ip_v4 = Column(IPAddress()) + access_ip_v6 = Column(IPAddress()) auto_disk_config = Column(Boolean()) progress = Column(Integer) @@ -592,7 +593,7 @@ class SecurityGroupIngressRule(BASE, NovaBase): protocol = Column(String(5)) # "tcp", "udp", or "icmp" from_port = Column(Integer) to_port = Column(Integer) - cidr = Column(String(255)) + cidr = Column(IPAddress()) # Note: This is not the parent SecurityGroup. It's SecurityGroup we're # granting access for. 
@@ -612,7 +613,7 @@ class ProviderFirewallRule(BASE, NovaBase): protocol = Column(String(5)) # "tcp", "udp", or "icmp" from_port = Column(Integer) to_port = Column(Integer) - cidr = Column(String(255)) + cidr = Column(IPAddress()) class KeyPair(BASE, NovaBase): @@ -662,25 +663,25 @@ class Network(BASE, NovaBase): label = Column(String(255)) injected = Column(Boolean, default=False) - cidr = Column(String(255), unique=True) - cidr_v6 = Column(String(255), unique=True) + cidr = Column(IPAddress(), unique=True) + cidr_v6 = Column(IPAddress(), unique=True) multi_host = Column(Boolean, default=False) - gateway_v6 = Column(String(255)) - netmask_v6 = Column(String(255)) - netmask = Column(String(255)) + gateway_v6 = Column(IPAddress()) + netmask_v6 = Column(IPAddress()) + netmask = Column(IPAddress()) bridge = Column(String(255)) bridge_interface = Column(String(255)) - gateway = Column(String(255)) - broadcast = Column(String(255)) - dns1 = Column(String(255)) - dns2 = Column(String(255)) + gateway = Column(IPAddress()) + broadcast = Column(IPAddress()) + dns1 = Column(IPAddress()) + dns2 = Column(IPAddress()) vlan = Column(Integer) - vpn_public_address = Column(String(255)) + vpn_public_address = Column(IPAddress()) vpn_public_port = Column(Integer) - vpn_private_address = Column(String(255)) - dhcp_start = Column(String(255)) + vpn_private_address = Column(IPAddress()) + dhcp_start = Column(IPAddress()) rxtx_base = Column(Integer) @@ -705,7 +706,7 @@ class FixedIp(BASE, NovaBase): """Represents a fixed ip for an instance.""" __tablename__ = 'fixed_ips' id = Column(Integer, primary_key=True) - address = Column(String(255)) + address = Column(IPAddress()) network_id = Column(Integer, nullable=True) virtual_interface_id = Column(Integer, nullable=True) instance_uuid = Column(String(36), nullable=True) @@ -722,7 +723,7 @@ class FloatingIp(BASE, NovaBase): """Represents a floating ip that dynamically forwards to a fixed ip.""" __tablename__ = 'floating_ips' id = 
Column(Integer, primary_key=True) - address = Column(String(255)) + address = Column(IPAddress()) fixed_ip_id = Column(Integer, nullable=True) project_id = Column(String(255)) host = Column(String(255)) # , ForeignKey('hosts.id')) @@ -744,7 +745,7 @@ class ConsolePool(BASE, NovaBase): """Represents pool of consoles on the same physical node.""" __tablename__ = 'console_pools' id = Column(Integer, primary_key=True) - address = Column(String(255)) + address = Column(IPAddress()) username = Column(String(255)) password = Column(String(255)) console_type = Column(String(255)) @@ -991,6 +992,7 @@ class InstanceFault(BASE, NovaBase): code = Column(Integer(), nullable=False) message = Column(String(255)) details = Column(Text) + host = Column(String(255)) class InstanceAction(BASE, NovaBase): @@ -1036,8 +1038,8 @@ class TaskLog(BASE, NovaBase): task_name = Column(String(255), nullable=False) state = Column(String(255), nullable=False) host = Column(String(255)) - period_beginning = Column(String(255), default=timeutils.utcnow) - period_ending = Column(String(255), default=timeutils.utcnow) + period_beginning = Column(DateTime, default=timeutils.utcnow) + period_ending = Column(DateTime, default=timeutils.utcnow) message = Column(String(255), nullable=False) task_items = Column(Integer(), default=0) errors = Column(Integer(), default=0) diff --git a/nova/db/sqlalchemy/types.py b/nova/db/sqlalchemy/types.py new file mode 100644 index 000000000..275e61a4c --- /dev/null +++ b/nova/db/sqlalchemy/types.py @@ -0,0 +1,26 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Custom SQLAlchemy types.""" + +from sqlalchemy.dialects import postgresql +from sqlalchemy import String + + +def IPAddress(): + """An SQLAlchemy type representing an IP-address.""" + return String(39).with_variant(postgresql.INET(), 'postgresql') diff --git a/nova/exception.py b/nova/exception.py index 7ec23d32d..c15fc1e43 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -82,9 +82,11 @@ def wrap_exception(notifier=None, publisher_id=None, event_type=None, # to pass it in as a parameter. Otherwise we get a cyclic import of # nova.notifier.api -> nova.utils -> nova.exception :( def inner(f): - def wrapped(*args, **kw): + def wrapped(self, context, *args, **kw): + # Don't store self or context in the payload, it now seems to + # contain confidential information. try: - return f(*args, **kw) + return f(self, context, *args, **kw) except Exception, e: with excutils.save_and_reraise_exception(): if notifier: @@ -104,10 +106,6 @@ def wrap_exception(notifier=None, publisher_id=None, event_type=None, # propagated. 
temp_type = f.__name__ - context = get_context_from_function_and_args(f, - args, - kw) - notifier.notify(context, publisher_id, temp_type, temp_level, payload) @@ -181,8 +179,12 @@ class DBDuplicateEntry(DBError): super(DBDuplicateEntry, self).__init__(inner_exception) +class EncryptionFailure(NovaException): + message = _("Failed to encrypt text: %(reason)s") + + class DecryptionFailure(NovaException): - message = _("Failed to decrypt text") + message = _("Failed to decrypt text: %(reason)s") class VirtualInterfaceCreateException(NovaException): @@ -524,6 +526,14 @@ class PortNotFound(NotFound): message = _("Port %(port_id)s could not be found.") +class PortNotUsable(NovaException): + message = _("Port %(port_id)s not usable for instance %(instance)s.") + + +class PortNotFree(NovaException): + message = _("No free port available for instance %(instance)s.") + + class FixedIpNotFound(NotFound): message = _("No fixed IP associated with id %(id)s.") @@ -770,7 +780,7 @@ class FlavorAccessNotFound(NotFound): class CellNotFound(NotFound): - message = _("Cell %(cell_id)s could not be found.") + message = _("Cell %(cell_name)s doesn't exist.") class CellRoutingInconsistency(NovaException): @@ -1089,20 +1099,3 @@ class CryptoCAFileNotFound(FileNotFound): class CryptoCRLFileNotFound(FileNotFound): message = _("The CRL file for %(project)s could not be found") - - -def get_context_from_function_and_args(function, args, kwargs): - """Find an arg of type RequestContext and return it. - - This is useful in a couple of decorators where we don't - know much about the function we're wrapping. 
- """ - - # import here to avoid circularity: - from nova import context - - for arg in itertools.chain(kwargs.values(), args): - if isinstance(arg, context.RequestContext): - return arg - - return None diff --git a/nova/image/glance.py b/nova/image/glance.py index 75551d35c..1a6bba62f 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -22,6 +22,7 @@ from __future__ import absolute_import import copy import itertools import random +import shutil import sys import time import urlparse @@ -58,7 +59,12 @@ glance_opts = [ cfg.IntOpt('glance_num_retries', default=0, help='Number retries when downloading an image from glance'), -] + cfg.ListOpt('allowed_direct_url_schemes', + default=[], + help='A list of url scheme that can be downloaded directly ' + 'via the direct_url. Currently supported schemes: ' + '[file].'), + ] LOG = logging.getLogger(__name__) CONF = cfg.CONF @@ -254,6 +260,18 @@ class GlanceImageService(object): def download(self, context, image_id, data): """Calls out to Glance for metadata and data and writes data.""" + if 'file' in CONF.allowed_direct_url_schemes: + location = self.get_location(context, image_id) + o = urlparse.urlparse(location) + if o.scheme == "file": + with open(o.path, "r") as f: + # FIXME(jbresnah) a system call to cp could have + # significant performance advantages, however we + # do not have the path to files at this point in + # the abstraction. 
+ shutil.copyfileobj(f, data) + return + try: image_chunks = self._client.call(context, 1, 'data', image_id) except Exception: diff --git a/nova/locale/nova.pot b/nova/locale/nova.pot index 3fb397298..347b98733 100644 --- a/nova/locale/nova.pot +++ b/nova/locale/nova.pot @@ -7538,7 +7538,7 @@ msgstr "" #: nova/virt/vmwareapi/driver.py:107 msgid "" "Must specify vmwareapi_host_ip,vmwareapi_host_username and " -"vmwareapi_host_password to usecompute_driver=vmwareapi.VMWareESXDriver" +"vmwareapi_host_password to usecompute_driver=vmwareapi.VMwareESXDriver" msgstr "" #: nova/virt/vmwareapi/driver.py:258 @@ -7635,7 +7635,7 @@ msgstr "" #: nova/virt/vmwareapi/read_write_util.py:142 #, python-format -msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgid "Exception during HTTP connection close in VMwareHTTPWrite. Exception is %s" msgstr "" #: nova/virt/vmwareapi/vim.py:83 diff --git a/nova/manager.py b/nova/manager.py index cb15b776e..7df63f719 100644 --- a/nova/manager.py +++ b/nova/manager.py @@ -215,8 +215,9 @@ class Manager(base.Base): if self._periodic_spacing[task_name] is None: wait = 0 else: - wait = time.time() - (self._periodic_last_run[task_name] + - self._periodic_spacing[task_name]) + due = (self._periodic_last_run[task_name] + + self._periodic_spacing[task_name]) + wait = max(0, due - time.time()) if wait > 0.2: if wait < idle_for: idle_for = wait diff --git a/nova/network/api.py b/nova/network/api.py index 25680e656..5e3762e89 100644 --- a/nova/network/api.py +++ b/nova/network/api.py @@ -74,7 +74,11 @@ def update_instance_cache_with_nw_info(api, context, instance, class API(base.Base): - """API for interacting with the network manager.""" + """API for doing networking via the nova-network network manager. + + This is a pluggable module - other implementations do networking via + other services (such as Quantum). 
+ """ _sentinel = object() @@ -107,7 +111,7 @@ class API(base.Base): return self.network_rpcapi.get_floating_ip(context, id) def get_floating_ip_pools(self, context): - return self.network_rpcapi.get_floating_pools(context) + return self.network_rpcapi.get_floating_ip_pools(context) def get_floating_ip_by_address(self, context, address): return self.network_rpcapi.get_floating_ip_by_address(context, address) @@ -180,9 +184,15 @@ class API(base.Base): @refresh_cache def allocate_for_instance(self, context, instance, vpn, - requested_networks): + requested_networks, macs=None): """Allocates all network structures for an instance. + TODO(someone): document the rest of these parameters. + + :param macs: None or a set of MAC addresses that the instance + should use. macs is supplied by the hypervisor driver (contrast + with requested_networks which is user supplied). + NB: macs is ignored by nova-network. :returns: network info as from get_instance_nw_info() below """ args = {} diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index e6abde609..4fefb2db4 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -371,19 +371,32 @@ class IptablesManager(object): s += [('ip6tables', self.ipv6)] for cmd, tables in s: + all_tables, _err = self.execute('%s-save' % (cmd,), '-c', + run_as_root=True, + attempts=5) + all_lines = all_tables.split('\n') for table in tables: - current_table, _err = self.execute('%s-save' % (cmd,), '-c', - '-t', '%s' % (table,), - run_as_root=True, - attempts=5) - current_lines = current_table.split('\n') - new_filter = self._modify_rules(current_lines, - tables[table]) - self.execute('%s-restore' % (cmd,), '-c', run_as_root=True, - process_input='\n'.join(new_filter), - attempts=5) + start, end = self._find_table(all_lines, table) + all_lines[start:end] = self._modify_rules( + all_lines[start:end], tables[table]) + self.execute('%s-restore' % (cmd,), '-c', run_as_root=True, + process_input='\n'.join(all_lines), + 
attempts=5) LOG.debug(_("IPTablesManager.apply completed with success")) + def _find_table(self, lines, table_name): + if len(lines) < 3: + # length only <2 when fake iptables + return (0, 0) + try: + start = lines.index('*%s' % table_name) - 1 + except ValueError: + # Couldn't find table_name + # For Unit Tests + return (0, 0) + end = lines[start:].index('COMMIT') + start + 2 + return (start, end) + def _modify_rules(self, current_lines, table, binary=None): unwrapped_chains = table.unwrapped_chains chains = table.chains @@ -1150,7 +1163,7 @@ class LinuxNetInterfaceDriver(object): raise NotImplementedError() def unplug(self, network): - """Destory Linux device, return device name.""" + """Destroy Linux device, return device name.""" raise NotImplementedError() def get_dev(self, network): @@ -1390,7 +1403,7 @@ def remove_ebtables_rules(rules): def isolate_dhcp_address(interface, address): - # block arp traffic to address accross the interface + # block arp traffic to address across the interface rules = [] rules.append('INPUT -p ARP -i %s --arp-ip-dst %s -j DROP' % (interface, address)) @@ -1406,7 +1419,7 @@ def isolate_dhcp_address(interface, address): ipv4_filter.add_rule('FORWARD', '-m physdev --physdev-out %s -d 255.255.255.255 ' '-p udp --dport 67 -j DROP' % interface, top=True) - # block ip traffic to address accross the interface + # block ip traffic to address across the interface ipv4_filter.add_rule('FORWARD', '-m physdev --physdev-in %s -d %s -j DROP' % (interface, address), top=True) @@ -1416,7 +1429,7 @@ def isolate_dhcp_address(interface, address): def remove_isolate_dhcp_address(interface, address): - # block arp traffic to address accross the interface + # block arp traffic to address across the interface rules = [] rules.append('INPUT -p ARP -i %s --arp-ip-dst %s -j DROP' % (interface, address)) @@ -1432,7 +1445,7 @@ def remove_isolate_dhcp_address(interface, address): ipv4_filter.remove_rule('FORWARD', '-m physdev --physdev-out %s -d 
255.255.255.255 ' '-p udp --dport 67 -j DROP' % interface, top=True) - # block ip traffic to address accross the interface + # block ip traffic to address across the interface ipv4_filter.remove_rule('FORWARD', '-m physdev --physdev-in %s -d %s -j DROP' % (interface, address), top=True) diff --git a/nova/network/manager.py b/nova/network/manager.py index ccdac6f60..9ca7680a5 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -147,9 +147,6 @@ network_opts = [ cfg.BoolOpt('auto_assign_floating_ip', default=False, help='Autoassigning floating ip to VM'), - cfg.StrOpt('network_host', - default=socket.getfqdn(), - help='Network host to use for ip allocation in flat modes'), cfg.BoolOpt('fake_network', default=False, help='If passed, use fake network devices and addresses'), @@ -482,7 +479,7 @@ class FloatingIP(object): @wrap_check_policy def deallocate_floating_ip(self, context, address, affect_auto_assigned=False): - """Returns an floating ip to the pool.""" + """Returns a floating ip to the pool.""" floating_ip = self.db.floating_ip_get_by_address(context, address) # handle auto_assigned @@ -568,7 +565,7 @@ class FloatingIP(object): else: host = network['host'] - interface = CONF.public_interface or floating_ip['interface'] + interface = floating_ip.get('interface') if host == self.host: # i'm the correct host self._associate_floating_ip(context, floating_address, @@ -585,6 +582,7 @@ class FloatingIP(object): def _associate_floating_ip(self, context, floating_address, fixed_address, interface, instance_uuid): """Performs db and driver calls to associate floating ip & fixed ip.""" + interface = CONF.public_interface or interface @lockutils.synchronized(unicode(floating_address), 'nova-') def do_associate(): @@ -642,7 +640,7 @@ class FloatingIP(object): # send to correct host, unless i'm the correct host network = self._get_network_by_id(context, fixed_ip['network_id']) - interface = CONF.public_interface or floating_ip['interface'] + interface = 
floating_ip.get('interface') if network['multi_host']: instance = self.db.instance_get_by_uuid(context, fixed_ip['instance_uuid']) @@ -672,7 +670,7 @@ class FloatingIP(object): def _disassociate_floating_ip(self, context, address, interface, instance_uuid): """Performs db and driver calls to disassociate floating ip.""" - # disassociate floating ip + interface = CONF.public_interface or interface @lockutils.synchronized(unicode(address), 'nova-') def do_disassociate(): @@ -680,7 +678,7 @@ class FloatingIP(object): # actually remove the ip address on the host. We are # safe from races on this host due to the decorator, # but another host might grab the ip right away. We - # don't worry about this case because the miniscule + # don't worry about this case because the minuscule # window where the ip is on both hosts shouldn't cause # any problems. fixed_address = self.db.floating_ip_disassociate(context, address) @@ -710,6 +708,13 @@ class FloatingIP(object): @wrap_check_policy def get_floating_pools(self, context): """Returns list of floating pools.""" + # NOTE(maurosr) This method should be removed in future, replaced by + # get_floating_ip_pools. 
See bug #1091668 + return self.get_floating_ip_pools(context) + + @wrap_check_policy + def get_floating_ip_pools(self, context): + """Returns list of floating ip pools.""" pools = self.db.floating_ip_get_pools(context) return [dict(pool.iteritems()) for pool in pools] @@ -1925,21 +1930,11 @@ class NetworkManager(manager.SchedulerDependentManager): def get_instance_id_by_floating_address(self, context, address): """Returns the instance id a floating ip's fixed ip is allocated to.""" - floating_ip = self.db.floating_ip_get_by_address(context, address) - if floating_ip['fixed_ip_id'] is None: + fixed_ip = self.db.fixed_ip_get_by_floating_address(context, address) + if fixed_ip is None: return None - - fixed_ip = self.db.fixed_ip_get(context, floating_ip['fixed_ip_id']) - - # NOTE(tr3buchet): this can be None - # NOTE(mikal): we need to return the instance id here because its used - # by ec2 (and possibly others) - uuid = fixed_ip['instance_uuid'] - if not uuid: - return uuid - - instance = self.db.instance_get_by_uuid(context, uuid) - return instance['id'] + else: + return fixed_ip['instance_uuid'] @wrap_check_policy def get_network(self, context, network_uuid): @@ -2077,6 +2072,13 @@ class FlatManager(NetworkManager): @wrap_check_policy def get_floating_pools(self, context): """Returns list of floating pools.""" + # NOTE(maurosr) This method should be removed in future, replaced by + # get_floating_ip_pools. 
See bug #1091668 + return {} + + @wrap_check_policy + def get_floating_ip_pools(self, context): + """Returns list of floating ip pools.""" return {} @wrap_check_policy diff --git a/nova/network/model.py b/nova/network/model.py index f0a5d9d89..0771156c1 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -32,9 +32,10 @@ VIF_TYPE_802_QBG = '802.1qbg' VIF_TYPE_802_QBH = '802.1qbh' VIF_TYPE_OTHER = 'other' -# Constant for max length of 'bridge' in Network class -# Chosen to match max Linux NIC name length -BRIDGE_NAME_LEN = 14 +# Constant for max length of network interface names +# eg 'bridge' in the Network class or 'devname' in +# the VIF class +NIC_NAME_LEN = 14 class Model(dict): @@ -206,13 +207,14 @@ class Network(Model): class VIF(Model): """Represents a Virtual Interface in Nova.""" def __init__(self, id=None, address=None, network=None, type=None, - **kwargs): + devname=None, **kwargs): super(VIF, self).__init__() self['id'] = id self['address'] = address self['network'] = network or None self['type'] = type + self['devname'] = devname self._set_meta(kwargs) @@ -248,7 +250,7 @@ class VIF(Model): 'meta': {...}}] """ if self['network']: - # remove unecessary fields on fixed_ips + # remove unnecessary fields on fixed_ips ips = [IP(**ensure_string_keys(ip)) for ip in self.fixed_ips()] for ip in ips: # remove floating ips from IP, since this is a flat structure @@ -377,6 +379,7 @@ class NetworkInfo(list): 'broadcast': str(subnet_v4.as_netaddr().broadcast), 'mac': vif['address'], 'vif_type': vif['type'], + 'vif_devname': vif.get('devname'), 'vif_uuid': vif['id'], 'rxtx_cap': vif.get_meta('rxtx_cap', 0), 'dns': [get_ip(ip) for ip in subnet_v4['dns']], diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py index 51386b4fd..29e5e2f06 100644 --- a/nova/network/quantumv2/api.py +++ b/nova/network/quantumv2/api.py @@ -48,6 +48,11 @@ quantum_opts = [ default='keystone', help='auth strategy for connecting to ' 'quantum in admin context'), 
+ # TODO(berrange) temporary hack until Quantum can pass over the + # name of the OVS bridge it is configured with + cfg.StrOpt('quantum_ovs_bridge', + default='br-int', + help='Name of Integration Bridge used by Open vSwitch'), ] CONF = cfg.CONF @@ -99,7 +104,26 @@ class API(base.Base): return nets def allocate_for_instance(self, context, instance, **kwargs): - """Allocate all network resources for the instance.""" + """Allocate all network resources for the instance. + + TODO(someone): document the rest of these parameters. + + :param macs: None or a set of MAC addresses that the instance + should use. macs is supplied by the hypervisor driver (contrast + with requested_networks which is user supplied). + NB: QuantumV2 currently assigns hypervisor supplied MAC addresses + to arbitrary networks, which requires openflow switches to + function correctly if more than one network is being used with + the bare metal hypervisor (which is the only one known to limit + MAC addresses). + """ + hypervisor_macs = kwargs.get('macs', None) + available_macs = None + if hypervisor_macs is not None: + # Make a copy we can mutate: records macs that have not been used + # to create a port on a network. If we find a mac with a + # pre-allocated port we also remove it from this set. + available_macs = set(hypervisor_macs) quantum = quantumv2.get_client(context) LOG.debug(_('allocate_for_instance() for %s'), instance['display_name']) @@ -114,7 +138,17 @@ class API(base.Base): if requested_networks: for network_id, fixed_ip, port_id in requested_networks: if port_id: - port = quantum.show_port(port_id).get('port') + port = quantum.show_port(port_id)['port'] + if hypervisor_macs is not None: + if port['mac_address'] not in hypervisor_macs: + raise exception.PortNotUsable(port_id=port_id, + instance=instance['display_name']) + else: + # Don't try to use this MAC if we need to create a + # port on the fly later. 
Identical MACs may be + # configured by users into multiple ports so we + # discard rather than popping. + available_macs.discard(port['mac_address']) network_id = port['network_id'] ports[network_id] = port elif fixed_ip: @@ -123,7 +157,6 @@ class API(base.Base): nets = self._get_available_networks(context, instance['project_id'], net_ids) - touched_port_ids = [] created_port_ids = [] for network in nets: @@ -143,6 +176,12 @@ class API(base.Base): port_req_body['port']['network_id'] = network_id port_req_body['port']['admin_state_up'] = True port_req_body['port']['tenant_id'] = instance['project_id'] + if available_macs is not None: + if not available_macs: + raise exception.PortNotFree( + instance=instance['display_name']) + mac_address = available_macs.pop() + port_req_body['port']['mac_address'] = mac_address created_port_ids.append( quantum.create_port(port_req_body)['port']['id']) except Exception: @@ -199,11 +238,62 @@ class API(base.Base): def add_fixed_ip_to_instance(self, context, instance, network_id): """Add a fixed ip to the instance from specified network.""" - raise NotImplementedError() + search_opts = {'network_id': network_id} + data = quantumv2.get_client(context).list_subnets(**search_opts) + ipam_subnets = data.get('subnets', []) + if not ipam_subnets: + raise exception.NetworkNotFoundForInstance( + instance_id=instance['uuid']) + + zone = 'compute:%s' % instance['availability_zone'] + search_opts = {'device_id': instance['uuid'], + 'device_owner': zone, + 'network_id': network_id} + data = quantumv2.get_client(context).list_ports(**search_opts) + ports = data['ports'] + for p in ports: + fixed_ips = p['fixed_ips'] + for subnet in ipam_subnets: + fixed_ip = {'subnet_id': subnet['id']} + fixed_ips.append(fixed_ip) + port_req_body = {'port': {'fixed_ips': fixed_ips}} + try: + quantumv2.get_client(context).update_port(p['id'], + port_req_body) + except Exception as ex: + msg = _("Unable to update port %(portid)s with" + " failure: %(exception)s") 
+ LOG.debug(msg, {'portid': p['id'], 'exception': ex}) + return + raise exception.NetworkNotFoundForInstance( + instance_id=instance['uuid']) def remove_fixed_ip_from_instance(self, context, instance, address): """Remove a fixed ip from the instance.""" - raise NotImplementedError() + zone = 'compute:%s' % instance['availability_zone'] + search_opts = {'device_id': instance['uuid'], + 'device_owner': zone, + 'fixed_ips': 'ip_address=%s' % address} + data = quantumv2.get_client(context).list_ports(**search_opts) + ports = data['ports'] + for p in ports: + fixed_ips = p['fixed_ips'] + new_fixed_ips = [] + for fixed_ip in fixed_ips: + if fixed_ip['ip_address'] != address: + new_fixed_ips.append(fixed_ip) + port_req_body = {'port': {'fixed_ips': new_fixed_ips}} + try: + quantumv2.get_client(context).update_port(p['id'], + port_req_body) + except Exception as ex: + msg = _("Unable to update port %(portid)s with" + " failure: %(exception)s") + LOG.debug(msg, {'portid': p['id'], 'exception': ex}) + return + + raise exception.FixedIpNotFoundForSpecificInstance( + instance_uuid=instance['uuid'], ip=address) def validate_networks(self, context, requested_networks): """Validate that the tenant can use the requested networks.""" @@ -570,9 +660,24 @@ class API(base.Base): subnet['ips'] = [fixed_ip for fixed_ip in network_IPs if fixed_ip.is_in_subnet(subnet)] + bridge = None + vif_type = port.get('binding:vif_type') + # TODO(berrange) Quantum should pass the bridge name + # in another binding metadata field + if vif_type == network_model.VIF_TYPE_OVS: + bridge = CONF.quantum_ovs_bridge + elif vif_type == network_model.VIF_TYPE_BRIDGE: + bridge = "brq" + port['network_id'] + + if bridge is not None: + bridge = bridge[:network_model.NIC_NAME_LEN] + + devname = "tap" + port['id'] + devname = devname[:network_model.NIC_NAME_LEN] + network = network_model.Network( id=port['network_id'], - bridge='', # Quantum ignores this field + bridge=bridge, injected=CONF.flat_injected, 
label=network_name, tenant_id=net['tenant_id'] @@ -582,7 +687,8 @@ class API(base.Base): id=port['id'], address=port['mac_address'], network=network, - type=port.get('binding:vif_type'))) + type=port.get('binding:vif_type'), + devname=devname)) return nw_info def _get_subnets_from_port(self, context, port): diff --git a/nova/network/rpcapi.py b/nova/network/rpcapi.py index 2f52add57..a7bffe17a 100644 --- a/nova/network/rpcapi.py +++ b/nova/network/rpcapi.py @@ -45,6 +45,7 @@ class NetworkAPI(rpc_proxy.RpcProxy): 1.4 - Add get_backdoor_port() 1.5 - Adds associate 1.6 - Adds instance_uuid to _{dis,}associate_floating_ip + 1.7 - Adds method get_floating_ip_pools to replace get_floating_pools ''' # @@ -94,8 +95,9 @@ class NetworkAPI(rpc_proxy.RpcProxy): def get_floating_ip(self, ctxt, id): return self.call(ctxt, self.make_msg('get_floating_ip', id=id)) - def get_floating_pools(self, ctxt): - return self.call(ctxt, self.make_msg('get_floating_pools')) + def get_floating_ip_pools(self, ctxt): + return self.call(ctxt, self.make_msg('get_floating_ip_pools'), + version="1.7") def get_floating_ip_by_address(self, ctxt, address): return self.call(ctxt, self.make_msg('get_floating_ip_by_address', diff --git a/nova/openstack/common/cfg.py b/nova/openstack/common/cfg.py index ad1f2a8a6..534a610c0 100644 --- a/nova/openstack/common/cfg.py +++ b/nova/openstack/common/cfg.py @@ -217,7 +217,7 @@ log files:: ... 
] -This module also contains a global instance of the CommonConfigOpts class +This module also contains a global instance of the ConfigOpts class in order to support a common usage pattern in OpenStack:: from nova.openstack.common import cfg @@ -236,10 +236,11 @@ in order to support a common usage pattern in OpenStack:: Positional command line arguments are supported via a 'positional' Opt constructor argument:: - >>> CONF.register_cli_opt(MultiStrOpt('bar', positional=True)) + >>> conf = ConfigOpts() + >>> conf.register_cli_opt(MultiStrOpt('bar', positional=True)) True - >>> CONF(['a', 'b']) - >>> CONF.bar + >>> conf(['a', 'b']) + >>> conf.bar ['a', 'b'] It is also possible to use argparse "sub-parsers" to parse additional @@ -249,10 +250,11 @@ command line arguments using the SubCommandOpt class: ... list_action = subparsers.add_parser('list') ... list_action.add_argument('id') ... - >>> CONF.register_cli_opt(SubCommandOpt('action', handler=add_parsers)) + >>> conf = ConfigOpts() + >>> conf.register_cli_opt(SubCommandOpt('action', handler=add_parsers)) True - >>> CONF(['list', '10']) - >>> CONF.action.name, CONF.action.id + >>> conf(args=['list', '10']) + >>> conf.action.name, conf.action.id ('list', '10') """ @@ -1726,62 +1728,4 @@ class ConfigOpts(collections.Mapping): return value -class CommonConfigOpts(ConfigOpts): - - DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s" - DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" - - common_cli_opts = [ - BoolOpt('debug', - short='d', - default=False, - help='Print debugging output'), - BoolOpt('verbose', - short='v', - default=False, - help='Print more verbose output'), - ] - - logging_cli_opts = [ - StrOpt('log-config', - metavar='PATH', - help='If this option is specified, the logging configuration ' - 'file specified is used and overrides any other logging ' - 'options specified. 
Please see the Python logging module ' - 'documentation for details on logging configuration ' - 'files.'), - StrOpt('log-format', - default=DEFAULT_LOG_FORMAT, - metavar='FORMAT', - help='A logging.Formatter log message format string which may ' - 'use any of the available logging.LogRecord attributes. ' - 'Default: %(default)s'), - StrOpt('log-date-format', - default=DEFAULT_LOG_DATE_FORMAT, - metavar='DATE_FORMAT', - help='Format string for %%(asctime)s in log records. ' - 'Default: %(default)s'), - StrOpt('log-file', - metavar='PATH', - deprecated_name='logfile', - help='(Optional) Name of log file to output to. ' - 'If not set, logging will go to stdout.'), - StrOpt('log-dir', - deprecated_name='logdir', - help='(Optional) The directory to keep log files in ' - '(will be prepended to --log-file)'), - BoolOpt('use-syslog', - default=False, - help='Use syslog for logging.'), - StrOpt('syslog-log-facility', - default='LOG_USER', - help='syslog facility to receive log lines') - ] - - def __init__(self): - super(CommonConfigOpts, self).__init__() - self.register_cli_opts(self.common_cli_opts) - self.register_cli_opts(self.logging_cli_opts) - - -CONF = CommonConfigOpts() +CONF = ConfigOpts() diff --git a/nova/openstack/common/eventlet_backdoor.py b/nova/openstack/common/eventlet_backdoor.py index f18e84f6d..118385427 100644 --- a/nova/openstack/common/eventlet_backdoor.py +++ b/nova/openstack/common/eventlet_backdoor.py @@ -46,7 +46,7 @@ def _find_objects(t): def _print_greenthreads(): - for i, gt in enumerate(find_objects(greenlet.greenlet)): + for i, gt in enumerate(_find_objects(greenlet.greenlet)): print i, gt traceback.print_stack(gt.gr_frame) print diff --git a/nova/openstack/common/iniparser.py b/nova/openstack/common/iniparser.py index 241284449..9bf399f0c 100644 --- a/nova/openstack/common/iniparser.py +++ b/nova/openstack/common/iniparser.py @@ -54,7 +54,7 @@ class BaseParser(object): value = value.strip() if ((value and value[0] == value[-1]) and - 
(value[0] == "\"" or value[0] == "'")): + (value[0] == "\"" or value[0] == "'")): value = value[1:-1] return key.strip(), [value] diff --git a/nova/openstack/common/lockutils.py b/nova/openstack/common/lockutils.py index ba390dc69..6f80a1f67 100644 --- a/nova/openstack/common/lockutils.py +++ b/nova/openstack/common/lockutils.py @@ -28,6 +28,7 @@ from eventlet import semaphore from nova.openstack.common import cfg from nova.openstack.common import fileutils +from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging @@ -219,6 +220,11 @@ def synchronized(name, lock_file_prefix, external=False, lock_path=None): 'method': f.__name__}) retval = f(*args, **kwargs) finally: + LOG.debug(_('Released file lock "%(lock)s" at %(path)s' + ' for method "%(method)s"...'), + {'lock': name, + 'path': lock_file_path, + 'method': f.__name__}) # NOTE(vish): This removes the tempdir if we needed # to create one. This is used to cleanup # the locks left behind by unit tests. diff --git a/nova/openstack/common/log.py b/nova/openstack/common/log.py index 6e25bb597..32513bb32 100644 --- a/nova/openstack/common/log.py +++ b/nova/openstack/common/log.py @@ -47,21 +47,82 @@ from nova.openstack.common import local from nova.openstack.common import notifier +_DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s" +_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" + +common_cli_opts = [ + cfg.BoolOpt('debug', + short='d', + default=False, + help='Print debugging output (set logging level to ' + 'DEBUG instead of default WARNING level).'), + cfg.BoolOpt('verbose', + short='v', + default=False, + help='Print more verbose output (set logging level to ' + 'INFO instead of default WARNING level).'), +] + +logging_cli_opts = [ + cfg.StrOpt('log-config', + metavar='PATH', + help='If this option is specified, the logging configuration ' + 'file specified is used and overrides any other logging ' + 'options specified. 
Please see the Python logging module ' + 'documentation for details on logging configuration ' + 'files.'), + cfg.StrOpt('log-format', + default=_DEFAULT_LOG_FORMAT, + metavar='FORMAT', + help='A logging.Formatter log message format string which may ' + 'use any of the available logging.LogRecord attributes. ' + 'Default: %(default)s'), + cfg.StrOpt('log-date-format', + default=_DEFAULT_LOG_DATE_FORMAT, + metavar='DATE_FORMAT', + help='Format string for %%(asctime)s in log records. ' + 'Default: %(default)s'), + cfg.StrOpt('log-file', + metavar='PATH', + deprecated_name='logfile', + help='(Optional) Name of log file to output to. ' + 'If not set, logging will go to stdout.'), + cfg.StrOpt('log-dir', + deprecated_name='logdir', + help='(Optional) The directory to keep log files in ' + '(will be prepended to --log-file)'), + cfg.BoolOpt('use-syslog', + default=False, + help='Use syslog for logging.'), + cfg.StrOpt('syslog-log-facility', + default='LOG_USER', + help='syslog facility to receive log lines') +] + +generic_log_opts = [ + cfg.BoolOpt('use_stderr', + default=True, + help='Log output to standard error'), + cfg.StrOpt('logfile_mode', + default='0644', + help='Default file mode used when creating log files'), +] + log_opts = [ cfg.StrOpt('logging_context_format_string', - default='%(asctime)s.%(msecs)d %(levelname)s %(name)s ' + default='%(asctime)s.%(msecs)03d %(levelname)s %(name)s ' '[%(request_id)s %(user)s %(tenant)s] %(instance)s' '%(message)s', help='format string to use for log messages with context'), cfg.StrOpt('logging_default_format_string', - default='%(asctime)s.%(msecs)d %(process)d %(levelname)s ' + default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' '%(name)s [-] %(instance)s%(message)s', help='format string to use for log messages without context'), cfg.StrOpt('logging_debug_format_suffix', default='%(funcName)s %(pathname)s:%(lineno)d', help='data to append to log format when level is DEBUG'), cfg.StrOpt('logging_exception_prefix', 
- default='%(asctime)s.%(msecs)d %(process)d TRACE %(name)s ' + default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s ' '%(instance)s', help='prefix each line of exception output with this format'), cfg.ListOpt('default_log_levels', @@ -94,24 +155,9 @@ log_opts = [ 'format it like this'), ] - -generic_log_opts = [ - cfg.StrOpt('logdir', - default=None, - help='Log output to a per-service log file in named directory'), - cfg.StrOpt('logfile', - default=None, - help='Log output to a named file'), - cfg.BoolOpt('use_stderr', - default=True, - help='Log output to standard error'), - cfg.StrOpt('logfile_mode', - default='0644', - help='Default file mode used when creating log files'), -] - - CONF = cfg.CONF +CONF.register_cli_opts(common_cli_opts) +CONF.register_cli_opts(logging_cli_opts) CONF.register_opts(generic_log_opts) CONF.register_opts(log_opts) @@ -149,8 +195,8 @@ def _get_binary_name(): def _get_log_file_path(binary=None): - logfile = CONF.log_file or CONF.logfile - logdir = CONF.log_dir or CONF.logdir + logfile = CONF.log_file + logdir = CONF.log_dir if logfile and not logdir: return logfile @@ -259,7 +305,7 @@ class JSONFormatter(logging.Formatter): class PublishErrorsHandler(logging.Handler): def emit(self, record): if ('nova.openstack.common.notifier.log_notifier' in - CONF.notification_driver): + CONF.notification_driver): return notifier.api.notify(None, 'error.publisher', 'error_notification', @@ -361,10 +407,12 @@ def _setup_logging_from_conf(product_name): datefmt=datefmt)) handler.setFormatter(LegacyFormatter(datefmt=datefmt)) - if CONF.verbose or CONF.debug: + if CONF.debug: log_root.setLevel(logging.DEBUG) - else: + elif CONF.verbose: log_root.setLevel(logging.INFO) + else: + log_root.setLevel(logging.WARNING) level = logging.NOTSET for pair in CONF.default_log_levels: @@ -425,7 +473,7 @@ class LegacyFormatter(logging.Formatter): self._fmt = CONF.logging_default_format_string if (record.levelno == logging.DEBUG and - 
CONF.logging_debug_format_suffix): + CONF.logging_debug_format_suffix): self._fmt += " " + CONF.logging_debug_format_suffix # Cache this on the record, Logger will respect our formated copy diff --git a/nova/rootwrap/__init__.py b/nova/openstack/common/rootwrap/__init__.py index 671d3c173..671d3c173 100644 --- a/nova/rootwrap/__init__.py +++ b/nova/openstack/common/rootwrap/__init__.py diff --git a/nova/rootwrap/filters.py b/nova/openstack/common/rootwrap/filters.py index 8958f1ba1..905bbabea 100644 --- a/nova/rootwrap/filters.py +++ b/nova/openstack/common/rootwrap/filters.py @@ -20,7 +20,7 @@ import re class CommandFilter(object): - """Command filter only checking that the 1st argument matches exec_path.""" + """Command filter only checking that the 1st argument matches exec_path""" def __init__(self, exec_path, run_as, *args): self.name = '' @@ -30,7 +30,7 @@ class CommandFilter(object): self.real_exec = None def get_exec(self, exec_dirs=[]): - """Returns existing executable, or empty string if none found.""" + """Returns existing executable, or empty string if none found""" if self.real_exec is not None: return self.real_exec self.real_exec = "" @@ -46,7 +46,7 @@ class CommandFilter(object): return self.real_exec def match(self, userargs): - """Only check that the first argument (command) matches exec_path.""" + """Only check that the first argument (command) matches exec_path""" if (os.path.basename(self.exec_path) == userargs[0]): return True return False @@ -60,12 +60,12 @@ class CommandFilter(object): return [to_exec] + userargs[1:] def get_environment(self, userargs): - """Returns specific environment to set, None if none.""" + """Returns specific environment to set, None if none""" return None class RegExpFilter(CommandFilter): - """Command filter doing regexp matching for every argument.""" + """Command filter doing regexp matching for every argument""" def match(self, userargs): # Early skip if command or number of args don't match @@ -89,15 +89,15 @@ 
class RegExpFilter(CommandFilter): class DnsmasqFilter(CommandFilter): - """Specific filter for the dnsmasq call (which includes env).""" + """Specific filter for the dnsmasq call (which includes env)""" CONFIG_FILE_ARG = 'CONFIG_FILE' def match(self, userargs): if (userargs[0] == 'env' and - userargs[1].startswith(self.CONFIG_FILE_ARG) and - userargs[2].startswith('NETWORK_ID=') and - userargs[3] == 'dnsmasq'): + userargs[1].startswith(self.CONFIG_FILE_ARG) and + userargs[2].startswith('NETWORK_ID=') and + userargs[3] == 'dnsmasq'): return True return False @@ -114,7 +114,7 @@ class DnsmasqFilter(CommandFilter): class DeprecatedDnsmasqFilter(DnsmasqFilter): - """Variant of dnsmasq filter to support old-style FLAGFILE.""" + """Variant of dnsmasq filter to support old-style FLAGFILE""" CONFIG_FILE_ARG = 'FLAGFILE' @@ -164,7 +164,7 @@ class KillFilter(CommandFilter): class ReadFileFilter(CommandFilter): - """Specific filter for the utils.read_file_as_root call.""" + """Specific filter for the utils.read_file_as_root call""" def __init__(self, file_path, *args): self.file_path = file_path diff --git a/nova/rootwrap/wrapper.py b/nova/openstack/common/rootwrap/wrapper.py index 70bd63c47..4452177fe 100644 --- a/nova/rootwrap/wrapper.py +++ b/nova/openstack/common/rootwrap/wrapper.py @@ -22,7 +22,7 @@ import logging.handlers import os import string -from nova.rootwrap import filters +from nova.openstack.common.rootwrap import filters class NoFilterMatched(Exception): @@ -93,7 +93,7 @@ def setup_syslog(execname, facility, level): def build_filter(class_name, *args): - """Returns a filter object of class class_name.""" + """Returns a filter object of class class_name""" if not hasattr(filters, class_name): logging.warning("Skipping unknown filter class (%s) specified " "in filter definitions" % class_name) @@ -103,7 +103,7 @@ def build_filter(class_name, *args): def load_filters(filters_path): - """Load filters from a list of directories.""" + """Load filters from a list of 
directories""" filterlist = [] for filterdir in filters_path: if not os.path.isdir(filterdir): diff --git a/nova/openstack/common/rpc/impl_kombu.py b/nova/openstack/common/rpc/impl_kombu.py index bf38201f5..305dc7877 100644 --- a/nova/openstack/common/rpc/impl_kombu.py +++ b/nova/openstack/common/rpc/impl_kombu.py @@ -175,7 +175,7 @@ class ConsumerBase(object): try: self.queue.cancel(self.tag) except KeyError, e: - # NOTE(comstud): Kludge to get around a amqplib bug + # NOTE(comstud): Kludge to get around an amqplib bug if str(e) != "u'%s'" % self.tag: raise self.queue = None diff --git a/nova/quota.py b/nova/quota.py index 96e612503..1856c97c1 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -965,6 +965,7 @@ class QuotaEngine(object): # logged, however, because this is less than optimal. LOG.exception(_("Failed to commit reservations " "%(reservations)s") % locals()) + LOG.debug(_("Committed reservations %(reservations)s") % locals()) def rollback(self, context, reservations, project_id=None): """Roll back reservations. @@ -986,6 +987,7 @@ class QuotaEngine(object): # logged, however, because this is less than optimal. 
LOG.exception(_("Failed to roll back reservations " "%(reservations)s") % locals()) + LOG.debug(_("Rolled back reservations %(reservations)s") % locals()) def usage_reset(self, context, resources): """ diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index dc494af8f..09de10388 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -56,8 +56,6 @@ CONF.register_opts(scheduler_driver_opts) def handle_schedule_error(context, ex, instance_uuid, request_spec): if not isinstance(ex, exception.NoValidHost): LOG.exception(_("Exception during scheduler.run_instance")) - compute_utils.add_instance_fault_from_exc(context, - instance_uuid, ex, sys.exc_info()) state = vm_states.ERROR.upper() LOG.warning(_('Setting instance to %(state)s state.'), locals(), instance_uuid=instance_uuid) @@ -68,6 +66,8 @@ def handle_schedule_error(context, ex, instance_uuid, request_spec): 'task_state': None}) notifications.send_update(context, old_ref, new_ref, service="scheduler") + compute_utils.add_instance_fault_from_exc(context, + new_ref, ex, sys.exc_info()) properties = request_spec.get('instance_properties', {}) payload = dict(request_spec=request_spec, @@ -192,12 +192,12 @@ class Scheduler(object): # Checking src host exists and compute node src = instance_ref['host'] try: - services = db.service_get_all_compute_by_host(context, src) + service = db.service_get_by_compute_host(context, src) except exception.NotFound: raise exception.ComputeServiceUnavailable(host=src) # Checking src host is alive. - if not self.servicegroup_api.service_is_up(services[0]): + if not self.servicegroup_api.service_is_up(service): raise exception.ComputeServiceUnavailable(host=src) def _live_migration_dest_check(self, context, instance_ref, dest): @@ -209,8 +209,7 @@ class Scheduler(object): """ # Checking dest exists and compute node. 
- dservice_refs = db.service_get_all_compute_by_host(context, dest) - dservice_ref = dservice_refs[0] + dservice_ref = db.service_get_by_compute_host(context, dest) # Checking dest host is alive. if not self.servicegroup_api.service_is_up(dservice_ref): @@ -290,5 +289,5 @@ class Scheduler(object): :return: value specified by key """ - compute_node_ref = db.service_get_all_compute_by_host(context, host) - return compute_node_ref[0]['compute_node'][0] + service_ref = db.service_get_by_compute_host(context, host) + return service_ref['compute_node'][0] diff --git a/nova/scheduler/filters/trusted_filter.py b/nova/scheduler/filters/trusted_filter.py index 4d0f2305f..302d2b3a8 100644 --- a/nova/scheduler/filters/trusted_filter.py +++ b/nova/scheduler/filters/trusted_filter.py @@ -48,9 +48,12 @@ import httplib import socket import ssl +from nova import context +from nova import db from nova.openstack.common import cfg from nova.openstack.common import jsonutils from nova.openstack.common import log as logging +from nova.openstack.common import timeutils from nova.scheduler import filters @@ -78,6 +81,9 @@ trusted_opts = [ deprecated_name='auth_blob', default=None, help='attestation authorization blob - must change'), + cfg.IntOpt('attestation_auth_timeout', + default=60, + help='Attestation status cache valid period length'), ] CONF = cfg.CONF @@ -119,7 +125,7 @@ class HTTPSClientAuthConnection(httplib.HTTPSConnection): cert_reqs=ssl.CERT_REQUIRED) -class AttestationService(httplib.HTTPSConnection): +class AttestationService(object): # Provide access wrapper to attestation server to get integrity report. 
def __init__(self): @@ -156,10 +162,10 @@ class AttestationService(httplib.HTTPSConnection): except (socket.error, IOError) as e: return IOError, None - def _request(self, cmd, subcmd, host): + def _request(self, cmd, subcmd, hosts): body = {} - body['count'] = 1 - body['hosts'] = host + body['count'] = len(hosts) + body['hosts'] = hosts cooked = jsonutils.dumps(body) headers = {} headers['content-type'] = 'application/json' @@ -173,39 +179,124 @@ class AttestationService(httplib.HTTPSConnection): else: return status, None - def _check_trust(self, data, host): - for item in data: - for state in item['hosts']: - if state['host_name'] == host: - return state['trust_lvl'] - return "" + def do_attestation(self, hosts): + """Attests compute nodes through OAT service. - def do_attestation(self, host): - state = [] - status, data = self._request("POST", "PollHosts", host) - if status != httplib.OK: - return {} - state.append(data) - return self._check_trust(state, host) + :param hosts: hosts list to be attested + :returns: dictionary for trust level and validate time + """ + result = None + status, data = self._request("POST", "PollHosts", hosts) + if data != None: + result = data.get('hosts') -class TrustedFilter(filters.BaseHostFilter): - """Trusted filter to support Trusted Compute Pools.""" + return result + + +class ComputeAttestationCache(object): + """Cache for compute node attestation + + Cache compute node's trust level for sometime, + if the cache is out of date, poll OAT service to flush the + cache. + + OAT service may have cache also. OAT service's cache valid time + should be set shorter than trusted filter's cache valid time. 
+ """ def __init__(self): - self.attestation_service = AttestationService() + self.attestservice = AttestationService() + self.compute_nodes = {} + admin = context.get_admin_context() + + # Fetch compute node list to initialize the compute_nodes, + # so that we don't need poll OAT service one by one for each + # host in the first round that scheduler invokes us. + computes = db.compute_node_get_all(admin) + for compute in computes: + service = compute['service'] + if not service: + LOG.warn(_("No service for compute ID %s") % compute['id']) + continue + host = service['host'] + self._init_cache_entry(host) + + def _cache_valid(self, host): + cachevalid = False + if host in self.compute_nodes: + node_stats = self.compute_nodes.get(host) + if not timeutils.is_older_than( + node_stats['vtime'], + CONF.trusted_computing.attestation_auth_timeout): + cachevalid = True + return cachevalid + + def _init_cache_entry(self, host): + self.compute_nodes[host] = { + 'trust_lvl': 'unknown', + 'vtime': timeutils.normalize_time( + timeutils.parse_isotime("1970-01-01T00:00:00Z"))} + + def _invalidate_caches(self): + for host in self.compute_nodes: + self._init_cache_entry(host) + + def _update_cache_entry(self, state): + entry = {} + + host = state['host_name'] + entry['trust_lvl'] = state['trust_lvl'] - def _is_trusted(self, host, trust): - level = self.attestation_service.do_attestation(host) - LOG.debug(_("TCP: trust state of " - "%(host)s:%(level)s(%(trust)s)") % locals()) + try: + # Normalize as naive object to interoperate with utcnow(). + entry['vtime'] = timeutils.normalize_time( + timeutils.parse_isotime(state['vtime'])) + except ValueError: + # Mark the system as un-trusted if get invalid vtime. 
+ entry['trust_lvl'] = 'unknown' + entry['vtime'] = timeutils.utcnow() + + self.compute_nodes[host] = entry + + def _update_cache(self): + self._invalidate_caches() + states = self.attestservice.do_attestation(self.compute_nodes.keys()) + if states is None: + return + for state in states: + self._update_cache_entry(state) + + def get_host_attestation(self, host): + """Check host's trust level.""" + if not host in self.compute_nodes: + self._init_cache_entry(host) + if not self._cache_valid(host): + self._update_cache() + level = self.compute_nodes.get(host).get('trust_lvl') + return level + + +class ComputeAttestation(object): + def __init__(self): + self.caches = ComputeAttestationCache() + + def is_trusted(self, host, trust): + level = self.caches.get_host_attestation(host) return trust == level + +class TrustedFilter(filters.BaseHostFilter): + """Trusted filter to support Trusted Compute Pools.""" + + def __init__(self): + self.compute_attestation = ComputeAttestation() + def host_passes(self, host_state, filter_properties): instance = filter_properties.get('instance_type', {}) extra = instance.get('extra_specs', {}) trust = extra.get('trust:trusted_host') host = host_state.host if trust: - return self._is_trusted(host, trust) + return self.compute_attestation.is_trusted(host, trust) return True diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 033ee9cc8..23e64cd7c 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -180,8 +180,6 @@ class SchedulerManager(manager.Manager): uuids = [properties.get('uuid')] for instance_uuid in request_spec.get('instance_uuids') or uuids: if instance_uuid: - compute_utils.add_instance_fault_from_exc(context, - instance_uuid, ex, sys.exc_info()) state = vm_state.upper() LOG.warning(_('Setting instance to %(state)s state.'), locals(), instance_uuid=instance_uuid) @@ -191,6 +189,8 @@ class SchedulerManager(manager.Manager): context, instance_uuid, updates) 
notifications.send_update(context, old_ref, new_ref, service="scheduler") + compute_utils.add_instance_fault_from_exc(context, + new_ref, ex, sys.exc_info()) payload = dict(request_spec=request_spec, instance_properties=properties, @@ -220,13 +220,12 @@ class SchedulerManager(manager.Manager): """ # Getting compute node info and related instances info - compute_ref = db.service_get_all_compute_by_host(context, host) - compute_ref = compute_ref[0] + service_ref = db.service_get_by_compute_host(context, host) instance_refs = db.instance_get_all_by_host(context, - compute_ref['host']) + service_ref['host']) # Getting total available/used resource - compute_ref = compute_ref['compute_node'][0] + compute_ref = service_ref['compute_node'][0] resource = {'vcpus': compute_ref['vcpus'], 'memory_mb': compute_ref['memory_mb'], 'local_gb': compute_ref['local_gb'], diff --git a/nova/service.py b/nova/service.py index 39e414eb6..87857f93d 100644 --- a/nova/service.py +++ b/nova/service.py @@ -32,7 +32,6 @@ import greenlet from nova import conductor from nova import context -from nova import db from nova import exception from nova.openstack.common import cfg from nova.openstack.common import eventlet_backdoor @@ -62,6 +61,9 @@ service_opts = [ cfg.ListOpt('enabled_apis', default=['ec2', 'osapi_compute', 'metadata'], help='a list of APIs to enable by default'), + cfg.ListOpt('enabled_ssl_apis', + default=[], + help='a list of APIs with enabled SSL'), cfg.StrOpt('ec2_listen', default="0.0.0.0", help='IP address for EC2 API to listen'), @@ -400,6 +402,14 @@ class Service(object): self.binary = binary self.topic = topic self.manager_class_name = manager + # NOTE(russellb) We want to make sure to create the servicegroup API + # instance early, before creating other things such as the manager, + # that will also create a servicegroup API instance. 
Internally, the + # servicegroup only allocates a single instance of the driver API and + # we want to make sure that our value of db_allowed is there when it + # gets created. For that to happen, this has to be the first instance + # of the servicegroup API. + self.servicegroup_api = servicegroup.API(db_allowed=db_allowed) manager_class = importutils.import_class(self.manager_class_name) self.manager = manager_class(host=self.host, *args, **kwargs) self.report_interval = report_interval @@ -409,10 +419,8 @@ class Service(object): self.saved_args, self.saved_kwargs = args, kwargs self.timers = [] self.backdoor_port = None - self.db_allowed = db_allowed self.conductor_api = conductor.API(use_local=db_allowed) self.conductor_api.wait_until_ready(context.get_admin_context()) - self.servicegroup_api = servicegroup.API() def start(self): verstr = version.version_string_with_package() @@ -422,12 +430,11 @@ class Service(object): self.model_disconnected = False ctxt = context.get_admin_context() try: - service_ref = self.conductor_api.service_get_by_args(ctxt, - self.host, - self.binary) - self.service_id = service_ref['id'] + self.service_ref = self.conductor_api.service_get_by_args(ctxt, + self.host, self.binary) + self.service_id = self.service_ref['id'] except exception.NotFound: - self._create_service_ref(ctxt) + self.service_ref = self._create_service_ref(ctxt) if self.backdoor_port is not None: self.manager.backdoor_port = self.backdoor_port @@ -480,6 +487,7 @@ class Service(object): } service = self.conductor_api.service_create(context, svc_values) self.service_id = service['id'] + return service def __getattr__(self, key): manager = self.__dict__.get('manager', None) @@ -566,7 +574,7 @@ class Service(object): class WSGIService(object): """Provides ability to launch API from a 'paste' configuration.""" - def __init__(self, name, loader=None): + def __init__(self, name, loader=None, use_ssl=False): """Initialize, but do not start the WSGI server. 
:param name: The name of the WSGI server given to the loader. @@ -581,10 +589,12 @@ class WSGIService(object): self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0") self.port = getattr(CONF, '%s_listen_port' % name, 0) self.workers = getattr(CONF, '%s_workers' % name, None) + self.use_ssl = use_ssl self.server = wsgi.Server(name, self.app, host=self.host, - port=self.port) + port=self.port, + use_ssl=self.use_ssl) # Pull back actual port used self.port = self.server.port self.backdoor_port = None diff --git a/nova/servicegroup/api.py b/nova/servicegroup/api.py index ebd0ee6ac..358b7dcbc 100644 --- a/nova/servicegroup/api.py +++ b/nova/servicegroup/api.py @@ -45,6 +45,15 @@ class API(object): @lockutils.synchronized('nova.servicegroup.api.new', 'nova-') def __new__(cls, *args, **kwargs): + '''Create an instance of the servicegroup API. + + args and kwargs are passed down to the servicegroup driver when it gets + created. No args currently exist, though. Valid kwargs are: + + db_allowed - Boolean. False if direct db access is not allowed and + alternative data access (conductor) should be used + instead. + ''' if not cls._driver: LOG.debug(_('ServiceGroup driver defined as an instance of %s'), @@ -55,7 +64,8 @@ class API(object): except KeyError: raise TypeError(_("unknown ServiceGroup driver name: %s") % driver_name) - cls._driver = importutils.import_object(driver_class) + cls._driver = importutils.import_object(driver_class, + *args, **kwargs) utils.check_isinstance(cls._driver, ServiceGroupDriver) # we don't have to check that cls._driver is not NONE, # check_isinstance does it diff --git a/nova/servicegroup/drivers/db.py b/nova/servicegroup/drivers/db.py index 075db3ed8..686ee728b 100644 --- a/nova/servicegroup/drivers/db.py +++ b/nova/servicegroup/drivers/db.py @@ -14,8 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from nova import conductor from nova import context -from nova import db from nova import exception from nova.openstack.common import cfg from nova.openstack.common import log as logging @@ -32,6 +32,10 @@ LOG = logging.getLogger(__name__) class DbDriver(api.ServiceGroupDriver): + def __init__(self, *args, **kwargs): + self.db_allowed = kwargs.get('db_allowed', True) + self.conductor_api = conductor.API(use_local=self.db_allowed) + def join(self, member_id, group_id, service=None): """Join the given service with it's group.""" @@ -53,6 +57,11 @@ class DbDriver(api.ServiceGroupDriver): Check whether a service is up based on last heartbeat. """ last_heartbeat = service_ref['updated_at'] or service_ref['created_at'] + if isinstance(last_heartbeat, basestring): + # NOTE(russellb) If this service_ref came in over rpc via + # conductor, then the timestamp will be a string and needs to be + # converted back to a datetime. + last_heartbeat = timeutils.parse_strtime(last_heartbeat) # Timestamps in DB are UTC. 
elapsed = utils.total_seconds(timeutils.utcnow() - last_heartbeat) LOG.debug('DB_Driver.is_up last_heartbeat = %(lhb)s elapsed = %(el)s', @@ -66,7 +75,8 @@ class DbDriver(api.ServiceGroupDriver): LOG.debug(_('DB_Driver: get_all members of the %s group') % group_id) rs = [] ctxt = context.get_admin_context() - for service in db.service_get_all_by_topic(ctxt, group_id): + services = self.conductor_api.service_get_all_by_topic(ctxt, group_id) + for service in services: if self.is_up(service): rs.append(service['host']) return rs @@ -76,18 +86,11 @@ class DbDriver(api.ServiceGroupDriver): ctxt = context.get_admin_context() state_catalog = {} try: - try: - service_ref = db.service_get(ctxt, service.service_id) - except exception.NotFound: - LOG.debug(_('The service database object disappeared, ' - 'Recreating it.')) - service._create_service_ref(ctxt) - service_ref = db.service_get(ctxt, service.service_id) - - state_catalog['report_count'] = service_ref['report_count'] + 1 + report_count = service.service_ref['report_count'] + 1 + state_catalog['report_count'] = report_count - db.service_update(ctxt, - service.service_id, state_catalog) + service.service_ref = self.conductor_api.service_update(ctxt, + service.service_ref, state_catalog) # TODO(termie): make this pattern be more elegant. if getattr(service, 'model_disconnected', False): diff --git a/nova/spice/__init__.py b/nova/spice/__init__.py new file mode 100644 index 000000000..390957e27 --- /dev/null +++ b/nova/spice/__init__.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Module for SPICE Proxying.""" + +from nova.openstack.common import cfg + + +spice_opts = [ + cfg.StrOpt('html5proxy_base_url', + default='http://127.0.0.1:6080/spice_auto.html', + help='location of spice html5 console proxy, in the form ' + '"http://127.0.0.1:6080/spice_auto.html"'), + cfg.StrOpt('server_listen', + default='127.0.0.1', + help='IP address on which instance spice server should listen'), + cfg.StrOpt('server_proxyclient_address', + default='127.0.0.1', + help='the address to which proxy clients ' + '(like nova-spicehtml5proxy) should connect'), + cfg.BoolOpt('enabled', + default=False, + help='enable spice related features'), + cfg.BoolOpt('agent_enabled', + default=True, + help='enable spice guest agent support'), + cfg.StrOpt('keymap', + default='en-us', + help='keymap for spice'), + ] + +CONF = cfg.CONF +CONF.register_opts(spice_opts, group='spice') diff --git a/nova/tests/api/ec2/test_cinder_cloud.py b/nova/tests/api/ec2/test_cinder_cloud.py index d403ba1f0..5e5723a08 100644 --- a/nova/tests/api/ec2/test_cinder_cloud.py +++ b/nova/tests/api/ec2/test_cinder_cloud.py @@ -18,9 +18,10 @@ # under the License. 
import copy -import tempfile import uuid +import fixtures + from nova.api.ec2 import cloud from nova.api.ec2 import ec2utils from nova.compute import api as compute_api @@ -86,7 +87,7 @@ def get_instances_with_cached_ips(orig_func, *args, **kwargs): class CinderCloudTestCase(test.TestCase): def setUp(self): super(CinderCloudTestCase, self).setUp() - vol_tmpdir = tempfile.mkdtemp() + vol_tmpdir = self.useFixture(fixtures.TempDir()).path self.flags(compute_driver='nova.virt.fake.FakeDriver', volume_api_class='nova.tests.fake_volume.API') diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py index b30a3ddeb..a00dceff1 100644 --- a/nova/tests/api/ec2/test_cloud.py +++ b/nova/tests/api/ec2/test_cloud.py @@ -30,6 +30,7 @@ import fixtures from nova.api.ec2 import cloud from nova.api.ec2 import ec2utils from nova.api.ec2 import inst_state +from nova.api.metadata import password from nova.compute import api as compute_api from nova.compute import power_state from nova.compute import utils as compute_utils @@ -1387,6 +1388,17 @@ class CloudTestCase(test.TestCase): instance_id = rv['instancesSet'][0]['instanceId'] return instance_id + def test_get_password_data(self): + instance_id = self._run_instance( + image_id='ami-1', + instance_type=CONF.default_instance_type, + max_count=1) + self.stubs.Set(password, 'extract_password', lambda i: 'fakepass') + output = self.cloud.get_password_data(context=self.context, + instance_id=[instance_id]) + self.assertEquals(output['passwordData'], 'fakepass') + rv = self.cloud.terminate_instances(self.context, [instance_id]) + def test_console_output(self): instance_id = self._run_instance( image_id='ami-1', @@ -1428,7 +1440,7 @@ class CloudTestCase(test.TestCase): self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys)) def test_describe_bad_key_pairs(self): - self.assertRaises(exception.EC2APIError, + self.assertRaises(exception.KeypairNotFound, self.cloud.describe_key_pairs, self.context, 
key_name=['DoesNotExist']) @@ -1478,7 +1490,7 @@ class CloudTestCase(test.TestCase): self.assertEqual(result['keyName'], key_name) for key_name in bad_names: - self.assertRaises(exception.EC2APIError, + self.assertRaises(exception.InvalidKeypair, self.cloud.create_key_pair, self.context, key_name) diff --git a/nova/tests/api/openstack/compute/contrib/test_admin_actions_with_cells.py b/nova/tests/api/openstack/compute/contrib/test_admin_actions_with_cells.py new file mode 100644 index 000000000..4e577e1f5 --- /dev/null +++ b/nova/tests/api/openstack/compute/contrib/test_admin_actions_with_cells.py @@ -0,0 +1,89 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+""" +Tests For Compute admin api w/ Cells +""" + +from nova.api.openstack.compute.contrib import admin_actions +from nova.compute import cells_api as compute_cells_api +from nova.compute import vm_states +from nova.openstack.common import log as logging +from nova.openstack.common import uuidutils +from nova import test +from nova.tests.api.openstack import fakes + +LOG = logging.getLogger('nova.tests.test_compute_cells') + +INSTANCE_IDS = {'inst_id': 1} + + +class CellsAdminAPITestCase(test.TestCase): + + def setUp(self): + super(CellsAdminAPITestCase, self).setUp() + + def _fake_cell_read_only(*args, **kwargs): + return False + + def _fake_validate_cell(*args, **kwargs): + return + + def _fake_compute_api_get(context, instance_id): + return {'id': 1, 'uuid': instance_id, 'vm_state': vm_states.ACTIVE, + 'task_state': None, 'cell_name': None} + + def _fake_instance_update_and_get_original(context, instance_uuid, + values): + inst = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid), + name=values.get('display_name')) + return (inst, inst) + + def fake_cast_to_cells(context, instance, method, *args, **kwargs): + """ + Makes sure that the cells receive the cast to update + the cell state + """ + self.cells_received_kwargs.update(kwargs) + + self.admin_api = admin_actions.AdminActionsController() + self.admin_api.compute_api = compute_cells_api.ComputeCellsAPI() + self.stubs.Set(self.admin_api.compute_api, '_cell_read_only', + _fake_cell_read_only) + self.stubs.Set(self.admin_api.compute_api, '_validate_cell', + _fake_validate_cell) + self.stubs.Set(self.admin_api.compute_api, 'get', + _fake_compute_api_get) + self.stubs.Set(self.admin_api.compute_api.db, + 'instance_update_and_get_original', + _fake_instance_update_and_get_original) + self.stubs.Set(self.admin_api.compute_api, '_cast_to_cells', + fake_cast_to_cells) + + self.uuid = uuidutils.generate_uuid() + url = '/fake/servers/%s/action' % self.uuid + self.request = fakes.HTTPRequest.blank(url) + 
self.cells_received_kwargs = {} + + def test_reset_active(self): + body = {"os-resetState": {"state": "error"}} + result = self.admin_api._reset_state(self.request, 'inst_id', body) + + self.assertEqual(result.status_int, 202) + # Make sure the cells received the update + self.assertEqual(self.cells_received_kwargs, + dict(vm_state=vm_states.ERROR, + task_state=None)) diff --git a/nova/tests/api/openstack/compute/contrib/test_availability_zone.py b/nova/tests/api/openstack/compute/contrib/test_availability_zone.py new file mode 100644 index 000000000..8abe7f388 --- /dev/null +++ b/nova/tests/api/openstack/compute/contrib/test_availability_zone.py @@ -0,0 +1,244 @@ +# Copyright 2012 IBM +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from datetime import datetime +from lxml import etree +import webob + +from nova.api.openstack.compute.contrib import availability_zone +from nova import availability_zones +from nova import context +from nova import db +from nova.openstack.common import jsonutils +from nova import servicegroup +from nova import test +from nova.tests.api.openstack import fakes + + +def fake_service_get_all(context, disabled=None): + def __fake_service(binary, availability_zone, + created_at, updated_at, host, disabled): + return {'binary': binary, + 'availability_zone': availability_zone, + 'available_zones': availability_zone, + 'created_at': created_at, + 'updated_at': updated_at, + 'host': host, + 'disabled': disabled} + + if disabled: + return [__fake_service("nova-compute", "zone-2", + datetime(2012, 11, 14, 9, 53, 25, 0), + datetime(2012, 12, 26, 14, 45, 25, 0), + "fake_host-1", True), + __fake_service("nova-scheduler", "internal", + datetime(2012, 11, 14, 9, 57, 3, 0), + datetime(2012, 12, 26, 14, 45, 25, 0), + "fake_host-1", True), + __fake_service("nova-network", "internal", + datetime(2012, 11, 16, 7, 25, 46, 0), + datetime(2012, 12, 26, 14, 45, 24, 0), + "fake_host-2", True)] + else: + return [__fake_service("nova-compute", "zone-1", + datetime(2012, 11, 14, 9, 53, 25, 0), + datetime(2012, 12, 26, 14, 45, 25, 0), + "fake_host-1", False), + __fake_service("nova-sched", "internal", + datetime(2012, 11, 14, 9, 57, 03, 0), + datetime(2012, 12, 26, 14, 45, 25, 0), + "fake_host-1", False), + __fake_service("nova-network", "internal", + datetime(2012, 11, 16, 7, 25, 46, 0), + datetime(2012, 12, 26, 14, 45, 24, 0), + "fake_host-2", False)] + + +def fake_service_is_up(self, service): + return service['binary'] != u"nova-network" + + +def fake_set_availability_zones(context, services): + return services + + +class AvailabilityZoneApiTest(test.TestCase): + def setUp(self): + super(AvailabilityZoneApiTest, self).setUp() + self.stubs.Set(db, 'service_get_all', 
fake_service_get_all) + self.stubs.Set(availability_zones, 'set_availability_zones', + fake_set_availability_zones) + self.stubs.Set(servicegroup.API, 'service_is_up', fake_service_is_up) + + def test_availability_zone_index(self): + req = webob.Request.blank('/v2/fake/os-availability-zone') + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + resp_dict = jsonutils.loads(resp.body) + + self.assertTrue('availabilityZoneInfo' in resp_dict) + zones = resp_dict['availabilityZoneInfo'] + self.assertEqual(len(zones), 2) + self.assertEqual(zones[0]['zoneName'], u'zone-1') + self.assertTrue(zones[0]['zoneState']['available']) + self.assertIsNone(zones[0]['hosts']) + self.assertEqual(zones[1]['zoneName'], u'zone-2') + self.assertFalse(zones[1]['zoneState']['available']) + self.assertIsNone(zones[1]['hosts']) + + def test_availability_zone_detail(self): + def _formatZone(zone_dict): + result = [] + + # Zone tree view item + result.append({'zoneName': zone_dict['zoneName'], + 'zoneState': u'available' + if zone_dict['zoneState']['available'] else + u'not available'}) + + if zone_dict['hosts'] is not None: + for (host, services) in zone_dict['hosts'].items(): + # Host tree view item + result.append({'zoneName': u'|- %s' % host, + 'zoneState': u''}) + for (svc, state) in services.items(): + # Service tree view item + result.append({'zoneName': u'| |- %s' % svc, + 'zoneState': u'%s %s %s' % ( + 'enabled' if state['active'] else + 'disabled', + ':-)' if state['available'] else + 'XXX', + jsonutils.to_primitive( + state['updated_at']))}) + return result + + def _assertZone(zone, name, status): + self.assertEqual(zone['zoneName'], name) + self.assertEqual(zone['zoneState'], status) + + availabilityZone = availability_zone.AvailabilityZoneController() + + req = webob.Request.blank('/v2/fake/os-availability-zone/detail') + req.method = 'GET' + req.environ['nova.context'] = context.get_admin_context() + resp_dict = availabilityZone.detail(req) + + 
self.assertTrue('availabilityZoneInfo' in resp_dict) + zones = resp_dict['availabilityZoneInfo'] + self.assertEqual(len(zones), 3) + + ''' availabilityZoneInfo field content in response body: + [{'zoneName': 'zone-1', + 'zoneState': {'available': True}, + 'hosts': {'fake_host-1': { + 'nova-compute': {'active': True, 'available': True, + 'updated_at': datetime(2012, 12, 26, 14, 45, 25)}}}}, + {'zoneName': 'internal', + 'zoneState': {'available': True}, + 'hosts': {'fake_host-1': { + 'nova-sched': {'active': True, 'available': True, + 'updated_at': datetime(2012, 12, 26, 14, 45, 25)}}, + 'fake_host-2': { + 'nova-network': {'active': True, 'available': False, + 'updated_at': datetime(2012, 12, 26, 14, 45, 24)}}}}, + {'zoneName': 'zone-2', + 'zoneState': {'available': False}, + 'hosts': None}] + ''' + + l0 = [u'zone-1', u'available'] + l1 = [u'|- fake_host-1', u''] + l2 = [u'| |- nova-compute', u'enabled :-) 2012-12-26T14:45:25.000000'] + l3 = [u'internal', u'available'] + l4 = [u'|- fake_host-1', u''] + l5 = [u'| |- nova-sched', u'enabled :-) 2012-12-26T14:45:25.000000'] + l6 = [u'|- fake_host-2', u''] + l7 = [u'| |- nova-network', u'enabled XXX 2012-12-26T14:45:24.000000'] + l8 = [u'zone-2', u'not available'] + + z0 = _formatZone(zones[0]) + z1 = _formatZone(zones[1]) + z2 = _formatZone(zones[2]) + + self.assertEqual(len(z0), 3) + self.assertEqual(len(z1), 5) + self.assertEqual(len(z2), 1) + + _assertZone(z0[0], l0[0], l0[1]) + _assertZone(z0[1], l1[0], l1[1]) + _assertZone(z0[2], l2[0], l2[1]) + _assertZone(z1[0], l3[0], l3[1]) + _assertZone(z1[1], l4[0], l4[1]) + _assertZone(z1[2], l5[0], l5[1]) + _assertZone(z1[3], l6[0], l6[1]) + _assertZone(z1[4], l7[0], l7[1]) + _assertZone(z2[0], l8[0], l8[1]) + + +class AvailabilityZoneSerializerTest(test.TestCase): + def test_availability_zone_index_detail_serializer(self): + def _verify_zone(zone_dict, tree): + self.assertEqual(tree.tag, 'availabilityZone') + self.assertEqual(zone_dict['zoneName'], tree.get('name')) + 
self.assertEqual(str(zone_dict['zoneState']['available']), + tree[0].get('available')) + + for _idx, host_child in enumerate(tree[1]): + self.assertTrue(host_child.get('name') in zone_dict['hosts']) + svcs = zone_dict['hosts'][host_child.get('name')] + for _idx, svc_child in enumerate(host_child[0]): + self.assertTrue(svc_child.get('name') in svcs) + svc = svcs[svc_child.get('name')] + self.assertEqual(len(svc_child), 1) + + self.assertEqual(str(svc['available']), + svc_child[0].get('available')) + self.assertEqual(str(svc['active']), + svc_child[0].get('active')) + self.assertEqual(str(svc['updated_at']), + svc_child[0].get('updated_at')) + + serializer = availability_zone.AvailabilityZonesTemplate() + raw_availability_zones = \ + [{'zoneName': 'zone-1', + 'zoneState': {'available': True}, + 'hosts': {'fake_host-1': { + 'nova-compute': {'active': True, 'available': True, + 'updated_at': + datetime(2012, 12, 26, 14, 45, 25)}}}}, + {'zoneName': 'internal', + 'zoneState': {'available': True}, + 'hosts': {'fake_host-1': { + 'nova-sched': {'active': True, 'available': True, + 'updated_at': + datetime(2012, 12, 26, 14, 45, 25)}}, + 'fake_host-2': { + 'nova-network': {'active': True, + 'available': False, + 'updated_at': + datetime(2012, 12, 26, 14, 45, 24)}}}}, + {'zoneName': 'zone-2', + 'zoneState': {'available': False}, + 'hosts': None}] + + text = serializer.serialize( + dict(availabilityZoneInfo=raw_availability_zones)) + tree = etree.fromstring(text) + + self.assertEqual('availabilityZones', tree.tag) + self.assertEqual(len(raw_availability_zones), len(tree)) + for idx, child in enumerate(tree): + _verify_zone(raw_availability_zones[idx], child) diff --git a/nova/tests/api/openstack/compute/contrib/test_cells.py b/nova/tests/api/openstack/compute/contrib/test_cells.py new file mode 100644 index 000000000..82d469524 --- /dev/null +++ b/nova/tests/api/openstack/compute/contrib/test_cells.py @@ -0,0 +1,396 @@ +# Copyright 2011-2012 OpenStack LLC. 
+# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +from lxml import etree +from webob import exc + +from nova.api.openstack.compute.contrib import cells as cells_ext +from nova.api.openstack import xmlutil +from nova.cells import rpcapi as cells_rpcapi +from nova import context +from nova import db +from nova import exception +from nova.openstack.common import timeutils +from nova import test +from nova.tests.api.openstack import fakes + + +FAKE_CELLS = [ + dict(id=1, name='cell1', username='bob', is_parent=True, + weight_scale=1.0, weight_offset=0.0, + rpc_host='r1.example.org', password='xxxx'), + dict(id=2, name='cell2', username='alice', is_parent=False, + weight_scale=1.0, weight_offset=0.0, + rpc_host='r2.example.org', password='qwerty')] + + +FAKE_CAPABILITIES = [ + {'cap1': '0,1', 'cap2': '2,3'}, + {'cap3': '4,5', 'cap4': '5,6'}] + + +def fake_db_cell_get(context, cell_name): + for cell in FAKE_CELLS: + if cell_name == cell['name']: + return cell + else: + raise exception.CellNotFound(cell_name=cell_name) + + +def fake_db_cell_create(context, values): + cell = dict(id=1) + cell.update(values) + return cell + + +def fake_db_cell_update(context, cell_id, values): + cell = fake_db_cell_get(context, cell_id) + cell.update(values) + return cell + + +def fake_cells_api_get_all_cell_info(*args): + cells = copy.deepcopy(FAKE_CELLS) + del cells[0]['password'] + del cells[1]['password'] + for i, cell in enumerate(cells): + 
cell['capabilities'] = FAKE_CAPABILITIES[i] + return cells + + +def fake_db_cell_get_all(context): + return FAKE_CELLS + + +class CellsTest(test.TestCase): + def setUp(self): + super(CellsTest, self).setUp() + self.stubs.Set(db, 'cell_get', fake_db_cell_get) + self.stubs.Set(db, 'cell_get_all', fake_db_cell_get_all) + self.stubs.Set(db, 'cell_update', fake_db_cell_update) + self.stubs.Set(db, 'cell_create', fake_db_cell_create) + self.stubs.Set(cells_rpcapi.CellsAPI, 'get_cell_info_for_neighbors', + fake_cells_api_get_all_cell_info) + + self.controller = cells_ext.Controller() + self.context = context.get_admin_context() + + def _get_request(self, resource): + return fakes.HTTPRequest.blank('/v2/fake/' + resource) + + def test_index(self): + req = self._get_request("cells") + res_dict = self.controller.index(req) + + self.assertEqual(len(res_dict['cells']), 2) + for i, cell in enumerate(res_dict['cells']): + self.assertEqual(cell['name'], FAKE_CELLS[i]['name']) + self.assertNotIn('capabilitiles', cell) + self.assertNotIn('password', cell) + + def test_detail(self): + req = self._get_request("cells/detail") + res_dict = self.controller.detail(req) + + self.assertEqual(len(res_dict['cells']), 2) + for i, cell in enumerate(res_dict['cells']): + self.assertEqual(cell['name'], FAKE_CELLS[i]['name']) + self.assertEqual(cell['capabilities'], FAKE_CAPABILITIES[i]) + self.assertNotIn('password', cell) + + def test_show_bogus_cell_raises(self): + req = self._get_request("cells/bogus") + self.assertRaises(exc.HTTPNotFound, self.controller.show, req, 'bogus') + + def test_get_cell_by_name(self): + req = self._get_request("cells/cell1") + res_dict = self.controller.show(req, 'cell1') + cell = res_dict['cell'] + + self.assertEqual(cell['name'], 'cell1') + self.assertEqual(cell['rpc_host'], 'r1.example.org') + self.assertNotIn('password', cell) + + def test_cell_delete(self): + call_info = {'delete_called': 0} + + def fake_db_cell_delete(context, cell_name): + 
self.assertEqual(cell_name, 'cell999') + call_info['delete_called'] += 1 + + self.stubs.Set(db, 'cell_delete', fake_db_cell_delete) + + req = self._get_request("cells/cell999") + self.controller.delete(req, 'cell999') + self.assertEqual(call_info['delete_called'], 1) + + def test_delete_bogus_cell_raises(self): + req = self._get_request("cells/cell999") + req.environ['nova.context'] = self.context + self.assertRaises(exc.HTTPNotFound, self.controller.delete, req, + 'cell999') + + def test_cell_create_parent(self): + body = {'cell': {'name': 'meow', + 'username': 'fred', + 'password': 'fubar', + 'rpc_host': 'r3.example.org', + 'type': 'parent', + # Also test this is ignored/stripped + 'is_parent': False}} + + req = self._get_request("cells") + res_dict = self.controller.create(req, body) + cell = res_dict['cell'] + + self.assertEqual(cell['name'], 'meow') + self.assertEqual(cell['username'], 'fred') + self.assertEqual(cell['rpc_host'], 'r3.example.org') + self.assertEqual(cell['type'], 'parent') + self.assertNotIn('password', cell) + self.assertNotIn('is_parent', cell) + + def test_cell_create_child(self): + body = {'cell': {'name': 'meow', + 'username': 'fred', + 'password': 'fubar', + 'rpc_host': 'r3.example.org', + 'type': 'child'}} + + req = self._get_request("cells") + res_dict = self.controller.create(req, body) + cell = res_dict['cell'] + + self.assertEqual(cell['name'], 'meow') + self.assertEqual(cell['username'], 'fred') + self.assertEqual(cell['rpc_host'], 'r3.example.org') + self.assertEqual(cell['type'], 'child') + self.assertNotIn('password', cell) + self.assertNotIn('is_parent', cell) + + def test_cell_create_no_name_raises(self): + body = {'cell': {'username': 'moocow', + 'password': 'secret', + 'rpc_host': 'r3.example.org', + 'type': 'parent'}} + + req = self._get_request("cells") + self.assertRaises(exc.HTTPBadRequest, + self.controller.create, req, body) + + def test_cell_create_name_empty_string_raises(self): + body = {'cell': {'name': '', + 
'username': 'fred', + 'password': 'secret', + 'rpc_host': 'r3.example.org', + 'type': 'parent'}} + + req = self._get_request("cells") + self.assertRaises(exc.HTTPBadRequest, + self.controller.create, req, body) + + def test_cell_create_name_with_bang_raises(self): + body = {'cell': {'name': 'moo!cow', + 'username': 'fred', + 'password': 'secret', + 'rpc_host': 'r3.example.org', + 'type': 'parent'}} + + req = self._get_request("cells") + self.assertRaises(exc.HTTPBadRequest, + self.controller.create, req, body) + + def test_cell_create_name_with_dot_raises(self): + body = {'cell': {'name': 'moo.cow', + 'username': 'fred', + 'password': 'secret', + 'rpc_host': 'r3.example.org', + 'type': 'parent'}} + + req = self._get_request("cells") + self.assertRaises(exc.HTTPBadRequest, + self.controller.create, req, body) + + def test_cell_create_name_with_invalid_type_raises(self): + body = {'cell': {'name': 'moocow', + 'username': 'fred', + 'password': 'secret', + 'rpc_host': 'r3.example.org', + 'type': 'invalid'}} + + req = self._get_request("cells") + self.assertRaises(exc.HTTPBadRequest, + self.controller.create, req, body) + + def test_cell_update(self): + body = {'cell': {'username': 'zeb', + 'password': 'sneaky'}} + + req = self._get_request("cells/cell1") + res_dict = self.controller.update(req, 'cell1', body) + cell = res_dict['cell'] + + self.assertEqual(cell['name'], 'cell1') + self.assertEqual(cell['rpc_host'], FAKE_CELLS[0]['rpc_host']) + self.assertEqual(cell['username'], 'zeb') + self.assertNotIn('password', cell) + + def test_cell_update_empty_name_raises(self): + body = {'cell': {'name': '', + 'username': 'zeb', + 'password': 'sneaky'}} + + req = self._get_request("cells/cell1") + self.assertRaises(exc.HTTPBadRequest, + self.controller.update, req, 'cell1', body) + + def test_cell_update_invalid_type_raises(self): + body = {'cell': {'username': 'zeb', + 'type': 'invalid', + 'password': 'sneaky'}} + + req = self._get_request("cells/cell1") + 
self.assertRaises(exc.HTTPBadRequest, + self.controller.update, req, 'cell1', body) + + def test_cell_info(self): + caps = ['cap1=a;b', 'cap2=c;d'] + self.flags(name='darksecret', capabilities=caps, group='cells') + + req = self._get_request("cells/info") + res_dict = self.controller.info(req) + cell = res_dict['cell'] + cell_caps = cell['capabilities'] + + self.assertEqual(cell['name'], 'darksecret') + self.assertEqual(cell_caps['cap1'], 'a;b') + self.assertEqual(cell_caps['cap2'], 'c;d') + + def test_sync_instances(self): + call_info = {} + + def sync_instances(self, context, **kwargs): + call_info['project_id'] = kwargs.get('project_id') + call_info['updated_since'] = kwargs.get('updated_since') + + self.stubs.Set(cells_rpcapi.CellsAPI, 'sync_instances', sync_instances) + + req = self._get_request("cells/sync_instances") + body = {} + self.controller.sync_instances(req, body=body) + self.assertEqual(call_info['project_id'], None) + self.assertEqual(call_info['updated_since'], None) + + body = {'project_id': 'test-project'} + self.controller.sync_instances(req, body=body) + self.assertEqual(call_info['project_id'], 'test-project') + self.assertEqual(call_info['updated_since'], None) + + expected = timeutils.utcnow().isoformat() + if not expected.endswith("+00:00"): + expected += "+00:00" + + body = {'updated_since': expected} + self.controller.sync_instances(req, body=body) + self.assertEqual(call_info['project_id'], None) + self.assertEqual(call_info['updated_since'], expected) + + body = {'updated_since': 'skjdfkjsdkf'} + self.assertRaises(exc.HTTPBadRequest, + self.controller.sync_instances, req, body=body) + + body = {'foo': 'meow'} + self.assertRaises(exc.HTTPBadRequest, + self.controller.sync_instances, req, body=body) + + +class TestCellsXMLSerializer(test.TestCase): + def test_multiple_cells(self): + fixture = {'cells': fake_cells_api_get_all_cell_info()} + + serializer = cells_ext.CellsTemplate() + output = serializer.serialize(fixture) + res_tree = 
etree.XML(output) + + self.assertEqual(res_tree.tag, '{%s}cells' % xmlutil.XMLNS_V10) + self.assertEqual(len(res_tree), 2) + self.assertEqual(res_tree[0].tag, '{%s}cell' % xmlutil.XMLNS_V10) + self.assertEqual(res_tree[1].tag, '{%s}cell' % xmlutil.XMLNS_V10) + + def test_single_cell_with_caps(self): + cell = {'id': 1, + 'name': 'darksecret', + 'username': 'meow', + 'capabilities': {'cap1': 'a;b', + 'cap2': 'c;d'}} + fixture = {'cell': cell} + + serializer = cells_ext.CellTemplate() + output = serializer.serialize(fixture) + res_tree = etree.XML(output) + + self.assertEqual(res_tree.tag, '{%s}cell' % xmlutil.XMLNS_V10) + self.assertEqual(res_tree.get('name'), 'darksecret') + self.assertEqual(res_tree.get('username'), 'meow') + self.assertEqual(res_tree.get('password'), None) + self.assertEqual(len(res_tree), 1) + + child = res_tree[0] + self.assertEqual(child.tag, + '{%s}capabilities' % xmlutil.XMLNS_V10) + for elem in child: + self.assertIn(elem.tag, ('{%s}cap1' % xmlutil.XMLNS_V10, + '{%s}cap2' % xmlutil.XMLNS_V10)) + if elem.tag == '{%s}cap1' % xmlutil.XMLNS_V10: + self.assertEqual(elem.text, 'a;b') + elif elem.tag == '{%s}cap2' % xmlutil.XMLNS_V10: + self.assertEqual(elem.text, 'c;d') + + def test_single_cell_without_caps(self): + cell = {'id': 1, + 'username': 'woof', + 'name': 'darksecret'} + fixture = {'cell': cell} + + serializer = cells_ext.CellTemplate() + output = serializer.serialize(fixture) + res_tree = etree.XML(output) + + self.assertEqual(res_tree.tag, '{%s}cell' % xmlutil.XMLNS_V10) + self.assertEqual(res_tree.get('name'), 'darksecret') + self.assertEqual(res_tree.get('username'), 'woof') + self.assertEqual(res_tree.get('password'), None) + self.assertEqual(len(res_tree), 0) + + +class TestCellsXMLDeserializer(test.TestCase): + def test_cell_deserializer(self): + caps_dict = {'cap1': 'a;b', + 'cap2': 'c;d'} + caps_xml = ("<capabilities><cap1>a;b</cap1>" + "<cap2>c;d</cap2></capabilities>") + expected = {'cell': {'name': 'testcell1', + 'type': 
'child', + 'rpc_host': 'localhost', + 'capabilities': caps_dict}} + intext = ("<?xml version='1.0' encoding='UTF-8'?>\n" + "<cell><name>testcell1</name><type>child</type>" + "<rpc_host>localhost</rpc_host>" + "%s</cell>") % caps_xml + deserializer = cells_ext.CellDeserializer() + result = deserializer.deserialize(intext) + self.assertEqual(dict(body=expected), result) diff --git a/nova/tests/api/openstack/compute/contrib/test_consoles.py b/nova/tests/api/openstack/compute/contrib/test_consoles.py index d251c6b75..cf044dfcd 100644 --- a/nova/tests/api/openstack/compute/contrib/test_consoles.py +++ b/nova/tests/api/openstack/compute/contrib/test_consoles.py @@ -26,19 +26,36 @@ def fake_get_vnc_console(self, _context, _instance, _console_type): return {'url': 'http://fake'} +def fake_get_spice_console(self, _context, _instance, _console_type): + return {'url': 'http://fake'} + + def fake_get_vnc_console_invalid_type(self, _context, _instance, _console_type): raise exception.ConsoleTypeInvalid(console_type=_console_type) +def fake_get_spice_console_invalid_type(self, _context, + _instance, _console_type): + raise exception.ConsoleTypeInvalid(console_type=_console_type) + + def fake_get_vnc_console_not_ready(self, _context, instance, _console_type): raise exception.InstanceNotReady(instance_id=instance["uuid"]) +def fake_get_spice_console_not_ready(self, _context, instance, _console_type): + raise exception.InstanceNotReady(instance_id=instance["uuid"]) + + def fake_get_vnc_console_not_found(self, _context, instance, _console_type): raise exception.InstanceNotFound(instance_id=instance["uuid"]) +def fake_get_spice_console_not_found(self, _context, instance, _console_type): + raise exception.InstanceNotFound(instance_id=instance["uuid"]) + + def fake_get(self, context, instance_uuid): return {'uuid': instance_uuid} @@ -53,6 +70,8 @@ class ConsolesExtensionTest(test.TestCase): super(ConsolesExtensionTest, self).setUp() self.stubs.Set(compute_api.API, 'get_vnc_console', 
fake_get_vnc_console) + self.stubs.Set(compute_api.API, 'get_spice_console', + fake_get_spice_console) self.stubs.Set(compute_api.API, 'get', fake_get) self.flags( osapi_compute_extension=[ @@ -132,3 +151,76 @@ class ConsolesExtensionTest(test.TestCase): res = req.get_response(self.app) self.assertEqual(res.status_int, 400) + + def test_get_spice_console(self): + body = {'os-getSPICEConsole': {'type': 'spice-html5'}} + req = webob.Request.blank('/v2/fake/servers/1/action') + req.method = "POST" + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(self.app) + output = jsonutils.loads(res.body) + self.assertEqual(res.status_int, 200) + self.assertEqual(output, + {u'console': {u'url': u'http://fake', u'type': u'spice-html5'}}) + + def test_get_spice_console_not_ready(self): + self.stubs.Set(compute_api.API, 'get_spice_console', + fake_get_spice_console_not_ready) + body = {'os-getSPICEConsole': {'type': 'spice-html5'}} + req = webob.Request.blank('/v2/fake/servers/1/action') + req.method = "POST" + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(self.app) + output = jsonutils.loads(res.body) + self.assertEqual(res.status_int, 409) + + def test_get_spice_console_no_type(self): + self.stubs.Set(compute_api.API, 'get_spice_console', + fake_get_spice_console_invalid_type) + body = {'os-getSPICEConsole': {}} + req = webob.Request.blank('/v2/fake/servers/1/action') + req.method = "POST" + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(self.app) + self.assertEqual(res.status_int, 400) + + def test_get_spice_console_no_instance(self): + self.stubs.Set(compute_api.API, 'get', fake_get_not_found) + body = {'os-getSPICEConsole': {'type': 'spice-html5'}} + req = webob.Request.blank('/v2/fake/servers/1/action') + req.method = "POST" + req.body = jsonutils.dumps(body) + req.headers["content-type"] 
= "application/json" + + res = req.get_response(self.app) + self.assertEqual(res.status_int, 404) + + def test_get_spice_console_no_instance_on_console_get(self): + self.stubs.Set(compute_api.API, 'get_spice_console', + fake_get_spice_console_not_found) + body = {'os-getSPICEConsole': {'type': 'spice-html5'}} + req = webob.Request.blank('/v2/fake/servers/1/action') + req.method = "POST" + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(self.app) + self.assertEqual(res.status_int, 404) + + def test_get_spice_console_invalid_type(self): + body = {'os-getSPICEConsole': {'type': 'invalid'}} + self.stubs.Set(compute_api.API, 'get_spice_console', + fake_get_spice_console_invalid_type) + req = webob.Request.blank('/v2/fake/servers/1/action') + req.method = "POST" + req.body = jsonutils.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(self.app) + self.assertEqual(res.status_int, 400) diff --git a/nova/tests/api/openstack/compute/contrib/test_floating_ips.py b/nova/tests/api/openstack/compute/contrib/test_floating_ips.py index a72430fd9..efc9b36cc 100644 --- a/nova/tests/api/openstack/compute/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/compute/contrib/test_floating_ips.py @@ -380,16 +380,16 @@ class FloatingIpTest(test.TestCase): floating_ips = ["10.10.10.10", "10.10.10.11"] if floating_address not in floating_ips: raise exception.FloatingIpNotFoundForAddress( - address=flaoting_address) + address=floating_address) - self.stubs.Set(network.api.API, "associate_floating_ip", - fake_network_api_associate) + self.stubs.Set(network.api.API, "associate_floating_ip", + fake_network_api_associate) - body = dict(addFloatingIp=dict(address='1.1.1.1')) - req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action') - self.assertRaises(webob.exc.HTTPNotFound, - self.manager._add_floating_ip, - req, 'test_inst', body) + body = 
dict(addFloatingIp=dict(address='1.1.1.1')) + req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action') + self.assertRaises(webob.exc.HTTPNotFound, + self.manager._add_floating_ip, + req, 'test_inst', body) def test_floating_ip_disassociate_non_existent_ip(self): def network_api_get_floating_ip_by_address(self, context, @@ -400,7 +400,7 @@ class FloatingIpTest(test.TestCase): address=floating_address) self.stubs.Set(network.api.API, "get_floating_ip_by_address", - network_api_get_floating_ip_by_address) + network_api_get_floating_ip_by_address) body = dict(removeFloatingIp=dict(address='1.1.1.1')) req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action') diff --git a/nova/tests/api/openstack/compute/contrib/test_hosts.py b/nova/tests/api/openstack/compute/contrib/test_hosts.py index be4465cf9..e103b5b19 100644 --- a/nova/tests/api/openstack/compute/contrib/test_hosts.py +++ b/nova/tests/api/openstack/compute/contrib/test_hosts.py @@ -32,8 +32,10 @@ def stub_service_get_all(context, disabled=None): return fake_hosts.SERVICES_LIST -def stub_service_does_host_exist(context, host_name): - return host_name in [row['host'] for row in stub_service_get_all(context)] +def stub_service_get_by_host_and_topic(context, host_name, topic): + for service in stub_service_get_all(context): + if service['host'] == host_name and service['topic'] == topic: + return service def stub_set_host_enabled(context, host_name, enabled): @@ -130,8 +132,8 @@ class HostTestCase(test.TestCase): self.stubs.Set(db, 'service_get_all', stub_service_get_all) # Only hosts in our fake DB exist - self.stubs.Set(db, 'service_does_host_exist', - stub_service_does_host_exist) + self.stubs.Set(db, 'service_get_by_host_and_topic', + stub_service_get_by_host_and_topic) # 'host_c1' always succeeds, and 'host_c2' self.stubs.Set(self.hosts_api, 'set_host_enabled', stub_set_host_enabled) diff --git a/nova/tests/api/openstack/compute/contrib/test_networks.py 
b/nova/tests/api/openstack/compute/contrib/test_networks.py index ba65e8f6a..44d9e8af3 100644 --- a/nova/tests/api/openstack/compute/contrib/test_networks.py +++ b/nova/tests/api/openstack/compute/contrib/test_networks.py @@ -21,8 +21,8 @@ import uuid import webob -from nova.api.openstack.compute.contrib import admin_networks as networks from nova.api.openstack.compute.contrib import networks_associate +from nova.api.openstack.compute.contrib import os_networks as networks from nova import exception from nova.openstack.common import cfg from nova import test @@ -177,7 +177,7 @@ class NetworksTest(test.TestCase): def setUp(self): super(NetworksTest, self).setUp() self.fake_network_api = FakeNetworkAPI() - self.controller = networks.AdminNetworkController( + self.controller = networks.NetworkController( self.fake_network_api) self.associate_controller = networks_associate\ .NetworkAssociateActionController(self.fake_network_api) diff --git a/nova/tests/api/openstack/compute/contrib/test_services.py b/nova/tests/api/openstack/compute/contrib/test_services.py index 24f169d98..3a6e5db7c 100644 --- a/nova/tests/api/openstack/compute/contrib/test_services.py +++ b/nova/tests/api/openstack/compute/contrib/test_services.py @@ -26,30 +26,30 @@ from nova.tests.api.openstack import fakes fake_services_list = [{'binary': 'nova-scheduler', 'host': 'host1', - 'availability_zone': 'nova', 'id': 1, 'disabled': True, + 'topic': 'scheduler', 'updated_at': datetime(2012, 10, 29, 13, 42, 2), 'created_at': datetime(2012, 9, 18, 2, 46, 27)}, {'binary': 'nova-compute', 'host': 'host1', - 'availability_zone': 'nova', 'id': 2, 'disabled': True, + 'topic': 'compute', 'updated_at': datetime(2012, 10, 29, 13, 42, 5), 'created_at': datetime(2012, 9, 18, 2, 46, 27)}, {'binary': 'nova-scheduler', 'host': 'host2', - 'availability_zone': 'nova', 'id': 3, 'disabled': False, + 'topic': 'scheduler', 'updated_at': datetime(2012, 9, 19, 6, 55, 34), 'created_at': datetime(2012, 9, 18, 2, 46, 28)}, 
{'binary': 'nova-compute', 'host': 'host2', - 'availability_zone': 'nova', 'id': 4, 'disabled': True, + 'topic': 'compute', 'updated_at': datetime(2012, 9, 18, 8, 3, 38), 'created_at': datetime(2012, 9, 18, 2, 46, 28)}, ] @@ -60,7 +60,7 @@ class FakeRequest(object): GET = {} -class FakeRequestWithSevice(object): +class FakeRequestWithService(object): environ = {"nova.context": context.get_admin_context()} GET = {"service": "nova-compute"} @@ -75,7 +75,7 @@ class FakeRequestWithHostService(object): GET = {"host": "host1", "service": "nova-compute"} -def fake_servcie_get_all(context): +def fake_service_get_all(context): return fake_services_list @@ -111,7 +111,7 @@ class ServicesTest(test.TestCase): def setUp(self): super(ServicesTest, self).setUp() - self.stubs.Set(db, "service_get_all", fake_servcie_get_all) + self.stubs.Set(db, "service_get_all", fake_service_get_all) self.stubs.Set(timeutils, "utcnow", fake_utcnow) self.stubs.Set(db, "service_get_by_args", fake_service_get_by_host_binary) @@ -128,7 +128,7 @@ class ServicesTest(test.TestCase): res_dict = self.controller.index(req) response = {'services': [{'binary': 'nova-scheduler', - 'host': 'host1', 'zone': 'nova', + 'host': 'host1', 'zone': 'internal', 'status': 'disabled', 'state': 'up', 'updated_at': datetime(2012, 10, 29, 13, 42, 2)}, {'binary': 'nova-compute', @@ -136,7 +136,7 @@ class ServicesTest(test.TestCase): 'status': 'disabled', 'state': 'up', 'updated_at': datetime(2012, 10, 29, 13, 42, 5)}, {'binary': 'nova-scheduler', 'host': 'host2', - 'zone': 'nova', + 'zone': 'internal', 'status': 'enabled', 'state': 'down', 'updated_at': datetime(2012, 9, 19, 6, 55, 34)}, {'binary': 'nova-compute', 'host': 'host2', @@ -150,7 +150,7 @@ class ServicesTest(test.TestCase): res_dict = self.controller.index(req) response = {'services': [{'binary': 'nova-scheduler', 'host': 'host1', - 'zone': 'nova', + 'zone': 'internal', 'status': 'disabled', 'state': 'up', 'updated_at': datetime(2012, 10, 29, 13, 42, 2)}, 
{'binary': 'nova-compute', 'host': 'host1', @@ -160,7 +160,7 @@ class ServicesTest(test.TestCase): self.assertEqual(res_dict, response) def test_services_list_with_service(self): - req = FakeRequestWithSevice() + req = FakeRequestWithService() res_dict = self.controller.index(req) response = {'services': [{'binary': 'nova-compute', 'host': 'host1', diff --git a/nova/tests/api/openstack/compute/test_extensions.py b/nova/tests/api/openstack/compute/test_extensions.py index e3810510b..485968209 100644 --- a/nova/tests/api/openstack/compute/test_extensions.py +++ b/nova/tests/api/openstack/compute/test_extensions.py @@ -185,7 +185,6 @@ class ExtensionControllerTest(ExtensionTestCase): "Keypairs", "Multinic", "MultipleCreate", - "OSNetworks", "QuotaClasses", "Quotas", "Rescue", diff --git a/nova/tests/api/openstack/compute/test_limits.py b/nova/tests/api/openstack/compute/test_limits.py index f0f2f02d5..375355a70 100644 --- a/nova/tests/api/openstack/compute/test_limits.py +++ b/nova/tests/api/openstack/compute/test_limits.py @@ -618,7 +618,7 @@ class WsgiLimiterTest(BaseLimitTestSuite): self.app = limits.WsgiLimiter(TEST_LIMITS) def _request_data(self, verb, path): - """Get data decribing a limit request verb/path.""" + """Get data describing a limit request verb/path.""" return jsonutils.dumps({"verb": verb, "path": path}) def _request(self, verb, url, username=None): diff --git a/nova/tests/api/openstack/compute/test_server_metadata.py b/nova/tests/api/openstack/compute/test_server_metadata.py index 1e992c2a3..71fa9f3f3 100644 --- a/nova/tests/api/openstack/compute/test_server_metadata.py +++ b/nova/tests/api/openstack/compute/test_server_metadata.py @@ -21,6 +21,7 @@ import webob from nova.api.openstack.compute import server_metadata from nova.compute import rpcapi as compute_rpcapi +from nova.compute import vm_states import nova.db from nova import exception from nova.openstack.common import cfg @@ -75,14 +76,16 @@ def return_server(context, server_id): return 
{'id': server_id, 'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64', 'name': 'fake', - 'locked': False} + 'locked': False, + 'vm_state': vm_states.ACTIVE} def return_server_by_uuid(context, server_uuid): return {'id': 1, 'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64', 'name': 'fake', - 'locked': False} + 'locked': False, + 'vm_state': vm_states.ACTIVE} def return_server_nonexistent(context, server_id): @@ -93,10 +96,9 @@ def fake_change_instance_metadata(self, context, instance, diff): pass -class ServerMetaDataTest(test.TestCase): - +class BaseTest(test.TestCase): def setUp(self): - super(ServerMetaDataTest, self).setUp() + super(BaseTest, self).setUp() fakes.stub_out_key_pair_funcs(self.stubs) self.stubs.Set(nova.db, 'instance_get', return_server) self.stubs.Set(nova.db, 'instance_get_by_uuid', @@ -112,6 +114,9 @@ class ServerMetaDataTest(test.TestCase): self.uuid = str(uuid.uuid4()) self.url = '/v1.1/fake/servers/%s/metadata' % self.uuid + +class ServerMetaDataTest(BaseTest): + def test_index(self): req = fakes.HTTPRequest.blank(self.url) res_dict = self.controller.index(req, self.uuid) @@ -510,3 +515,50 @@ class ServerMetaDataTest(test.TestCase): req.body = jsonutils.dumps(data) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update_all, req, self.uuid, data) + + +class BadStateServerMetaDataTest(BaseTest): + + def setUp(self): + super(BadStateServerMetaDataTest, self).setUp() + self.stubs.Set(nova.db, 'instance_get', self._return_server_in_build) + self.stubs.Set(nova.db, 'instance_get_by_uuid', + self._return_server_in_build_by_uuid) + self.stubs.Set(nova.db, 'instance_metadata_delete', + delete_server_metadata) + + def test_invalid_state_on_delete(self): + req = fakes.HTTPRequest.blank(self.url + '/key2') + req.method = 'DELETE' + self.assertRaises(webob.exc.HTTPConflict, self.controller.delete, + req, self.uuid, 'key2') + + def test_invalid_state_on_update_metadata(self): + self.stubs.Set(nova.db, 'instance_metadata_update', + 
return_create_instance_metadata) + req = fakes.HTTPRequest.blank(self.url) + req.method = 'POST' + req.content_type = 'application/json' + expected = { + 'metadata': { + 'key1': 'updatedvalue', + 'key29': 'newkey', + } + } + req.body = jsonutils.dumps(expected) + self.assertRaises(webob.exc.HTTPConflict, self.controller.update_all, + req, self.uuid, expected) + + def _return_server_in_build(self, context, server_id): + return {'id': server_id, + 'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64', + 'name': 'fake', + 'locked': False, + 'vm_state': vm_states.BUILDING} + + def _return_server_in_build_by_uuid(self, context, server_uuid): + return {'id': 1, + 'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64', + 'name': 'fake', + 'locked': False, + 'vm_state': vm_states.BUILDING} diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py index 1fa1e67e5..5456c23af 100644 --- a/nova/tests/api/openstack/compute/test_servers.py +++ b/nova/tests/api/openstack/compute/test_servers.py @@ -835,6 +835,12 @@ class ServersControllerTest(test.TestCase): self.assertEqual(len(servers), 1) self.assertEqual(servers[0]['id'], server_uuid) + def test_get_servers_with_bad_flavor(self): + req = fakes.HTTPRequest.blank('/v2/fake/servers?flavor=abcde') + servers = self.controller.index(req)['servers'] + + self.assertEqual(len(servers), 0) + def test_get_servers_allows_status(self): server_uuid = str(uuid.uuid4()) @@ -2246,6 +2252,74 @@ class ServersControllerCreateTest(test.TestCase): self.stubs.Set(compute_api.API, 'create', create) self._test_create_extra(params) + def test_create_instance_with_device_name_not_string(self): + self.ext_mgr.extensions = {'os-volumes': 'fake'} + bdm = [{'delete_on_termination': 1, + 'device_name': 123, + 'volume_size': 1, + 'volume_id': '11111111-1111-1111-1111-111111111111'}] + params = {'block_device_mapping': bdm} + old_create = compute_api.API.create + + def create(*args, **kwargs): + 
self.assertEqual(kwargs['block_device_mapping'], bdm) + return old_create(*args, **kwargs) + + self.stubs.Set(compute_api.API, 'create', create) + self.assertRaises(webob.exc.HTTPBadRequest, + self._test_create_extra, params) + + def test_create_instance_with_device_name_empty(self): + self.ext_mgr.extensions = {'os-volumes': 'fake'} + bdm = [{'delete_on_termination': 1, + 'device_name': '', + 'volume_size': 1, + 'volume_id': '11111111-1111-1111-1111-111111111111'}] + params = {'block_device_mapping': bdm} + old_create = compute_api.API.create + + def create(*args, **kwargs): + self.assertEqual(kwargs['block_device_mapping'], bdm) + return old_create(*args, **kwargs) + + self.stubs.Set(compute_api.API, 'create', create) + self.assertRaises(webob.exc.HTTPBadRequest, + self._test_create_extra, params) + + def test_create_instance_with_device_name_too_long(self): + self.ext_mgr.extensions = {'os-volumes': 'fake'} + bdm = [{'delete_on_termination': 1, + 'device_name': 'a' * 256, + 'volume_size': 1, + 'volume_id': '11111111-1111-1111-1111-111111111111'}] + params = {'block_device_mapping': bdm} + old_create = compute_api.API.create + + def create(*args, **kwargs): + self.assertEqual(kwargs['block_device_mapping'], bdm) + return old_create(*args, **kwargs) + + self.stubs.Set(compute_api.API, 'create', create) + self.assertRaises(webob.exc.HTTPBadRequest, + self._test_create_extra, params) + + def test_create_instance_with_space_in_device_name(self): + self.ext_mgr.extensions = {'os-volumes': 'fake'} + bdm = [{'delete_on_termination': 1, + 'device_name': 'vd a', + 'volume_size': 1, + 'volume_id': '11111111-1111-1111-1111-111111111111'}] + params = {'block_device_mapping': bdm} + old_create = compute_api.API.create + + def create(*args, **kwargs): + self.assertEqual(kwargs['block_device_mapping'], bdm) + return old_create(*args, **kwargs) + + self.stubs.Set(compute_api.API, 'create', create) + self.assertRaises(webob.exc.HTTPBadRequest, + self._test_create_extra, params) + 
def test_create_instance_with_bdm_delete_on_termination(self): self.ext_mgr.extensions = {'os-volumes': 'fake'} bdm = [{'device_name': 'foo1', 'delete_on_termination': 1}, @@ -4057,7 +4131,7 @@ class ServersViewBuilderTest(test.TestCase): "message": "Error", 'details': 'Stock details for test'} - self.request.context = context.get_admin_context() + self.request.environ['nova.context'].is_admin = True output = self.view_builder.show(self.request, self.instance) self.assertThat(output['server']['fault'], matchers.DictMatches(expected_fault)) @@ -4076,7 +4150,7 @@ class ServersViewBuilderTest(test.TestCase): "created": "2010-10-10T12:00:00Z", "message": "Error"} - self.request.context = context.get_admin_context() + self.request.environ['nova.context'].is_admin = True output = self.view_builder.show(self.request, self.instance) self.assertThat(output['server']['fault'], matchers.DictMatches(expected_fault)) diff --git a/nova/tests/baremetal/test_driver.py b/nova/tests/baremetal/test_driver.py index d5384eff0..37ef71881 100644 --- a/nova/tests/baremetal/test_driver.py +++ b/nova/tests/baremetal/test_driver.py @@ -136,6 +136,19 @@ class BareMetalDriverWithDBTestCase(bm_db_base.BMDBTestCase): row = db.bm_node_get(self.context, self.node['id']) self.assertEqual(row['task_state'], baremetal_states.ACTIVE) + def test_macs_for_instance(self): + self._create_node() + expected = set(['01:23:45:67:89:01', '01:23:45:67:89:02']) + self.assertEqual( + expected, self.driver.macs_for_instance(self.test_instance)) + + def test_macs_for_instance_no_interfaces(self): + # Nodes cannot boot with no MACs, so we raise an error if that happens. 
+ self.nic_info = [] + self._create_node() + self.assertRaises(exception.NovaException, + self.driver.macs_for_instance, self.test_instance) + def test_spawn_node_in_use(self): self._create_node() db.bm_node_update(self.context, self.node['id'], diff --git a/nova/tests/baremetal/test_nova_baremetal_deploy_helper.py b/nova/tests/baremetal/test_nova_baremetal_deploy_helper.py new file mode 100644 index 000000000..56c3f953e --- /dev/null +++ b/nova/tests/baremetal/test_nova_baremetal_deploy_helper.py @@ -0,0 +1,256 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 NTT DOCOMO, INC. +# Copyright 2011 OpenStack LLC +# Copyright 2011 Ilya Alekseyev +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import imp +import os +import sys +import tempfile +import time + +from nova import test + +from nova.tests.baremetal.db import base as bm_db_base + + +TOPDIR = os.path.normpath(os.path.join( + os.path.dirname(os.path.abspath(__file__)), + os.pardir, + os.pardir, + os.pardir)) +BMDH_PATH = os.path.join(TOPDIR, 'bin', 'nova-baremetal-deploy-helper') + +sys.dont_write_bytecode = True +bmdh = imp.load_source('bmdh', BMDH_PATH) +sys.dont_write_bytecode = False + +_PXECONF_DEPLOY = """ +default deploy + +label deploy +kernel deploy_kernel +append initrd=deploy_ramdisk +ipappend 3 + +label boot +kernel kernel +append initrd=ramdisk root=${ROOT} +""" + +_PXECONF_BOOT = """ +default boot + +label deploy +kernel deploy_kernel +append initrd=deploy_ramdisk +ipappend 3 + +label boot +kernel kernel +append initrd=ramdisk root=UUID=12345678-1234-1234-1234-1234567890abcdef +""" + + +class WorkerTestCase(bm_db_base.BMDBTestCase): + def setUp(self): + super(WorkerTestCase, self).setUp() + self.worker = bmdh.Worker() + # Make tearDown() fast + self.worker.queue_timeout = 0.1 + self.worker.start() + + def tearDown(self): + if self.worker.isAlive(): + self.worker.stop = True + self.worker.join(timeout=1) + super(WorkerTestCase, self).tearDown() + + def wait_queue_empty(self, timeout): + for _ in xrange(int(timeout / 0.1)): + if bmdh.QUEUE.empty(): + break + time.sleep(0.1) + + def test_run_calls_deploy(self): + """Check all queued requests are passed to deploy().""" + history = [] + + def fake_deploy(**params): + history.append(params) + + self.stubs.Set(bmdh, 'deploy', fake_deploy) + params_list = [{'fake1': ''}, {'fake2': ''}, {'fake3': ''}] + for (dep_id, params) in enumerate(params_list): + bmdh.QUEUE.put((dep_id, params)) + self.wait_queue_empty(1) + self.assertEqual(params_list, history) + + def test_run_with_failing_deploy(self): + """Check a worker keeps on running even if deploy() raises + an exception. 
+ """ + history = [] + + def fake_deploy(**params): + history.append(params) + # always fail + raise Exception('test') + + self.stubs.Set(bmdh, 'deploy', fake_deploy) + params_list = [{'fake1': ''}, {'fake2': ''}, {'fake3': ''}] + for (dep_id, params) in enumerate(params_list): + bmdh.QUEUE.put((dep_id, params)) + self.wait_queue_empty(1) + self.assertEqual(params_list, history) + + +class PhysicalWorkTestCase(test.TestCase): + def setUp(self): + super(PhysicalWorkTestCase, self).setUp() + + def noop(*args, **kwargs): + pass + + self.stubs.Set(time, 'sleep', noop) + + def test_deploy(self): + """Check loosely all functions are called with right args.""" + address = '127.0.0.1' + port = 3306 + iqn = 'iqn.xyz' + lun = 1 + image_path = '/tmp/xyz/image' + pxe_config_path = '/tmp/abc/pxeconfig' + root_mb = 128 + swap_mb = 64 + + dev = '/dev/fake' + root_part = '/dev/fake-part1' + swap_part = '/dev/fake-part2' + root_uuid = '12345678-1234-1234-12345678-12345678abcdef' + + self.mox.StubOutWithMock(bmdh, 'get_dev') + self.mox.StubOutWithMock(bmdh, 'get_image_mb') + self.mox.StubOutWithMock(bmdh, 'discovery') + self.mox.StubOutWithMock(bmdh, 'login_iscsi') + self.mox.StubOutWithMock(bmdh, 'logout_iscsi') + self.mox.StubOutWithMock(bmdh, 'make_partitions') + self.mox.StubOutWithMock(bmdh, 'is_block_device') + self.mox.StubOutWithMock(bmdh, 'dd') + self.mox.StubOutWithMock(bmdh, 'mkswap') + self.mox.StubOutWithMock(bmdh, 'block_uuid') + self.mox.StubOutWithMock(bmdh, 'switch_pxe_config') + self.mox.StubOutWithMock(bmdh, 'notify') + + bmdh.get_dev(address, port, iqn, lun).AndReturn(dev) + bmdh.get_image_mb(image_path).AndReturn(1) # < root_mb + bmdh.discovery(address, port) + bmdh.login_iscsi(address, port, iqn) + bmdh.is_block_device(dev).AndReturn(True) + bmdh.make_partitions(dev, root_mb, swap_mb) + bmdh.is_block_device(root_part).AndReturn(True) + bmdh.is_block_device(swap_part).AndReturn(True) + bmdh.dd(image_path, root_part) + bmdh.mkswap(swap_part) + 
bmdh.block_uuid(root_part).AndReturn(root_uuid) + bmdh.logout_iscsi(address, port, iqn) + bmdh.switch_pxe_config(pxe_config_path, root_uuid) + bmdh.notify(address, 10000) + self.mox.ReplayAll() + + bmdh.deploy(address, port, iqn, lun, image_path, pxe_config_path, + root_mb, swap_mb) + + def test_always_logout_iscsi(self): + """logout_iscsi() must be called once login_iscsi() is called.""" + address = '127.0.0.1' + port = 3306 + iqn = 'iqn.xyz' + lun = 1 + image_path = '/tmp/xyz/image' + pxe_config_path = '/tmp/abc/pxeconfig' + root_mb = 128 + swap_mb = 64 + + dev = '/dev/fake' + + self.mox.StubOutWithMock(bmdh, 'get_dev') + self.mox.StubOutWithMock(bmdh, 'get_image_mb') + self.mox.StubOutWithMock(bmdh, 'discovery') + self.mox.StubOutWithMock(bmdh, 'login_iscsi') + self.mox.StubOutWithMock(bmdh, 'logout_iscsi') + self.mox.StubOutWithMock(bmdh, 'work_on_disk') + + class TestException(Exception): + pass + + bmdh.get_dev(address, port, iqn, lun).AndReturn(dev) + bmdh.get_image_mb(image_path).AndReturn(1) # < root_mb + bmdh.discovery(address, port) + bmdh.login_iscsi(address, port, iqn) + bmdh.work_on_disk(dev, root_mb, swap_mb, image_path).\ + AndRaise(TestException) + bmdh.logout_iscsi(address, port, iqn) + self.mox.ReplayAll() + + self.assertRaises(TestException, + bmdh.deploy, + address, port, iqn, lun, image_path, + pxe_config_path, root_mb, swap_mb) + + +class SwitchPxeConfigTestCase(test.TestCase): + def setUp(self): + super(SwitchPxeConfigTestCase, self).setUp() + (fd, self.fname) = tempfile.mkstemp() + os.write(fd, _PXECONF_DEPLOY) + os.close(fd) + + def tearDown(self): + os.unlink(self.fname) + super(SwitchPxeConfigTestCase, self).tearDown() + + def test_switch_pxe_config(self): + bmdh.switch_pxe_config(self.fname, + '12345678-1234-1234-1234-1234567890abcdef') + with open(self.fname, 'r') as f: + pxeconf = f.read() + self.assertEqual(pxeconf, _PXECONF_BOOT) + + +class OtherFunctionTestCase(test.TestCase): + def test_get_dev(self): + expected = 
'/dev/disk/by-path/ip-1.2.3.4:5678-iscsi-iqn.fake-lun-9' + actual = bmdh.get_dev('1.2.3.4', 5678, 'iqn.fake', 9) + self.assertEqual(expected, actual) + + def test_get_image_mb(self): + mb = 1024 * 1024 + size = None + + def fake_getsize(path): + return size + + self.stubs.Set(os.path, 'getsize', fake_getsize) + size = 0 + self.assertEqual(bmdh.get_image_mb('x'), 0) + size = 1 + self.assertEqual(bmdh.get_image_mb('x'), 1) + size = mb + self.assertEqual(bmdh.get_image_mb('x'), 1) + size = mb + 1 + self.assertEqual(bmdh.get_image_mb('x'), 2) diff --git a/nova/tests/baremetal/test_pxe.py b/nova/tests/baremetal/test_pxe.py index 45c9ede43..73ef8caa3 100644 --- a/nova/tests/baremetal/test_pxe.py +++ b/nova/tests/baremetal/test_pxe.py @@ -147,12 +147,6 @@ class PXEClassMethodsTestCase(BareMetalPXETestCase): config = pxe.build_network_config(net) self.assertIn('eth0', config) self.assertNotIn('eth1', config) - self.assertIn('hwaddress ether fake', config) - self.assertNotIn('hwaddress ether aa:bb:cc:dd', config) - - net[0][1]['mac'] = 'aa:bb:cc:dd' - config = pxe.build_network_config(net) - self.assertIn('hwaddress ether aa:bb:cc:dd', config) net = utils.get_test_network_info(2) config = pxe.build_network_config(net) @@ -254,6 +248,13 @@ class PXEClassMethodsTestCase(BareMetalPXETestCase): pxe.get_tftp_image_info, self.instance) + # Test that other non-true values also raise an exception + CONF.baremetal.deploy_kernel = "" + CONF.baremetal.deploy_ramdisk = "" + self.assertRaises(exception.NovaException, + pxe.get_tftp_image_info, + self.instance) + # Even if the instance includes kernel_id and ramdisk_id, # we still need deploy_kernel_id and deploy_ramdisk_id. 
# If those aren't present in instance[], and not specified in @@ -295,6 +296,17 @@ class PXEClassMethodsTestCase(BareMetalPXETestCase): self.assertEqual(res['deploy_kernel'][0], 'eeee') self.assertEqual(res['deploy_ramdisk'][0], 'ffff') + # However, if invalid values are passed on the image extra_specs, + # this should still raise an exception. + extra_specs = { + 'deploy_kernel_id': '', + 'deploy_ramdisk_id': '', + } + self.instance['extra_specs'] = extra_specs + self.assertRaises(exception.NovaException, + pxe.get_tftp_image_info, + self.instance) + class PXEPrivateMethodsTestCase(BareMetalPXETestCase): @@ -306,15 +318,6 @@ class PXEPrivateMethodsTestCase(BareMetalPXETestCase): macs = self.driver._collect_mac_addresses(self.context, self.node) self.assertEqual(macs, address_list) - def test_generate_udev_rules(self): - self._create_node() - address_list = [nic['address'] for nic in self.nic_info] - address_list.append(self.node_info['prov_mac_address']) - - rules = self.driver._generate_udev_rules(self.context, self.node) - for address in address_list: - self.assertIn('ATTR{address}=="%s"' % address, rules) - def test_cache_tftp_images(self): self.instance['kernel_id'] = 'aaaa' self.instance['ramdisk_id'] = 'bbbb' @@ -357,8 +360,6 @@ class PXEPrivateMethodsTestCase(BareMetalPXETestCase): # nova.virt.disk.api._inject_*_into_fs self._create_node() files = [] - files.append(('/etc/udev/rules.d/70-persistent-net.rules', - self.driver._generate_udev_rules(self.context, self.node))) self.instance['hostname'] = 'fake hostname' files.append(('/etc/hostname', 'fake hostname')) self.instance['key_data'] = 'fake ssh key' diff --git a/nova/tests/cells/test_cells_manager.py b/nova/tests/cells/test_cells_manager.py index 72ef3f1f0..ef165f4ed 100644 --- a/nova/tests/cells/test_cells_manager.py +++ b/nova/tests/cells/test_cells_manager.py @@ -38,6 +38,21 @@ class CellsManagerClassTestCase(test.TestCase): self.driver = self.cells_manager.driver self.ctxt = 'fake_context' + def 
_get_fake_responses(self): + responses = [] + expected_responses = [] + for x in xrange(1, 4): + responses.append(messaging.Response('cell%s' % x, x, False)) + expected_responses.append(('cell%s' % x, x)) + return expected_responses, responses + + def test_get_cell_info_for_neighbors(self): + self.mox.StubOutWithMock(self.cells_manager.state_manager, + 'get_cell_info_for_neighbors') + self.cells_manager.state_manager.get_cell_info_for_neighbors() + self.mox.ReplayAll() + self.cells_manager.get_cell_info_for_neighbors(self.ctxt) + def test_post_start_hook_child_cell(self): self.mox.StubOutWithMock(self.driver, 'start_consumers') self.mox.StubOutWithMock(context, 'get_admin_context') @@ -211,3 +226,14 @@ class CellsManagerClassTestCase(test.TestCase): # Now the last 1 and the first 1 self.assertEqual(call_info['sync_instances'], [instances[-1], instances[0]]) + + def test_sync_instances(self): + self.mox.StubOutWithMock(self.msg_runner, + 'sync_instances') + self.msg_runner.sync_instances(self.ctxt, 'fake-project', + 'fake-time', 'fake-deleted') + self.mox.ReplayAll() + self.cells_manager.sync_instances(self.ctxt, + project_id='fake-project', + updated_since='fake-time', + deleted='fake-deleted') diff --git a/nova/tests/cells/test_cells_messaging.py b/nova/tests/cells/test_cells_messaging.py index 9973716f6..da45721ed 100644 --- a/nova/tests/cells/test_cells_messaging.py +++ b/nova/tests/cells/test_cells_messaging.py @@ -14,11 +14,14 @@ """ Tests For Cells Messaging module """ +import mox from nova.cells import messaging +from nova.cells import utils as cells_utils from nova import context from nova import exception from nova.openstack.common import cfg +from nova.openstack.common import timeutils from nova import test from nova.tests.cells import fakes @@ -912,3 +915,46 @@ class CellsBroadcastMethodsTestCase(test.TestCase): self.src_msg_runner.bw_usage_update_at_top(self.ctxt, fake_bw_update_info) + + def test_sync_instances(self): + # Reset this, as this is a 
broadcast down. + self._setup_attrs(up=False) + project_id = 'fake_project_id' + updated_since_raw = 'fake_updated_since_raw' + updated_since_parsed = 'fake_updated_since_parsed' + deleted = 'fake_deleted' + + instance1 = dict(uuid='fake_uuid1', deleted=False) + instance2 = dict(uuid='fake_uuid2', deleted=True) + fake_instances = [instance1, instance2] + + self.mox.StubOutWithMock(self.tgt_msg_runner, + 'instance_update_at_top') + self.mox.StubOutWithMock(self.tgt_msg_runner, + 'instance_destroy_at_top') + + self.mox.StubOutWithMock(timeutils, 'parse_isotime') + self.mox.StubOutWithMock(cells_utils, 'get_instances_to_sync') + + # Middle cell. + timeutils.parse_isotime(updated_since_raw).AndReturn( + updated_since_parsed) + cells_utils.get_instances_to_sync(self.ctxt, + updated_since=updated_since_parsed, + project_id=project_id, + deleted=deleted).AndReturn([]) + + # Bottom/Target cell + timeutils.parse_isotime(updated_since_raw).AndReturn( + updated_since_parsed) + cells_utils.get_instances_to_sync(self.ctxt, + updated_since=updated_since_parsed, + project_id=project_id, + deleted=deleted).AndReturn(fake_instances) + self.tgt_msg_runner.instance_update_at_top(self.ctxt, instance1) + self.tgt_msg_runner.instance_destroy_at_top(self.ctxt, instance2) + + self.mox.ReplayAll() + + self.src_msg_runner.sync_instances(self.ctxt, + project_id, updated_since_raw, deleted) diff --git a/nova/tests/cells/test_cells_rpcapi.py b/nova/tests/cells/test_cells_rpcapi.py index b51bfa0c1..5e045aca9 100644 --- a/nova/tests/cells/test_cells_rpcapi.py +++ b/nova/tests/cells/test_cells_rpcapi.py @@ -204,3 +204,23 @@ class CellsAPITestCase(test.TestCase): expected_args = {'bw_update_info': bw_update_info} self._check_result(call_info, 'bw_usage_update_at_top', expected_args) + + def test_get_cell_info_for_neighbors(self): + call_info = self._stub_rpc_method('call', 'fake_response') + result = self.cells_rpcapi.get_cell_info_for_neighbors( + self.fake_context) + 
self._check_result(call_info, 'get_cell_info_for_neighbors', {}, + version='1.1') + self.assertEqual(result, 'fake_response') + + def test_sync_instances(self): + call_info = self._stub_rpc_method('cast', None) + self.cells_rpcapi.sync_instances(self.fake_context, + project_id='fake_project', updated_since='fake_time', + deleted=True) + + expected_args = {'project_id': 'fake_project', + 'updated_since': 'fake_time', + 'deleted': True} + self._check_result(call_info, 'sync_instances', expected_args, + version='1.1') diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index 08d9451b3..b8212848c 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -60,7 +60,6 @@ from nova import test from nova.tests.compute import fake_resource_tracker from nova.tests.db.fakes import FakeModel from nova.tests import fake_network -from nova.tests import fake_network_cache_model from nova.tests.image import fake as fake_image from nova.tests import matchers from nova import utils @@ -146,10 +145,11 @@ class BaseTestCase(test.TestCase): fake_network.set_stub_network_methods(self.stubs) def tearDown(self): + ctxt = context.get_admin_context() fake_image.FakeImageService_reset() - instances = db.instance_get_all(self.context.elevated()) + instances = db.instance_get_all(ctxt) for instance in instances: - db.instance_destroy(self.context.elevated(), instance['uuid']) + db.instance_destroy(ctxt, instance['uuid']) fake.restore_nodes() super(BaseTestCase, self).tearDown() @@ -996,96 +996,109 @@ class ComputeTestCase(BaseTestCase): self.compute.terminate_instance(self.context, instance=jsonutils.to_primitive(instance)) - def _stub_out_reboot(self, fake_net_info, fake_block_dev_info): - def fake_reboot(driver, inst, net_info, reboot_type, block_dev_info): - self.assertEqual(block_dev_info, fake_block_dev_info) - self.assertEqual(net_info, fake_net_info) - - self.stubs.Set(nova.virt.fake.FakeDriver, 'legacy_nwinfo', - lambda 
x: False) - self.stubs.Set(nova.virt.fake.FakeDriver, 'reboot', fake_reboot) + def _test_reboot(self, soft, legacy_nwinfo_driver): + # This is a true unit test, so we don't need the network stubs. + fake_network.unset_stub_network_methods(self.stubs) - def test_reboot_soft(self): - # Ensure instance can be soft rebooted. - instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) - db.instance_update(self.context, instance['uuid'], - {'task_state': task_states.REBOOTING}) + self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info') + self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage') + self.mox.StubOutWithMock(self.compute, '_instance_update') + self.mox.StubOutWithMock(self.compute, '_get_power_state') + self.mox.StubOutWithMock(self.compute.driver, 'legacy_nwinfo') + self.mox.StubOutWithMock(self.compute.driver, 'reboot') + + instance = dict(uuid='fake-instance', + power_state='unknown') + updated_instance1 = dict(uuid='updated-instance1', + power_state='fake') + updated_instance2 = dict(uuid='updated-instance2', + power_state='fake') + + fake_nw_model = network_model.NetworkInfo() + self.mox.StubOutWithMock(fake_nw_model, 'legacy') + + fake_block_dev_info = 'fake_block_dev_info' + fake_power_state1 = 'fake_power_state1' + fake_power_state2 = 'fake_power_state2' + reboot_type = soft and 'SOFT' or 'HARD' + + # Beginning of calls we expect. + + # FIXME(comstud): I don't feel like the context needs to + # be elevated at all. Hopefully remove elevated from + # reboot_instance and remove the stub here in a future patch. + # econtext would just become self.context below then. 
+ econtext = self.context.elevated() + + self.mox.StubOutWithMock(self.context, 'elevated') + self.context.elevated().AndReturn(econtext) + + self.compute._get_instance_nw_info(econtext, + instance).AndReturn( + fake_nw_model) + self.compute._notify_about_instance_usage(econtext, + instance, + 'reboot.start') + self.compute._get_power_state(econtext, + instance).AndReturn(fake_power_state1) + self.compute._instance_update(econtext, instance['uuid'], + power_state=fake_power_state1, + vm_state=vm_states.ACTIVE).AndReturn(updated_instance1) + + # Reboot should check the driver to see if legacy nwinfo is + # needed. If it is, the model's legacy() method should be + # called and the result passed to driver.reboot. If the + # driver wants the model, we pass the model. + self.compute.driver.legacy_nwinfo().AndReturn(legacy_nwinfo_driver) + if legacy_nwinfo_driver: + expected_nw_info = 'legacy-nwinfo' + fake_nw_model.legacy().AndReturn(expected_nw_info) + else: + expected_nw_info = fake_nw_model + + # Annoying. driver.reboot is wrapped in a try/except, and + # doesn't re-raise. It eats exception generated by mox if + # this is called with the wrong args, so we have to hack + # around it. 
+ reboot_call_info = {} + expected_call_info = {'args': (updated_instance1, expected_nw_info, + reboot_type, fake_block_dev_info), + 'kwargs': {}} + + def fake_reboot(*args, **kwargs): + reboot_call_info['args'] = args + reboot_call_info['kwargs'] = kwargs + + self.stubs.Set(self.compute.driver, 'reboot', fake_reboot) + + # Power state should be updated again + self.compute._get_power_state(econtext, + updated_instance1).AndReturn(fake_power_state2) + self.compute._instance_update(econtext, updated_instance1['uuid'], + power_state=fake_power_state2, + task_state=None, + vm_state=vm_states.ACTIVE).AndReturn(updated_instance2) + self.compute._notify_about_instance_usage(econtext, + updated_instance2, + 'reboot.end') - reboot_type = "SOFT" - fake_net_info = [] - fake_block_dev_info = {'foo': 'bar'} - self._stub_out_reboot(fake_net_info, fake_block_dev_info) + self.mox.ReplayAll() self.compute.reboot_instance(self.context, instance=instance, - network_info=fake_net_info, block_device_info=fake_block_dev_info, reboot_type=reboot_type) + self.assertEqual(expected_call_info, reboot_call_info) - inst_ref = db.instance_get_by_uuid(self.context, instance['uuid']) - self.assertEqual(inst_ref['power_state'], power_state.RUNNING) - self.assertEqual(inst_ref['task_state'], None) - - self.compute.terminate_instance(self.context, - instance=jsonutils.to_primitive(inst_ref)) + def test_reboot_soft(self): + self._test_reboot(True, False) def test_reboot_hard(self): - # Ensure instance can be hard rebooted. 
- instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) - db.instance_update(self.context, instance['uuid'], - {'task_state': task_states.REBOOTING_HARD}) - - reboot_type = "HARD" - fake_net_info = [] - fake_block_dev_info = {'foo': 'bar'} - self._stub_out_reboot(fake_net_info, fake_block_dev_info) - self.compute.reboot_instance(self.context, instance=instance, - network_info=fake_net_info, - block_device_info=fake_block_dev_info, - reboot_type=reboot_type) - - inst_ref = db.instance_get_by_uuid(self.context, instance['uuid']) - self.assertEqual(inst_ref['power_state'], power_state.RUNNING) - self.assertEqual(inst_ref['task_state'], None) + self._test_reboot(False, False) - self.compute.terminate_instance(self.context, - instance=jsonutils.to_primitive(inst_ref)) + def test_reboot_soft_legacy_nwinfo_driver(self): + self._test_reboot(True, True) - def test_reboot_nwinfo(self): - # Ensure instance network info is rehydrated in reboot. - instance = jsonutils.to_primitive(self._create_fake_instance()) - self.compute.run_instance(self.context, instance=instance) - db.instance_update(self.context, instance['uuid'], - {'task_state': task_states.REBOOTING_HARD}) - - result = {'was_instance': []} - - # NOTE(danms): Beware the dragons ahead: - # Since the _legacy_nw_info() method in manager runs inside a - # try..except block, we can't assert from here. Further, this - # will be run more than once during the operation we're about - # to fire off, which means we need to make sure that it doesn't - # fail any of the times it is run. Hence the obscurity below. 
- def fake_legacy_nw_info(network_info): - result['was_instance'].append( - isinstance(network_info, network_model.NetworkInfo)) - self.stubs.Set(self.compute, '_legacy_nw_info', fake_legacy_nw_info) - - fake_net_info = network_model.NetworkInfo([ - fake_network_cache_model.new_vif(), - fake_network_cache_model.new_vif( - {'address': 'bb:bb:bb:bb:bb:bb'})]) - fake_net_info_p = jsonutils.to_primitive(fake_net_info) - fake_block_dev_info = {'foo': 'bar'} - self.compute.reboot_instance(self.context, instance=instance, - network_info=fake_net_info_p, - block_device_info=fake_block_dev_info, - reboot_type="SOFT") - - inst_ref = db.instance_get_by_uuid(self.context, instance['uuid']) - self.compute.terminate_instance(self.context, - instance=jsonutils.to_primitive(inst_ref)) - self.assertFalse(False in result['was_instance']) + def test_reboot_hard_legacy_nwinfo_driver(self): + self._test_reboot(False, True) def test_set_admin_password(self): # Ensure instance can have its admin password set. @@ -1322,6 +1335,9 @@ class ComputeTestCase(BaseTestCase): def test_novnc_vnc_console(self): # Make sure we can a vnc console for an instance. + self.flags(vnc_enabled=True) + self.flags(enabled=False, group='spice') + instance = jsonutils.to_primitive(self._create_fake_instance()) self.compute.run_instance(self.context, instance=instance) @@ -1334,6 +1350,9 @@ class ComputeTestCase(BaseTestCase): def test_xvpvnc_vnc_console(self): # Make sure we can a vnc console for an instance. + self.flags(vnc_enabled=True) + self.flags(enabled=False, group='spice') + instance = jsonutils.to_primitive(self._create_fake_instance()) self.compute.run_instance(self.context, instance=instance) @@ -1344,6 +1363,9 @@ class ComputeTestCase(BaseTestCase): def test_invalid_vnc_console_type(self): # Raise useful error if console type is an unrecognised string. 
+ self.flags(vnc_enabled=True) + self.flags(enabled=False, group='spice') + instance = jsonutils.to_primitive(self._create_fake_instance()) self.compute.run_instance(self.context, instance=instance) @@ -1354,6 +1376,9 @@ class ComputeTestCase(BaseTestCase): def test_missing_vnc_console_type(self): # Raise useful error is console type is None. + self.flags(vnc_enabled=True) + self.flags(enabled=False, group='spice') + instance = jsonutils.to_primitive(self._create_fake_instance()) self.compute.run_instance(self.context, instance=instance) @@ -1362,6 +1387,47 @@ class ComputeTestCase(BaseTestCase): self.context, None, instance=instance) self.compute.terminate_instance(self.context, instance=instance) + def test_spicehtml5_spice_console(self): + # Make sure we can a spice console for an instance. + self.flags(vnc_enabled=False) + self.flags(enabled=True, group='spice') + + instance = jsonutils.to_primitive(self._create_fake_instance()) + self.compute.run_instance(self.context, instance=instance) + + # Try with the full instance + console = self.compute.get_spice_console(self.context, 'spice-html5', + instance=instance) + self.assert_(console) + + self.compute.terminate_instance(self.context, instance=instance) + + def test_invalid_spice_console_type(self): + # Raise useful error if console type is an unrecognised string + self.flags(vnc_enabled=False) + self.flags(enabled=True, group='spice') + + instance = jsonutils.to_primitive(self._create_fake_instance()) + self.compute.run_instance(self.context, instance=instance) + + self.assertRaises(exception.ConsoleTypeInvalid, + self.compute.get_spice_console, + self.context, 'invalid', instance=instance) + self.compute.terminate_instance(self.context, instance=instance) + + def test_missing_spice_console_type(self): + # Raise useful error is console type is None + self.flags(vnc_enabled=False) + self.flags(enabled=True, group='spice') + + instance = jsonutils.to_primitive(self._create_fake_instance()) + 
self.compute.run_instance(self.context, instance=instance) + + self.assertRaises(exception.ConsoleTypeInvalid, + self.compute.get_spice_console, + self.context, None, instance=instance) + self.compute.terminate_instance(self.context, instance=instance) + def test_diagnostics(self): # Make sure we can get diagnostics for an instance. expected_diagnostic = {'cpu0_time': 17300000000, @@ -1510,6 +1576,27 @@ class ComputeTestCase(BaseTestCase): instance=instance) self.compute.terminate_instance(self.context, instance=instance) + def test_run_instance_queries_macs(self): + # run_instance should ask the driver for node mac addresses and pass + # that to the network_api in use. + fake_network.unset_stub_network_methods(self.stubs) + instance = jsonutils.to_primitive(self._create_fake_instance()) + + macs = set(['01:23:45:67:89:ab']) + self.mox.StubOutWithMock(self.compute.network_api, + "allocate_for_instance") + self.compute.network_api.allocate_for_instance( + mox.IgnoreArg(), + mox.IgnoreArg(), + requested_networks=None, + vpn=False, macs=macs).AndReturn( + fake_network.fake_get_instance_nw_info(self.stubs, 1, 1, + spectacular=True)) + self.mox.StubOutWithMock(self.compute.driver, "macs_for_instance") + self.compute.driver.macs_for_instance(instance).AndReturn(macs) + self.mox.ReplayAll() + self.compute.run_instance(self.context, instance=instance) + def test_instance_set_to_error_on_uncaught_exception(self): # Test that instance is set to error state when exception is raised. 
instance = jsonutils.to_primitive(self._create_fake_instance()) @@ -1520,7 +1607,8 @@ class ComputeTestCase(BaseTestCase): mox.IgnoreArg(), mox.IgnoreArg(), requested_networks=None, - vpn=False).AndRaise(rpc_common.RemoteError()) + vpn=False, + macs=None).AndRaise(rpc_common.RemoteError()) fake_network.unset_stub_network_methods(self.stubs) @@ -2543,8 +2631,6 @@ class ComputeTestCase(BaseTestCase): 'setup_networks_on_host') self.mox.StubOutWithMock(self.compute.network_api, 'migrate_instance_finish') - self.mox.StubOutWithMock(self.compute.driver, - 'post_live_migration_at_destination') self.mox.StubOutWithMock(self.compute, '_get_power_state') self.mox.StubOutWithMock(self.compute, '_instance_update') @@ -2562,10 +2648,12 @@ class ComputeTestCase(BaseTestCase): self.compute.network_api.migrate_instance_finish(admin_ctxt, instance, migration) fake_net_info = [] + fake_block_dev_info = {'foo': 'bar'} self.compute.driver.post_live_migration_at_destination(admin_ctxt, - instance, - fake_net_info, - False) + instance, + fake_net_info, + False, + fake_block_dev_info) self.compute._get_power_state(admin_ctxt, instance).AndReturn( 'fake_power_state') @@ -2610,8 +2698,8 @@ class ComputeTestCase(BaseTestCase): self.assertEqual(task_states.POWERING_OFF, instances[0]['task_state']) def test_add_instance_fault(self): + instance = self._create_fake_instance() exc_info = None - instance_uuid = str(uuid.uuid4()) def fake_db_fault_create(ctxt, values): self.assertTrue(values['details'].startswith('test')) @@ -2621,7 +2709,8 @@ class ComputeTestCase(BaseTestCase): expected = { 'code': 500, 'message': 'NotImplementedError', - 'instance_uuid': instance_uuid, + 'instance_uuid': instance['uuid'], + 'host': self.compute.host } self.assertEquals(expected, values) @@ -2633,13 +2722,12 @@ class ComputeTestCase(BaseTestCase): self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create) ctxt = context.get_admin_context() - compute_utils.add_instance_fault_from_exc(ctxt, 
instance_uuid, - NotImplementedError('test'), - exc_info) + compute_utils.add_instance_fault_from_exc(ctxt, instance, + NotImplementedError('test'), exc_info) def test_add_instance_fault_with_remote_error(self): + instance = self._create_fake_instance() exc_info = None - instance_uuid = str(uuid.uuid4()) def fake_db_fault_create(ctxt, values): self.assertTrue(values['details'].startswith('Remote error')) @@ -2649,8 +2737,9 @@ class ComputeTestCase(BaseTestCase): expected = { 'code': 500, - 'instance_uuid': instance_uuid, - 'message': 'My Test Message' + 'instance_uuid': instance['uuid'], + 'message': 'My Test Message', + 'host': self.compute.host } self.assertEquals(expected, values) @@ -2662,13 +2751,12 @@ class ComputeTestCase(BaseTestCase): self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create) ctxt = context.get_admin_context() - compute_utils.add_instance_fault_from_exc(ctxt, instance_uuid, - exc, - exc_info) + compute_utils.add_instance_fault_from_exc(ctxt, instance, exc, + exc_info) def test_add_instance_fault_user_error(self): + instance = self._create_fake_instance() exc_info = None - instance_uuid = str(uuid.uuid4()) def fake_db_fault_create(ctxt, values): @@ -2676,7 +2764,8 @@ class ComputeTestCase(BaseTestCase): 'code': 400, 'message': 'Invalid', 'details': 'fake details', - 'instance_uuid': instance_uuid, + 'instance_uuid': instance['uuid'], + 'host': self.compute.host } self.assertEquals(expected, values) @@ -2690,26 +2779,27 @@ class ComputeTestCase(BaseTestCase): self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create) ctxt = context.get_admin_context() - compute_utils.add_instance_fault_from_exc(ctxt, instance_uuid, - user_exc, exc_info) + compute_utils.add_instance_fault_from_exc(ctxt, instance, user_exc, + exc_info) def test_add_instance_fault_no_exc_info(self): - instance_uuid = str(uuid.uuid4()) + instance = self._create_fake_instance() def fake_db_fault_create(ctxt, values): expected = { 'code': 500, 'message': 
'NotImplementedError', 'details': 'test', - 'instance_uuid': instance_uuid, + 'instance_uuid': instance['uuid'], + 'host': self.compute.host } self.assertEquals(expected, values) self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create) ctxt = context.get_admin_context() - compute_utils.add_instance_fault_from_exc(ctxt, instance_uuid, - NotImplementedError('test')) + compute_utils.add_instance_fault_from_exc(ctxt, instance, + NotImplementedError('test')) def test_cleanup_running_deleted_instances(self): admin_context = context.get_admin_context() @@ -2871,7 +2961,7 @@ class ComputeTestCase(BaseTestCase): call_info['expected_instance'] = instances[0] self.compute._heal_instance_info_cache(ctxt) self.assertEqual(call_info['get_all_by_host'], 2) - # Stays the same, beacuse the instance came from the DB + # Stays the same, because the instance came from the DB self.assertEqual(call_info['get_by_uuid'], 3) self.assertEqual(call_info['get_nw_info'], 4) @@ -3159,7 +3249,6 @@ class ComputeTestCase(BaseTestCase): self.compute._destroy_evacuated_instances(fake_context) def test_init_host(self): - our_host = self.compute.host fake_context = 'fake-context' startup_instances = ['inst1', 'inst2', 'inst3'] @@ -3212,7 +3301,39 @@ class ComputeTestCase(BaseTestCase): self.mox.ReplayAll() self.compute.init_host() - # VerifyCall done by tearDown + # tearDown() uses context.get_admin_context(), so we have + # to do the verification here and unstub it. 
+ self.mox.VerifyAll() + self.mox.UnsetStubs() + + def test_init_instance_failed_resume_sets_error(self): + instance = { + 'uuid': 'fake-uuid', + 'info_cache': None, + 'power_state': power_state.RUNNING, + 'vm_state': vm_states.ACTIVE, + } + self.flags(resume_guests_state_on_host_boot=True) + self.mox.StubOutWithMock(self.compute, '_get_power_state') + self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs') + self.mox.StubOutWithMock(self.compute.driver, + 'resume_state_on_host_boot') + self.mox.StubOutWithMock(self.compute, + '_get_instance_volume_block_device_info') + self.mox.StubOutWithMock(self.compute, + '_set_instance_error_state') + self.compute._get_power_state(mox.IgnoreArg(), + instance).AndReturn(power_state.SHUTDOWN) + self.compute.driver.plug_vifs(instance, mox.IgnoreArg()) + self.compute._get_instance_volume_block_device_info(mox.IgnoreArg(), + instance['uuid']).AndReturn('fake-bdm') + self.compute.driver.resume_state_on_host_boot(mox.IgnoreArg(), + instance, mox.IgnoreArg(), + 'fake-bdm').AndRaise(test.TestingException) + self.compute._set_instance_error_state(mox.IgnoreArg(), + instance['uuid']) + self.mox.ReplayAll() + self.compute._init_instance('fake-context', instance) def test_get_instances_on_driver(self): fake_context = context.get_admin_context() @@ -3866,6 +3987,38 @@ class ComputeAPITestCase(BaseTestCase): db.instance_destroy(self.context, instance['uuid']) + def test_delete_in_resizing(self): + def fake_quotas_reserve(context, expire=None, project_id=None, + **deltas): + old_type = instance_types.get_instance_type_by_name('m1.tiny') + # ensure using old instance type to create reservations + self.assertEqual(deltas['cores'], -old_type['vcpus']) + self.assertEqual(deltas['ram'], -old_type['memory_mb']) + + self.stubs.Set(QUOTAS, 'reserve', fake_quotas_reserve) + + instance, instance_uuid = self._run_instance(params={ + 'host': CONF.host}) + + # create a fake migration record (manager does this) + new_inst_type = 
instance_types.get_instance_type_by_name('m1.small') + db.migration_create(self.context.elevated(), + {'instance_uuid': instance['uuid'], + 'old_instance_type_id': instance['instance_type_id'], + 'new_instance_type_id': new_inst_type['id'], + 'status': 'post-migrating'}) + + # update instance type to resized one + db.instance_update(self.context, instance['uuid'], + {'instance_type_id': new_inst_type['id'], + 'vcpus': new_inst_type['vcpus'], + 'memory_mb': new_inst_type['memory_mb'], + 'task_state': task_states.RESIZE_FINISH}) + + self.compute_api.delete(self.context, instance) + + db.instance_destroy(self.context, instance['uuid']) + def test_delete_in_resized(self): instance, instance_uuid = self._run_instance(params={ 'host': CONF.host}) @@ -4198,12 +4351,10 @@ class ComputeAPITestCase(BaseTestCase): def _stub_out_reboot(self, device_name): def fake_reboot_instance(rpcapi, context, instance, block_device_info, - network_info, reboot_type): self.assertEqual( block_device_info['block_device_mapping'][0]['mount_device'], device_name) - self.assertEqual(network_info[0]['network']['bridge'], 'fake_br1') self.stubs.Set(nova.compute.rpcapi.ComputeAPI, 'reboot_instance', fake_reboot_instance) @@ -4376,6 +4527,31 @@ class ComputeAPITestCase(BaseTestCase): db.instance_destroy(self.context, instance['uuid']) + def test_snapshot_given_image_uuid(self): + """Ensure a snapshot of an instance can be created when image UUID + is already known. 
+ """ + instance = self._create_fake_instance() + name = 'snap1' + extra_properties = {'extra_param': 'value1'} + recv_meta = self.compute_api.snapshot(self.context, instance, name, + extra_properties) + image_id = recv_meta['id'] + + def fake_show(meh, context, id): + return recv_meta + + instance = db.instance_update(self.context, instance['uuid'], + {'task_state': None}) + fake_image.stub_out_image_service(self.stubs) + self.stubs.Set(fake_image._FakeImageService, 'show', fake_show) + image = self.compute_api.snapshot(self.context, instance, name, + extra_properties, + image_id=image_id) + self.assertEqual(image, recv_meta) + + db.instance_destroy(self.context, instance['uuid']) + def test_snapshot_minram_mindisk_VHD(self): """Ensure a snapshots min_ram and min_disk are correct. @@ -4383,27 +4559,25 @@ class ComputeAPITestCase(BaseTestCase): and min_disk set to that of the original instances flavor. """ - self.fake_image['disk_format'] = 'vhd' + self.fake_image.update(disk_format='vhd', + min_ram=1, min_disk=1) self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show) - instance = self._create_fake_instance() - inst_params = {'root_gb': 2, 'memory_mb': 256} - instance['instance_type'].update(inst_params) + instance = self._create_fake_instance(type_name='m1.small') image = self.compute_api.snapshot(self.context, instance, 'snap1', {'extra_param': 'value1'}) self.assertEqual(image['name'], 'snap1') - self.assertEqual(image['min_ram'], 256) - self.assertEqual(image['min_disk'], 2) + instance_type = instance['instance_type'] + self.assertEqual(image['min_ram'], instance_type['memory_mb']) + self.assertEqual(image['min_disk'], instance_type['root_gb']) properties = image['properties'] self.assertTrue('backup_type' not in properties) self.assertEqual(properties['image_type'], 'snapshot') self.assertEqual(properties['instance_uuid'], instance['uuid']) self.assertEqual(properties['extra_param'], 'value1') - db.instance_destroy(self.context, instance['uuid']) 
- def test_snapshot_minram_mindisk(self): """Ensure a snapshots min_ram and min_disk are correct. @@ -4469,7 +4643,10 @@ class ComputeAPITestCase(BaseTestCase): def fake_show(*args): raise exception.ImageNotFound(image_id="fake") - self.stubs.Set(fake_image._FakeImageService, 'show', fake_show) + if not self.__class__.__name__ == "CellsComputeAPITestCase": + # Cells tests will call this a 2nd time in child cell with + # the newly created image_id, and we want that one to succeed. + self.stubs.Set(fake_image._FakeImageService, 'show', fake_show) instance = self._create_fake_instance() @@ -5078,14 +5255,14 @@ class ComputeAPITestCase(BaseTestCase): self.assertTrue(instance3['uuid'] in instance_uuids) self.assertTrue(instance4['uuid'] in instance_uuids) - # multiple criterias as a dict + # multiple criteria as a dict instances = self.compute_api.get_all(c, search_opts={'metadata': {'key3': 'value3', 'key4': 'value4'}}) self.assertEqual(len(instances), 1) self.assertEqual(instances[0]['uuid'], instance4['uuid']) - # multiple criterias as a list + # multiple criteria as a list instances = self.compute_api.get_all(c, search_opts={'metadata': [{'key4': 'value4'}, {'key3': 'value3'}]}) @@ -5157,6 +5334,24 @@ class ComputeAPITestCase(BaseTestCase): db.instance_destroy(_context, instance['uuid']) + def test_disallow_metadata_changes_during_building(self): + def fake_change_instance_metadata(inst, ctxt, diff, instance=None, + instance_uuid=None): + pass + self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata', + fake_change_instance_metadata) + + instance = self._create_fake_instance({'vm_state': vm_states.BUILDING}) + instance = dict(instance) + + self.assertRaises(exception.InstanceInvalidState, + self.compute_api.delete_instance_metadata, self.context, + instance, "key") + + self.assertRaises(exception.InstanceInvalidState, + self.compute_api.update_instance_metadata, self.context, + instance, "key") + def test_get_instance_faults(self): # Get an instances 
latest fault. instance = self._create_fake_instance() @@ -5451,6 +5646,50 @@ class ComputeAPITestCase(BaseTestCase): db.instance_destroy(self.context, instance['uuid']) + def test_spice_console(self): + # Make sure we can a spice console for an instance. + + fake_instance = {'uuid': 'fake_uuid', + 'host': 'fake_compute_host'} + fake_console_type = "spice-html5" + fake_connect_info = {'token': 'fake_token', + 'console_type': fake_console_type, + 'host': 'fake_console_host', + 'port': 'fake_console_port', + 'internal_access_path': 'fake_access_path'} + fake_connect_info2 = copy.deepcopy(fake_connect_info) + fake_connect_info2['access_url'] = 'fake_console_url' + + self.mox.StubOutWithMock(rpc, 'call') + + rpc_msg1 = {'method': 'get_spice_console', + 'args': {'instance': fake_instance, + 'console_type': fake_console_type}, + 'version': '2.24'} + rpc_msg2 = {'method': 'authorize_console', + 'args': fake_connect_info, + 'version': '1.0'} + + rpc.call(self.context, 'compute.%s' % fake_instance['host'], + rpc_msg1, None).AndReturn(fake_connect_info2) + rpc.call(self.context, CONF.consoleauth_topic, + rpc_msg2, None).AndReturn(None) + + self.mox.ReplayAll() + + console = self.compute_api.get_spice_console(self.context, + fake_instance, fake_console_type) + self.assertEqual(console, {'url': 'fake_console_url'}) + + def test_get_spice_console_no_host(self): + instance = self._create_fake_instance(params={'host': ''}) + + self.assertRaises(exception.InstanceNotReady, + self.compute_api.get_spice_console, + self.context, instance, 'spice') + + db.instance_destroy(self.context, instance['uuid']) + def test_get_backdoor_port(self): # Test api call to get backdoor_port. 
fake_backdoor_port = 59697 @@ -6090,81 +6329,6 @@ class ComputePolicyTestCase(BaseTestCase): availability_zone='1:1') -class ComputeHostAPITestCase(BaseTestCase): - def setUp(self): - super(ComputeHostAPITestCase, self).setUp() - self.host_api = compute_api.HostAPI() - - def _rpc_call_stub(self, call_info): - def fake_rpc_call(context, topic, msg, timeout=None): - call_info['context'] = context - call_info['topic'] = topic - call_info['msg'] = msg - self.stubs.Set(rpc, 'call', fake_rpc_call) - - def _pretend_fake_host_exists(self, ctxt): - """Sets it so that the host API always thinks that 'fake_host' - exists""" - self.mox.StubOutWithMock(self.host_api, 'does_host_exist') - self.host_api.does_host_exist(ctxt, 'fake_host').AndReturn(True) - self.mox.ReplayAll() - - def test_set_host_enabled(self): - ctxt = context.get_admin_context() - call_info = {} - self._rpc_call_stub(call_info) - - self._pretend_fake_host_exists(ctxt) - self.host_api.set_host_enabled(ctxt, 'fake_host', 'fake_enabled') - self.assertEqual(call_info['context'], ctxt) - self.assertEqual(call_info['topic'], 'compute.fake_host') - self.assertEqual(call_info['msg'], - {'method': 'set_host_enabled', - 'args': {'enabled': 'fake_enabled'}, - 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION}) - - def test_get_host_uptime(self): - ctxt = context.RequestContext('fake', 'fake') - call_info = {} - self._rpc_call_stub(call_info) - - self._pretend_fake_host_exists(ctxt) - self.host_api.get_host_uptime(ctxt, 'fake_host') - self.assertEqual(call_info['context'], ctxt) - self.assertEqual(call_info['topic'], 'compute.fake_host') - self.assertEqual(call_info['msg'], - {'method': 'get_host_uptime', - 'args': {}, - 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION}) - - def test_host_power_action(self): - ctxt = context.get_admin_context() - call_info = {} - self._rpc_call_stub(call_info) - self._pretend_fake_host_exists(ctxt) - self.host_api.host_power_action(ctxt, 'fake_host', 'fake_action') - 
self.assertEqual(call_info['context'], ctxt) - self.assertEqual(call_info['topic'], 'compute.fake_host') - self.assertEqual(call_info['msg'], - {'method': 'host_power_action', - 'args': {'action': 'fake_action'}, - 'version': - compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION}) - - def test_set_host_maintenance(self): - ctxt = context.get_admin_context() - call_info = {} - self._rpc_call_stub(call_info) - self._pretend_fake_host_exists(ctxt) - self.host_api.set_host_maintenance(ctxt, 'fake_host', 'fake_mode') - self.assertEqual(call_info['context'], ctxt) - self.assertEqual(call_info['topic'], 'compute.fake_host') - self.assertEqual(call_info['msg'], - {'method': 'host_maintenance_mode', - 'args': {'host': 'fake_host', 'mode': 'fake_mode'}, - 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION}) - - class KeypairAPITestCase(BaseTestCase): def setUp(self): super(KeypairAPITestCase, self).setUp() @@ -6284,7 +6448,7 @@ class DisabledInstanceTypesTestCase(BaseTestCase): """ Some instance-types are marked 'disabled' which means that they will not show up in customer-facing listings. We do, however, want those - instance-types to be availble for emergency migrations and for rebuilding + instance-types to be available for emergency migrations and for rebuilding of existing instances. 
One legitimate use of the 'disabled' field would be when phasing out a @@ -6534,7 +6698,7 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase): exc_info = sys.exc_info() compute_utils.add_instance_fault_from_exc(self.context, - instance_uuid, exc_info[0], exc_info=exc_info) + self.instance, exc_info[0], exc_info=exc_info) self.compute._deallocate_network(self.context, self.instance).AndRaise(InnerTestingException("Error")) self.compute._log_original_error(exc_info, instance_uuid) @@ -6584,7 +6748,7 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase): except Exception: exc_info = sys.exc_info() compute_utils.add_instance_fault_from_exc(self.context, - instance_uuid, exc_info[0], exc_info=exc_info) + self.instance, exc_info[0], exc_info=exc_info) self.compute._deallocate_network(self.context, self.instance) self.compute._reschedule(self.context, None, {}, instance_uuid, @@ -6612,7 +6776,7 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase): exc_info = sys.exc_info() compute_utils.add_instance_fault_from_exc(self.context, - instance_uuid, exc_info[0], exc_info=exc_info) + self.instance, exc_info[0], exc_info=exc_info) self.compute._deallocate_network(self.context, self.instance) self.compute._reschedule(self.context, None, {}, instance_uuid, diff --git a/nova/tests/compute/test_compute_cells.py b/nova/tests/compute/test_compute_cells.py index aa4b448d4..3c25f9b43 100644 --- a/nova/tests/compute/test_compute_cells.py +++ b/nova/tests/compute/test_compute_cells.py @@ -16,7 +16,11 @@ """ Tests For Compute w/ Cells """ +import functools + from nova.compute import cells_api as compute_cells_api +from nova import db +from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.tests.compute import test_compute @@ -28,17 +32,57 @@ ORIG_COMPUTE_API = None def stub_call_to_cells(context, instance, method, *args, **kwargs): fn = getattr(ORIG_COMPUTE_API, method) + original_instance = kwargs.pop('original_instance', 
None) + if original_instance: + instance = original_instance + # Restore this in 'child cell DB' + db.instance_update(context, instance['uuid'], + dict(vm_state=instance['vm_state'], + task_state=instance['task_state'])) + return fn(context, instance, *args, **kwargs) def stub_cast_to_cells(context, instance, method, *args, **kwargs): fn = getattr(ORIG_COMPUTE_API, method) + original_instance = kwargs.pop('original_instance', None) + if original_instance: + instance = original_instance + # Restore this in 'child cell DB' + db.instance_update(context, instance['uuid'], + dict(vm_state=instance['vm_state'], + task_state=instance['task_state'])) fn(context, instance, *args, **kwargs) -def deploy_stubs(stubs, api): - stubs.Set(api, '_call_to_cells', stub_call_to_cells) - stubs.Set(api, '_cast_to_cells', stub_cast_to_cells) +def deploy_stubs(stubs, api, original_instance=None): + call = stub_call_to_cells + cast = stub_cast_to_cells + + if original_instance: + kwargs = dict(original_instance=original_instance) + call = functools.partial(stub_call_to_cells, **kwargs) + cast = functools.partial(stub_cast_to_cells, **kwargs) + + stubs.Set(api, '_call_to_cells', call) + stubs.Set(api, '_cast_to_cells', cast) + + +def wrap_create_instance(func): + @functools.wraps(func) + def wrapper(self, *args, **kwargs): + instance = self._create_fake_instance() + + def fake(*args, **kwargs): + return instance + + self.stubs.Set(self, '_create_fake_instance', fake) + original_instance = jsonutils.to_primitive(instance) + deploy_stubs(self.stubs, self.compute_api, + original_instance=original_instance) + return func(self, *args, **kwargs) + + return wrapper class CellsComputeAPITestCase(test_compute.ComputeAPITestCase): @@ -84,6 +128,42 @@ class CellsComputeAPITestCase(test_compute.ComputeAPITestCase): def test_get_backdoor_port(self): self.skipTest("Test is incompatible with cells.") + def test_snapshot_given_image_uuid(self): + self.skipTest("Test doesn't apply to API cell.") + + 
@wrap_create_instance + def test_snapshot(self): + return super(CellsComputeAPITestCase, self).test_snapshot() + + @wrap_create_instance + def test_snapshot_image_metadata_inheritance(self): + return super(CellsComputeAPITestCase, + self).test_snapshot_image_metadata_inheritance() + + @wrap_create_instance + def test_snapshot_minram_mindisk(self): + return super(CellsComputeAPITestCase, + self).test_snapshot_minram_mindisk() + + @wrap_create_instance + def test_snapshot_minram_mindisk_VHD(self): + return super(CellsComputeAPITestCase, + self).test_snapshot_minram_mindisk_VHD() + + @wrap_create_instance + def test_snapshot_minram_mindisk_img_missing_minram(self): + return super(CellsComputeAPITestCase, + self).test_snapshot_minram_mindisk_img_missing_minram() + + @wrap_create_instance + def test_snapshot_minram_mindisk_no_image(self): + return super(CellsComputeAPITestCase, + self).test_snapshot_minram_mindisk_no_image() + + @wrap_create_instance + def test_backup(self): + return super(CellsComputeAPITestCase, self).test_backup() + class CellsComputePolicyTestCase(test_compute.ComputePolicyTestCase): def setUp(self): diff --git a/nova/tests/compute/test_compute_utils.py b/nova/tests/compute/test_compute_utils.py index f29c68627..6e7227d4c 100644 --- a/nova/tests/compute/test_compute_utils.py +++ b/nova/tests/compute/test_compute_utils.py @@ -69,8 +69,11 @@ class ComputeValidateDeviceTestCase(test.TestCase): lambda context, instance: self.data) def _validate_device(self, device=None): + bdms = db.block_device_mapping_get_all_by_instance( + self.context, self.instance['uuid']) return compute_utils.get_device_name_for_instance(self.context, self.instance, + bdms, device) @staticmethod diff --git a/nova/tests/compute/test_host_api.py b/nova/tests/compute/test_host_api.py index f00245d1e..95d3c4926 100644 --- a/nova/tests/compute/test_host_api.py +++ b/nova/tests/compute/test_host_api.py @@ -13,93 +13,114 @@ # License for the specific language governing permissions and 
limitations # under the License. -from nova.compute import api +from nova import compute +from nova.compute import rpcapi as compute_rpcapi from nova import context -from nova import db -from nova import exception +from nova.openstack.common import rpc from nova import test -from nova.tests import fake_hosts -class HostApiTestCase(test.TestCase): - """ - Tests 'host' subset of the compute api - """ - +class ComputeHostAPITestCase(test.TestCase): def setUp(self): - super(HostApiTestCase, self).setUp() - self.compute_rpcapi = api.compute_rpcapi - self.api = api.HostAPI() + super(ComputeHostAPITestCase, self).setUp() + self.host_api = compute.HostAPI() + self.ctxt = context.get_admin_context() - def test_bad_host_set_enabled(self): - """ - Tests that actions on single hosts that don't exist blow up without - having to reach the host via rpc. Should raise HostNotFound if you - try to update a host that is not in the DB + def _mock_rpc_call(self, expected_message, result=None): + if result is None: + result = 'fake-result' + self.mox.StubOutWithMock(rpc, 'call') + rpc.call(self.ctxt, 'compute.fake_host', + expected_message, None).AndReturn(result) + + def _mock_assert_host_exists(self): + """Sets it so that the host API always thinks that 'fake_host' + exists. 
""" - self.assertRaises(exception.HostNotFound, self.api.set_host_enabled, - context.get_admin_context(), "bogus_host_name", False) + self.mox.StubOutWithMock(self.host_api, '_assert_host_exists') + self.host_api._assert_host_exists(self.ctxt, 'fake_host') + + def test_set_host_enabled(self): + self._mock_assert_host_exists() + self._mock_rpc_call( + {'method': 'set_host_enabled', + 'args': {'enabled': 'fake_enabled'}, + 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION}) + + self.mox.ReplayAll() + result = self.host_api.set_host_enabled(self.ctxt, 'fake_host', + 'fake_enabled') + self.assertEqual('fake-result', result) + + def test_get_host_uptime(self): + self._mock_assert_host_exists() + self._mock_rpc_call( + {'method': 'get_host_uptime', + 'args': {}, + 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION}) + self.mox.ReplayAll() + result = self.host_api.get_host_uptime(self.ctxt, 'fake_host') + self.assertEqual('fake-result', result) + + def test_host_power_action(self): + self._mock_assert_host_exists() + self._mock_rpc_call( + {'method': 'host_power_action', + 'args': {'action': 'fake_action'}, + 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION}) + self.mox.ReplayAll() + result = self.host_api.host_power_action(self.ctxt, 'fake_host', + 'fake_action') + self.assertEqual('fake-result', result) - def test_list_compute_hosts(self): - ctx = context.get_admin_context() - self.mox.StubOutWithMock(db, 'service_get_all') - db.service_get_all(ctx, False).AndReturn(fake_hosts.SERVICES_LIST) + def test_set_host_maintenance(self): + self._mock_assert_host_exists() + self._mock_rpc_call( + {'method': 'host_maintenance_mode', + 'args': {'host': 'fake_host', 'mode': 'fake_mode'}, + 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION}) self.mox.ReplayAll() - compute_hosts = self.api.list_hosts(ctx, service="compute") + result = self.host_api.set_host_maintenance(self.ctxt, 'fake_host', + 'fake_mode') + self.assertEqual('fake-result', result) + + 
def test_service_get_all(self): + services = [dict(id=1, key1='val1', key2='val2', topic='compute', + host='host1'), + dict(id=2, key1='val2', key3='val3', topic='compute', + host='host2')] + exp_services = [] + for service in services: + exp_service = {} + exp_service.update(availability_zone='nova', **service) + exp_services.append(exp_service) + + self.mox.StubOutWithMock(self.host_api.db, + 'service_get_all') + + # Test no filters + self.host_api.db.service_get_all(self.ctxt, False).AndReturn( + services) + self.mox.ReplayAll() + result = self.host_api.service_get_all(self.ctxt) self.mox.VerifyAll() - expected = [host for host in fake_hosts.HOST_LIST - if host["service"] == "compute"] - self.assertEqual(expected, compute_hosts) + self.assertEqual(exp_services, result) - def test_describe_host(self): - """ - Makes sure that describe_host returns the correct information - given our fake input. - """ - ctx = context.get_admin_context() - self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host') - host_name = 'host_c1' - db.service_get_all_compute_by_host(ctx, host_name).AndReturn( - [{'host': 'fake_host', - 'compute_node': [ - {'vcpus': 4, - 'vcpus_used': 1, - 'memory_mb': 8192, - 'memory_mb_used': 2048, - 'local_gb': 1024, - 'local_gb_used': 648} - ] - }]) - self.mox.StubOutWithMock(db, 'instance_get_all_by_host') - db.instance_get_all_by_host(ctx, 'fake_host').AndReturn( - [{'project_id': 42, - 'vcpus': 1, - 'memory_mb': 2048, - 'root_gb': 648, - 'ephemeral_gb': 0, - }]) + # Test no filters #2 + self.mox.ResetAll() + self.host_api.db.service_get_all(self.ctxt, False).AndReturn( + services) + self.mox.ReplayAll() + result = self.host_api.service_get_all(self.ctxt, filters={}) + self.mox.VerifyAll() + self.assertEqual(exp_services, result) + + # Test w/ filter + self.mox.ResetAll() + self.host_api.db.service_get_all(self.ctxt, False).AndReturn( + services) self.mox.ReplayAll() - result = self.api.describe_host(ctx, host_name) - self.assertEqual(result, - 
[{'resource': {'cpu': 4, - 'disk_gb': 1024, - 'host': 'host_c1', - 'memory_mb': 8192, - 'project': '(total)'}}, - {'resource': {'cpu': 1, - 'disk_gb': 648, - 'host': 'host_c1', - 'memory_mb': 2048, - 'project': '(used_now)'}}, - {'resource': {'cpu': 1, - 'disk_gb': 648, - 'host': 'host_c1', - 'memory_mb': 2048, - 'project': '(used_max)'}}, - {'resource': {'cpu': 1, - 'disk_gb': 648, - 'host': 'host_c1', - 'memory_mb': 2048, - 'project': 42}}] - ) + result = self.host_api.service_get_all(self.ctxt, + filters=dict(key1='val2')) self.mox.VerifyAll() + self.assertEqual([exp_services[1]], result) diff --git a/nova/tests/compute/test_multiple_nodes.py b/nova/tests/compute/test_multiple_nodes.py index 78ed0cea7..27ee7aaba 100644 --- a/nova/tests/compute/test_multiple_nodes.py +++ b/nova/tests/compute/test_multiple_nodes.py @@ -80,6 +80,9 @@ class MultiNodeComputeTestCase(BaseTestCase): super(MultiNodeComputeTestCase, self).setUp() self.flags(compute_driver='nova.virt.fake.FakeDriver') self.compute = importutils.import_object(CONF.compute_manager) + self.flags(use_local=True, group='conductor') + self.conductor = self.start_service('conductor', + manager=CONF.conductor.manager) def test_update_available_resource_add_remove_node(self): ctx = context.get_admin_context() diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py index 3bfd51461..53d92a13f 100644 --- a/nova/tests/compute/test_resource_tracker.py +++ b/nova/tests/compute/test_resource_tracker.py @@ -297,8 +297,8 @@ class MissingComputeNodeTestCase(BaseTestCase): super(MissingComputeNodeTestCase, self).setUp() self.tracker = self._tracker() - self.stubs.Set(db, 'service_get_all_compute_by_host', - self._fake_service_get_all_compute_by_host) + self.stubs.Set(db, 'service_get_by_compute_host', + self._fake_service_get_by_compute_host) self.stubs.Set(db, 'compute_node_create', self._fake_create_compute_node) @@ -306,10 +306,10 @@ class 
MissingComputeNodeTestCase(BaseTestCase): self.created = True return self._create_compute_node() - def _fake_service_get_all_compute_by_host(self, ctx, host): + def _fake_service_get_by_compute_host(self, ctx, host): # return a service with no joined compute service = self._create_service() - return [service] + return service def test_create_compute_node(self): self.tracker.update_available_resource(self.context) @@ -330,8 +330,8 @@ class BaseTrackerTestCase(BaseTestCase): self.tracker = self._tracker() self._migrations = {} - self.stubs.Set(db, 'service_get_all_compute_by_host', - self._fake_service_get_all_compute_by_host) + self.stubs.Set(db, 'service_get_by_compute_host', + self._fake_service_get_by_compute_host) self.stubs.Set(db, 'compute_node_update', self._fake_compute_node_update) self.stubs.Set(db, 'migration_update', @@ -342,10 +342,10 @@ class BaseTrackerTestCase(BaseTestCase): self.tracker.update_available_resource(self.context) self.limits = self._limits() - def _fake_service_get_all_compute_by_host(self, ctx, host): + def _fake_service_get_by_compute_host(self, ctx, host): self.compute = self._create_compute_node() self.service = self._create_service(host, compute=self.compute) - return [self.service] + return self.service def _fake_compute_node_update(self, ctx, compute_node_id, values, prune_stats=False): diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py index a31d9a14b..b81e049bf 100644 --- a/nova/tests/compute/test_rpcapi.py +++ b/nova/tests/compute/test_rpcapi.py @@ -165,6 +165,11 @@ class ComputeRpcAPITestCase(test.TestCase): self._test_compute_api('get_vnc_console', 'call', instance=self.fake_instance, console_type='type') + def test_get_spice_console(self): + self._test_compute_api('get_spice_console', 'call', + instance=self.fake_instance, console_type='type', + version='2.24') + def test_host_maintenance_mode(self): self._test_compute_api('host_maintenance_mode', 'call', host_param='param', mode='mode', 
host='host') @@ -236,9 +241,8 @@ class ComputeRpcAPITestCase(test.TestCase): self._test_compute_api('reboot_instance', 'cast', instance=self.fake_instance, block_device_info={}, - network_info={}, reboot_type='type', - version='2.5') + version='2.23') def test_rebuild_instance(self): self._test_compute_api('rebuild_instance', 'cast', diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py index 46fadf4f0..30d176bbd 100644 --- a/nova/tests/conductor/test_conductor.py +++ b/nova/tests/conductor/test_conductor.py @@ -130,6 +130,16 @@ class _BaseTestCase(object): 'fake-window', 'fake-host') + def test_migration_get_in_progress_by_host_and_node(self): + self.mox.StubOutWithMock(db, + 'migration_get_in_progress_by_host_and_node') + db.migration_get_in_progress_by_host_and_node( + self.context, 'fake-host', 'fake-node').AndReturn('fake-result') + self.mox.ReplayAll() + result = self.conductor.migration_get_in_progress_by_host_and_node( + self.context, 'fake-host', 'fake-node') + self.assertEqual(result, 'fake-result') + def test_migration_create(self): inst = {'uuid': 'fake-uuid', 'host': 'fake-host', @@ -325,14 +335,23 @@ class _BaseTestCase(object): def test_instance_get_active_by_window(self): self.mox.StubOutWithMock(db, 'instance_get_active_by_window_joined') - db.instance_get_active_by_window_joined(self.context, 'fake-begin', - 'fake-end', 'fake-proj', - 'fake-host') + db.instance_get_active_by_window(self.context, 'fake-begin', + 'fake-end', 'fake-proj', + 'fake-host') self.mox.ReplayAll() self.conductor.instance_get_active_by_window(self.context, 'fake-begin', 'fake-end', 'fake-proj', 'fake-host') + def test_instance_get_active_by_window_joined(self): + self.mox.StubOutWithMock(db, 'instance_get_active_by_window_joined') + db.instance_get_active_by_window_joined(self.context, 'fake-begin', + 'fake-end', 'fake-proj', + 'fake-host') + self.mox.ReplayAll() + self.conductor.instance_get_active_by_window_joined( + self.context, 
'fake-begin', 'fake-end', 'fake-proj', 'fake-host') + def test_instance_destroy(self): self.mox.StubOutWithMock(db, 'instance_destroy') db.instance_destroy(self.context, 'fake-uuid') @@ -388,6 +407,25 @@ class _BaseTestCase(object): result = self.conductor.ping(self.context, 'foo') self.assertEqual(result, {'service': 'conductor', 'arg': 'foo'}) + def test_compute_node_create(self): + self.mox.StubOutWithMock(db, 'compute_node_create') + db.compute_node_create(self.context, 'fake-values').AndReturn( + 'fake-result') + self.mox.ReplayAll() + result = self.conductor.compute_node_create(self.context, + 'fake-values') + self.assertEqual(result, 'fake-result') + + def test_compute_node_update(self): + node = {'id': 'fake-id'} + self.mox.StubOutWithMock(db, 'compute_node_update') + db.compute_node_update(self.context, node['id'], 'fake-values', + False).AndReturn('fake-result') + self.mox.ReplayAll() + result = self.conductor.compute_node_update(self.context, node, + 'fake-values', False) + self.assertEqual(result, 'fake-result') + class ConductorTestCase(_BaseTestCase, test.TestCase): """Conductor Manager Tests.""" @@ -451,12 +489,31 @@ class ConductorTestCase(_BaseTestCase, test.TestCase): self.conductor.instance_get_all_by_filters(self.context, filters, 'fake-key', 'fake-sort') - def _test_stubbed(self, name, dbargs, condargs): + def test_instance_get_all_by_host(self): + self.mox.StubOutWithMock(db, 'instance_get_all_by_host') + self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node') + db.instance_get_all_by_host(self.context.elevated(), + 'host').AndReturn('result') + db.instance_get_all_by_host_and_node(self.context.elevated(), 'host', + 'node').AndReturn('result') + self.mox.ReplayAll() + result = self.conductor.instance_get_all_by_host(self.context, 'host') + self.assertEqual(result, 'result') + result = self.conductor.instance_get_all_by_host(self.context, 'host', + 'node') + self.assertEqual(result, 'result') + + def _test_stubbed(self, name, dbargs, 
condargs, + db_result_listified=False): + self.mox.StubOutWithMock(db, name) getattr(db, name)(self.context, *dbargs).AndReturn('fake-result') self.mox.ReplayAll() result = self.conductor.service_get_all_by(self.context, **condargs) - self.assertEqual(result, 'fake-result') + if db_result_listified: + self.assertEqual(['fake-result'], result) + else: + self.assertEqual('fake-result', result) def test_service_get_all(self): self._test_stubbed('service_get_all', (), {}) @@ -476,10 +533,11 @@ class ConductorTestCase(_BaseTestCase, test.TestCase): ('host',), dict(host='host')) - def test_service_get_all_compute_by_host(self): - self._test_stubbed('service_get_all_compute_by_host', + def test_service_get_by_compute_host(self): + self._test_stubbed('service_get_by_compute_host', ('host',), - dict(topic='compute', host='host')) + dict(topic='compute', host='host'), + db_result_listified=True) def test_service_get_by_args(self): self._test_stubbed('service_get_by_args', @@ -547,12 +605,16 @@ class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase): self.conductor.instance_get_all_by_filters(self.context, filters, 'fake-key', 'fake-sort') - def _test_stubbed(self, name, dbargs, condargs): + def _test_stubbed(self, name, dbargs, condargs, + db_result_listified=False): self.mox.StubOutWithMock(db, name) getattr(db, name)(self.context, *dbargs).AndReturn('fake-result') self.mox.ReplayAll() result = self.conductor.service_get_all_by(self.context, **condargs) - self.assertEqual(result, 'fake-result') + if db_result_listified: + self.assertEqual(['fake-result'], result) + else: + self.assertEqual('fake-result', result) def test_service_get_all(self): self._test_stubbed('service_get_all', (), {}) @@ -572,10 +634,11 @@ class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase): ('host',), dict(host='host')) - def test_service_get_all_compute_by_host(self): - self._test_stubbed('service_get_all_compute_by_host', + def test_service_get_by_compute_host(self): + 
self._test_stubbed('service_get_by_compute_host', ('host',), - dict(topic='compute', host='host')) + dict(topic='compute', host='host'), + db_result_listified=True) class ConductorAPITestCase(_BaseTestCase, test.TestCase): @@ -645,19 +708,22 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase): def test_instance_get_all(self): self.mox.StubOutWithMock(db, 'instance_get_all_by_filters') db.instance_get_all(self.context) - db.instance_get_all_by_host(self.context.elevated(), 'fake-host') db.instance_get_all_by_filters(self.context, {'name': 'fake-inst'}, 'updated_at', 'asc') self.mox.ReplayAll() self.conductor.instance_get_all(self.context) - self.conductor.instance_get_all_by_host(self.context, 'fake-host') self.conductor.instance_get_all_by_filters(self.context, {'name': 'fake-inst'}, 'updated_at', 'asc') def _test_stubbed(self, name, *args, **kwargs): + if args and isinstance(args[0], FakeContext): + ctxt = args[0] + args = args[1:] + else: + ctxt = self.context self.mox.StubOutWithMock(db, name) - getattr(db, name)(self.context, *args).AndReturn('fake-result') + getattr(db, name)(ctxt, *args).AndReturn('fake-result') if name == 'service_destroy': # TODO(russellb) This is a hack ... SetUp() starts the conductor() # service. 
There is a cleanup step that runs after this test which @@ -681,8 +747,8 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase): def test_service_get_all_by_host(self): self._test_stubbed('service_get_all_by_host', 'host') - def test_service_get_all_compute_by_host(self): - self._test_stubbed('service_get_all_compute_by_host', 'host') + def test_service_get_by_compute_host(self): + self._test_stubbed('service_get_by_compute_host', 'host') def test_service_create(self): self._test_stubbed('service_create', {}) @@ -690,6 +756,22 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase): def test_service_destroy(self): self._test_stubbed('service_destroy', '', returns=False) + def test_service_update(self): + ctxt = self.context + self.mox.StubOutWithMock(db, 'service_update') + db.service_update(ctxt, '', {}).AndReturn('fake-result') + self.mox.ReplayAll() + result = self.conductor.service_update(self.context, {'id': ''}, {}) + self.assertEqual(result, 'fake-result') + + def test_instance_get_all_by_host(self): + self._test_stubbed('instance_get_all_by_host', + self.context.elevated(), 'host') + + def test_instance_get_all_by_host_and_node(self): + self._test_stubbed('instance_get_all_by_host_and_node', + self.context.elevated(), 'host', 'node') + def test_ping(self): timeouts = [] calls = dict(count=0) diff --git a/nova/tests/fake_imagebackend.py b/nova/tests/fake_imagebackend.py index 978c879fd..c284a5042 100644 --- a/nova/tests/fake_imagebackend.py +++ b/nova/tests/fake_imagebackend.py @@ -28,7 +28,7 @@ class Backend(object): def image(self, instance, name, image_type=''): class FakeImage(imagebackend.Image): def __init__(self, instance, name): - self.path = os.path.join(instance, name) + self.path = os.path.join(instance['name'], name) def create_image(self, prepare_template, base, size, *args, **kwargs): diff --git a/nova/tests/fake_libvirt_utils.py b/nova/tests/fake_libvirt_utils.py index bb789b74a..b3d842468 100644 --- a/nova/tests/fake_libvirt_utils.py 
+++ b/nova/tests/fake_libvirt_utils.py @@ -17,6 +17,12 @@ import os import StringIO +from nova.openstack.common import cfg + + +CONF = cfg.CONF +CONF.import_opt('instances_path', 'nova.compute.manager') + files = {'console.log': True} disk_sizes = {} @@ -133,3 +139,8 @@ def get_fs_info(path): def fetch_image(context, target, image_id, user_id, project_id): pass + + +def get_instance_path(instance): + # TODO(mikal): we should really just call the real one here + return os.path.join(CONF.instances_path, instance['name']) diff --git a/nova/tests/fake_policy.py b/nova/tests/fake_policy.py index c5d160209..04e4adbbd 100644 --- a/nova/tests/fake_policy.py +++ b/nova/tests/fake_policy.py @@ -42,6 +42,7 @@ policy_data = """ "compute:unlock": "", "compute:get_vnc_console": "", + "compute:get_spice_console": "", "compute:get_console_output": "", "compute:associate_floating_ip": "", @@ -104,6 +105,7 @@ policy_data = """ "compute_extension:admin_actions:migrate": "", "compute_extension:aggregates": "", "compute_extension:agents": "", + "compute_extension:cells": "", "compute_extension:certificates": "", "compute_extension:cloudpipe": "", "compute_extension:cloudpipe_update": "", @@ -136,10 +138,10 @@ policy_data = """ "compute_extension:instance_usage_audit_log": "", "compute_extension:keypairs": "", "compute_extension:multinic": "", - "compute_extension:admin_networks": "", - "compute_extension:admin_networks:view": "", + "compute_extension:networks": "", + "compute_extension:networks:view": "", "compute_extension:networks_associate": "", - "compute_extension:os-networks": "", + "compute_extension:os-tenant-networks": "", "compute_extension:quotas:show": "", "compute_extension:quotas:update": "", "compute_extension:quota_classes": "", @@ -156,6 +158,8 @@ policy_data = """ "compute_extension:volumes": "", "compute_extension:volumetypes": "", "compute_extension:zones": "", + "compute_extension:availability_zone:list": "", + "compute_extension:availability_zone:detail": 
"is_admin:True", "volume:create": "", diff --git a/nova/tests/fakelibvirt.py b/nova/tests/fakelibvirt.py index 8d9561c7e..a573b7d1c 100644 --- a/nova/tests/fakelibvirt.py +++ b/nova/tests/fakelibvirt.py @@ -414,6 +414,7 @@ class Domain(object): <input type='tablet' bus='usb'/> <input type='mouse' bus='ps2'/> <graphics type='vnc' port='-1' autoport='yes'/> + <graphics type='spice' port='-1' autoport='yes'/> <video> <model type='cirrus' vram='9216' heads='1'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x02' diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.utils.p.gz Binary files differindex 861c1ee8e..df40b08c0 100644 --- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.utils.p.gz +++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.utils.p.gz diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.virt.configdrive.p.gz Binary files differnew file mode 100644 index 000000000..b51766f75 --- /dev/null +++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.virt.configdrive.p.gz diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz Binary files differindex acf47a4f6..092a1f933 100644 --- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz +++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz diff --git 
a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz Binary files differindex af57ccc47..77f333c00 100644 --- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz +++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz Binary files differindex b67b1a894..8ab166a60 100644 --- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz +++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz Binary files differindex 24fb6e539..97e96be17 100644 --- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz +++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz Binary files differindex 0634adcba..728464ca9 100644 --- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz +++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 
7c13796a6..9dd9e5121 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -17,7 +17,10 @@ import datetime +import filecmp +import os import random +import tempfile import time import glanceclient.exc @@ -468,6 +471,40 @@ class TestGlanceImageService(test.TestCase): self.flags(glance_num_retries=1) service.download(self.context, image_id, writer) + def test_download_file_url(self): + class MyGlanceStubClient(glance_stubs.StubGlanceClient): + """A client that returns a file url.""" + + (outfd, s_tmpfname) = tempfile.mkstemp(prefix='directURLsrc') + outf = os.fdopen(outfd, 'w') + inf = open('/dev/urandom', 'r') + for i in range(10): + _data = inf.read(1024) + outf.write(_data) + outf.close() + + def get(self, image_id): + return type('GlanceTestDirectUrlMeta', (object,), + {'direct_url': 'file://%s' + self.s_tmpfname}) + + client = MyGlanceStubClient() + (outfd, tmpfname) = tempfile.mkstemp(prefix='directURLdst') + writer = os.fdopen(outfd, 'w') + + service = self._create_image_service(client) + image_id = 1 # doesn't matter + + self.flags(allowed_direct_url_schemes=['file']) + service.download(self.context, image_id, writer) + writer.close() + + # compare the two files + rc = filecmp.cmp(tmpfname, client.s_tmpfname) + self.assertTrue(rc, "The file %s and %s should be the same" % + (tmpfname, client.s_tmpfname)) + os.remove(client.s_tmpfname) + os.remove(tmpfname) + def test_client_forbidden_converts_to_imagenotauthed(self): class MyGlanceStubClient(glance_stubs.StubGlanceClient): """A client that raises a Forbidden exception.""" diff --git a/nova/tests/image/test_s3.py b/nova/tests/image/test_s3.py index 4f8790cc7..0afe397a2 100644 --- a/nova/tests/image/test_s3.py +++ b/nova/tests/image/test_s3.py @@ -129,7 +129,7 @@ class TestS3ImageService(test.TestCase): 'snapshot_id': 'snap-12345678', 'delete_on_termination': True}, {'device_name': '/dev/sda2', - 'virutal_name': 'ephemeral0'}, + 'virtual_name': 'ephemeral0'}, {'device_name': 
'/dev/sdb0', 'no_device': True}]}} _manifest, image, image_uuid = self.image_service._s3_parse_manifest( @@ -156,7 +156,7 @@ class TestS3ImageService(test.TestCase): 'snapshot_id': 'snap-12345678', 'delete_on_termination': True}, {'device_name': '/dev/sda2', - 'virutal_name': 'ephemeral0'}, + 'virtual_name': 'ephemeral0'}, {'device_name': '/dev/sdb0', 'no_device': True}] self.assertEqual(block_device_mapping, expected_bdm) diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl index 0dd777fe2..fe0613646 100644 --- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl +++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl @@ -89,6 +89,14 @@ "updated": "%(timestamp)s" }, { + "alias": "os-cells", + "description": "%(text)s", + "links": [], + "name": "Cells", + "namespace": "http://docs.openstack.org/compute/ext/cells/api/v1.1", + "updated": "%(timestamp)s" + }, + { "alias": "os-certificates", "description": "%(text)s", "links": [], @@ -305,19 +313,19 @@ "updated": "%(timestamp)s" }, { - "alias": "os-admin-networks", + "alias": "os-networks", "description": "%(text)s", "links": [], - "name": "AdminNetworks", - "namespace": "http://docs.openstack.org/compute/ext/os-admin-networks/api/v1.1", + "name": "Networks", + "namespace": "http://docs.openstack.org/compute/ext/os-networks/api/v1.1", "updated": "%(timestamp)s" }, { - "alias": "os-networks", + "alias": "os-tenant-networks", "description": "%(text)s", "links": [], - "name": "OSNetworks", - "namespace": "http://docs.openstack.org/compute/ext/os-networks/api/v1.1", + "name": "OSTenantNetworks", + "namespace": "http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2", "updated": "%(timestamp)s" }, { diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl 
b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl index fe34f369b..2051d891a 100644 --- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl +++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl @@ -33,6 +33,9 @@ <extension alias="os-agents" name="Agents" namespace="http://docs.openstack.org/compute/ext/agents/api/v2" updated="%(timestamp)s"> <description>%(text)s</description> </extension> + <extension alias="os-cells" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/cells/api/v1.1" name="Cells"> + <description>%(text)s</description> + </extension> <extension alias="os-certificates" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/certificates/api/v1.1" name="Certificates"> <description>%(text)s</description> </extension> @@ -114,10 +117,10 @@ <extension alias="os-multiple-create" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/multiplecreate/api/v1.1" name="MultipleCreate"> <description>%(text)s</description> </extension> - <extension alias="os-admin-networks" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/os-admin-networks/api/v1.1" name="AdminNetworks"> + <extension alias="os-networks" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/os-networks/api/v1.1" name="Networks"> <description>%(text)s</description> </extension> - <extension alias="os-networks" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/os-networks/api/v1.1" name="OSNetworks"> + <extension alias="os-tenant-networks" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2" name="OSTenantNetworks"> <description>%(text)s</description> </extension> <extension alias="os-networks-associate" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/networks_associate/api/v2" name="NetworkAssociationSupport"> diff --git 
a/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-details-resp.json.tpl b/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-details-resp.json.tpl new file mode 100644 index 000000000..6d44692e1 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-details-resp.json.tpl @@ -0,0 +1,48 @@ +{ + "availabilityZoneInfo": [ + { + "zoneName": "zone-1", + "zoneState": { + "available": true + }, + "hosts": { + "fake_host-1": { + "nova-compute": { + "active": true, + "available": true, + "updated_at": "2012-12-26T14:45:25.000000" + } + } + } + }, + { + "zoneName": "internal", + "zoneState": { + "available": true + }, + "hosts": { + "fake_host-1": { + "nova-sched": { + "active": true, + "available": true, + "updated_at": "2012-12-26T14:45:25.000000" + } + }, + "fake_host-2": { + "nova-network": { + "active": true, + "available": false, + "updated_at": "2012-12-26T14:45:24.000000" + } + } + } + }, + { + "zoneName": "zone-2", + "zoneState": { + "available": false + }, + "hosts": null + } + ] +}
\ No newline at end of file diff --git a/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-details-resp.xml.tpl b/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-details-resp.xml.tpl new file mode 100644 index 000000000..856a64957 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-details-resp.xml.tpl @@ -0,0 +1,44 @@ +<?xml version='1.0' encoding='UTF-8'?> +<availabilityZones + xmlns:os-availability-zone="http://docs.openstack.org/compute/ext/availabilityzone/api/v1.1"> + <availabilityZone name="zone-1"> + <zoneState available="True" /> + <hosts> + <host name="fake_host-1"> + <services> + <service name="nova-compute"> + <serviceState available="True" active="True" + updated_at="2012-12-26 14:45:25" /> + </service> + </services> + </host> + </hosts> + <metadata /> + </availabilityZone> + <availabilityZone name="internal"> + <zoneState available="True" /> + <hosts> + <host name="fake_host-1"> + <services> + <service name="nova-sched"> + <serviceState available="True" active="True" + updated_at="2012-12-26 14:45:25" /> + </service> + </services> + </host> + <host name="fake_host-2"> + <services> + <service name="nova-network"> + <serviceState available="False" active="True" + updated_at="2012-12-26 14:45:24" /> + </service> + </services> + </host> + </hosts> + <metadata /> + </availabilityZone> + <availabilityZone name="zone-2"> + <zoneState available="False" /> + <metadata /> + </availabilityZone> +</availabilityZones>
\ No newline at end of file diff --git a/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-get-resp.json.tpl new file mode 100644 index 000000000..381708aaf --- /dev/null +++ b/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-get-resp.json.tpl @@ -0,0 +1,18 @@ +{ + "availabilityZoneInfo": [ + { + "zoneName": "zone-1", + "zoneState": { + "available": true + }, + "hosts": null + }, + { + "zoneName": "zone-2", + "zoneState": { + "available": false + }, + "hosts": null + } + ] +}
\ No newline at end of file diff --git a/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-get-resp.xml.tpl new file mode 100644 index 000000000..1eff177de --- /dev/null +++ b/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-get-resp.xml.tpl @@ -0,0 +1,12 @@ +<?xml version='1.0' encoding='UTF-8'?> +<availabilityZones + xmlns:os-availability-zone="http://docs.openstack.org/compute/ext/availabilityzone/api/v1.1"> + <availabilityZone name="zone-1"> + <zoneState available="True" /> + <metadata /> + </availabilityZone> + <availabilityZone name="zone-2"> + <zoneState available="False" /> + <metadata /> + </availabilityZone> +</availabilityZones>
\ No newline at end of file diff --git a/nova/tests/integrated/api_samples/os-cells/cells-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-cells/cells-get-resp.json.tpl new file mode 100644 index 000000000..2993b1df8 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-cells/cells-get-resp.json.tpl @@ -0,0 +1,9 @@ +{ + "cell": { + "name": "cell3", + "username": "username3", + "rpc_host": null, + "rpc_port": null, + "type": "child" + } +} diff --git a/nova/tests/integrated/api_samples/os-cells/cells-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-cells/cells-get-resp.xml.tpl new file mode 100644 index 000000000..d31a674a2 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-cells/cells-get-resp.xml.tpl @@ -0,0 +1,2 @@ +<?xml version='1.0' encoding='UTF-8'?> +<cell xmlns="http://docs.rackspacecloud.com/servers/api/v1.0" name="cell3" username="username3" rpc_port="None" rpc_host="None" type="child"/> diff --git a/nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.json.tpl b/nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.json.tpl new file mode 100644 index 000000000..b16e12cd6 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.json.tpl @@ -0,0 +1,4 @@ +{ + "cells": [] +} + diff --git a/nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.xml.tpl b/nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.xml.tpl new file mode 100644 index 000000000..32fef4f04 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.xml.tpl @@ -0,0 +1,2 @@ +<?xml version='1.0' encoding='UTF-8'?> +<cells xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"/> diff --git a/nova/tests/integrated/api_samples/os-cells/cells-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-cells/cells-list-resp.json.tpl new file mode 100644 index 000000000..3d7a6c207 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-cells/cells-list-resp.json.tpl @@ -0,0 
+1,39 @@ +{ + "cells": [ + { + "name": "cell1", + "username": "username1", + "rpc_host": null, + "rpc_port": null, + "type": "child" + }, + { + "name": "cell2", + "username": "username2", + "rpc_host": null, + "rpc_port": null, + "type": "parent" + }, + { + "name": "cell3", + "username": "username3", + "rpc_host": null, + "rpc_port": null, + "type": "child" + }, + { + "name": "cell4", + "username": "username4", + "rpc_host": null, + "rpc_port": null, + "type": "parent" + }, + { + "name": "cell5", + "username": "username5", + "rpc_host": null, + "rpc_port": null, + "type": "child" + } + ] +} diff --git a/nova/tests/integrated/api_samples/os-cells/cells-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-cells/cells-list-resp.xml.tpl new file mode 100644 index 000000000..58312201f --- /dev/null +++ b/nova/tests/integrated/api_samples/os-cells/cells-list-resp.xml.tpl @@ -0,0 +1,8 @@ +<?xml version='1.0' encoding='UTF-8'?> +<cells xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"> + <cell name="cell1" username="username1" rpc_port="None" rpc_host="None" type="child"/> + <cell name="cell2" username="username2" rpc_port="None" rpc_host="None" type="parent"/> + <cell name="cell3" username="username3" rpc_port="None" rpc_host="None" type="child"/> + <cell name="cell4" username="username4" rpc_port="None" rpc_host="None" type="parent"/> + <cell name="cell5" username="username5" rpc_port="None" rpc_host="None" type="child"/> +</cells> diff --git a/nova/tests/integrated/api_samples/os-consoles/get-spice-console-post-req.json.tpl b/nova/tests/integrated/api_samples/os-consoles/get-spice-console-post-req.json.tpl new file mode 100644 index 000000000..d04f7c7ae --- /dev/null +++ b/nova/tests/integrated/api_samples/os-consoles/get-spice-console-post-req.json.tpl @@ -0,0 +1,5 @@ +{ + "os-getSPICEConsole": { + "type": "spice-html5" + } +} diff --git a/nova/tests/integrated/api_samples/os-consoles/get-spice-console-post-req.xml.tpl 
b/nova/tests/integrated/api_samples/os-consoles/get-spice-console-post-req.xml.tpl new file mode 100644 index 000000000..c8cd2df9f --- /dev/null +++ b/nova/tests/integrated/api_samples/os-consoles/get-spice-console-post-req.xml.tpl @@ -0,0 +1,4 @@ +<?xml version="1.0" encoding="UTF-8"?> +<os-getSPICEConsole> + <type>spice-html5</type> +</os-getSPICEConsole> diff --git a/nova/tests/integrated/api_samples/os-consoles/get-spice-console-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-consoles/get-spice-console-post-resp.json.tpl new file mode 100644 index 000000000..20e260e9e --- /dev/null +++ b/nova/tests/integrated/api_samples/os-consoles/get-spice-console-post-resp.json.tpl @@ -0,0 +1,6 @@ +{ + "console": { + "type": "spice-html5", + "url":"%(url)s" + } +} diff --git a/nova/tests/integrated/api_samples/os-consoles/get-spice-console-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-consoles/get-spice-console-post-resp.xml.tpl new file mode 100644 index 000000000..77e35ae5b --- /dev/null +++ b/nova/tests/integrated/api_samples/os-consoles/get-spice-console-post-resp.xml.tpl @@ -0,0 +1,5 @@ +<?xml version='1.0' encoding='UTF-8'?> +<console> + <type>spice-html5</type> + <url>%(url)s</url> +</console> diff --git a/nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.json.tpl index eeb191597..504f66f59 100644 --- a/nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.json.tpl +++ b/nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.json.tpl @@ -21,9 +21,14 @@ "zone": "internal" }, { - "host_name": "%(host_name)s", - "service": "conductor", - "zone": "internal" + "host_name": "%(host_name)s", + "service": "conductor", + "zone": "internal" + }, + { + "host_name": "%(host_name)s", + "service": "cells", + "zone": "internal" } ] } diff --git a/nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.xml.tpl 
b/nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.xml.tpl index 25ef5a299..4e9d3195d 100644 --- a/nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.xml.tpl +++ b/nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.xml.tpl @@ -5,4 +5,5 @@ <host host_name="%(host_name)s" service="network"/> <host host_name="%(host_name)s" service="scheduler"/> <host host_name="%(host_name)s" service="conductor"/> + <host host_name="%(host_name)s" service="cells"/> </hosts> diff --git a/nova/tests/integrated/api_samples/os-networks/networks-list-res.json.tpl b/nova/tests/integrated/api_samples/os-tenant-networks/networks-list-res.json.tpl index 757084d2f..757084d2f 100644 --- a/nova/tests/integrated/api_samples/os-networks/networks-list-res.json.tpl +++ b/nova/tests/integrated/api_samples/os-tenant-networks/networks-list-res.json.tpl diff --git a/nova/tests/integrated/api_samples/os-networks/networks-post-req.json.tpl b/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-req.json.tpl index fb1c2d3d0..fb1c2d3d0 100644 --- a/nova/tests/integrated/api_samples/os-networks/networks-post-req.json.tpl +++ b/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-req.json.tpl diff --git a/nova/tests/integrated/api_samples/os-networks/networks-post-res.json.tpl b/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-res.json.tpl index ff9e2273d..ff9e2273d 100644 --- a/nova/tests/integrated/api_samples/os-networks/networks-post-res.json.tpl +++ b/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-res.json.tpl diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py index e20d6881b..f17dc025f 100644 --- a/nova/tests/integrated/integrated_helpers.py +++ b/nova/tests/integrated/integrated_helpers.py @@ -27,7 +27,7 @@ import nova.image.glance from nova.openstack.common import cfg from nova.openstack.common.log import logging from nova import service -from nova import test 
# For the flags +from nova import test from nova.tests import fake_crypto import nova.tests.image.fake from nova.tests.integrated.api import client @@ -35,6 +35,8 @@ from nova.tests.integrated.api import client CONF = cfg.CONF LOG = logging.getLogger(__name__) +CONF = cfg.CONF +CONF.import_opt('manager', 'nova.cells.opts', group='cells') def generate_random_alphanumeric(length): @@ -81,6 +83,7 @@ class _IntegratedTestBase(test.TestCase): self.scheduler = self.start_service('cert') self.network = self.start_service('network') self.scheduler = self.start_service('scheduler') + self.cells = self.start_service('cells', manager=CONF.cells.manager) self._start_api_service() diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py index 0cbc1352b..f101da243 100644 --- a/nova/tests/integrated/test_api_samples.py +++ b/nova/tests/integrated/test_api_samples.py @@ -28,14 +28,13 @@ from lxml import etree from nova.api.metadata import password from nova.api.openstack.compute.contrib import coverage_ext # Import extensions to pull in osapi_compute_extension CONF option used below. 
-from nova.api.openstack.compute import extensions from nova.cloudpipe.pipelib import CloudPipe from nova import context from nova import db from nova.db.sqlalchemy import models from nova import exception -from nova.network import api -from nova.network.manager import NetworkManager +from nova.network import api as network_api +from nova.network import manager as network_manager from nova.openstack.common import cfg from nova.openstack.common import importutils from nova.openstack.common import jsonutils @@ -55,6 +54,8 @@ CONF.import_opt('osapi_compute_extension', CONF.import_opt('vpn_image_id', 'nova.cloudpipe.pipelib') CONF.import_opt('osapi_compute_link_prefix', 'nova.api.openstack.common') CONF.import_opt('osapi_glance_link_prefix', 'nova.api.openstack.common') +CONF.import_opt('enable', 'nova.cells.opts', group='cells') +CONF.import_opt('db_check_interval', 'nova.cells.state', group='cells') LOG = logging.getLogger(__name__) @@ -143,7 +144,7 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase): template = self._get_template(name) if self.generate_samples and not os.path.exists(template): - with open(template, 'w') as outf: + with open(template, 'w'): pass with open(template) as inf: return inf.read().strip() @@ -170,23 +171,32 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase): if not isinstance(result, list): raise NoMatch( _('Result: %(result)s is not a list.') % locals()) - if len(expected) != len(result): - raise NoMatch( - _('Length mismatch: %(result)s\n%(expected)s.') - % locals()) + + expected = expected[:] + extra = [] for res_obj in result: - for ex_obj in expected: + for i, ex_obj in enumerate(expected): try: - res = self._compare_result(subs, ex_obj, res_obj) + matched_value = self._compare_result(subs, ex_obj, + res_obj) + del expected[i] break except NoMatch: pass else: - raise NoMatch( - _('Result: %(res_obj)s not in %(expected)s.') - % locals()) - matched_value = res or matched_value + extra.append(res_obj) + + 
error = [] + if expected: + error.append(_('Extra items in expected:')) + error.extend([repr(o) for o in expected]) + + if extra: + error.append(_('Extra items in result:')) + error.extend([repr(o) for o in extra]) + if error: + raise NoMatch('\n'.join(error)) elif isinstance(expected, basestring) and '%' in expected: # NOTE(vish): escape stuff for regex for char in '[]<>?': @@ -372,7 +382,7 @@ class ApiSamplesTrap(ApiSampleTestBase): do_not_approve_additions.append('os-fping') do_not_approve_additions.append('os-hypervisors') do_not_approve_additions.append('os-instance_usage_audit_log') - do_not_approve_additions.append('os-admin-networks') + do_not_approve_additions.append('os-networks') do_not_approve_additions.append('os-services') do_not_approve_additions.append('os-volumes') @@ -671,7 +681,7 @@ class ImagesSampleJsonTest(ApiSampleTestBase): return self._verify_response('images-details-get-resp', subs, response) def test_image_metadata_get(self): - # Get api sample of a image metadata request. + # Get api sample of an image metadata request. image_id = fake.get_valid_image_id() response = self._do_get('images/%s/metadata' % image_id) subs = self._get_regexes() @@ -700,7 +710,7 @@ class ImagesSampleJsonTest(ApiSampleTestBase): subs, response) def test_image_meta_key_get(self): - # Get api sample of a image metadata key request. + # Get api sample of an image metadata key request. 
image_id = fake.get_valid_image_id() key = "kernel_id" response = self._do_get('images/%s/metadata/%s' % (image_id, key)) @@ -1502,7 +1512,8 @@ class CloudPipeSampleJsonTest(ApiSampleTestBase): 'vpn_public_port': 22} self.stubs.Set(CloudPipe, 'get_encoded_zip', get_user_data) - self.stubs.Set(NetworkManager, "get_network", network_api_get) + self.stubs.Set(network_manager.NetworkManager, "get_network", + network_api_get) def generalize_subs(self, subs, vanilla_regexes): subs['project_id'] = 'cloudpipe-[0-9a-f-]+' @@ -2091,8 +2102,8 @@ class AdminActionsSamplesJsonTest(ServersSampleBase): hypervisor_type='bar', hypervisor_version='1', disabled=False) - return [{'compute_node': [service]}] - self.stubs.Set(db, "service_get_all_compute_by_host", fake_get_compute) + return {'compute_node': [service]} + self.stubs.Set(db, "service_get_by_compute_host", fake_get_compute) response = self._do_post('servers/%s/action' % self.uuid, 'admin-actions-live-migrate', @@ -2114,6 +2125,11 @@ class ConsolesSampleJsonTests(ServersSampleBase): extension_name = ("nova.api.openstack.compute.contrib" ".consoles.Consoles") + def setUp(self): + super(ConsolesSampleJsonTests, self).setUp() + self.flags(vnc_enabled=True) + self.flags(enabled=True, group='spice') + def test_get_vnc_console(self): uuid = self._post_server() response = self._do_post('servers/%s/action' % uuid, @@ -2126,6 +2142,18 @@ class ConsolesSampleJsonTests(ServersSampleBase): return self._verify_response('get-vnc-console-post-resp', subs, response) + def test_get_spice_console(self): + uuid = self._post_server() + response = self._do_post('servers/%s/action' % uuid, + 'get-spice-console-post-req', + {'action': 'os-getSPICEConsole'}) + self.assertEqual(response.status, 200) + subs = self._get_regexes() + subs["url"] = \ + "((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)" + return self._verify_response('get-spice-console-post-resp', + subs, response) + class ConsolesSampleXmlTests(ConsolesSampleJsonTests): ctype 
= 'xml' @@ -2359,8 +2387,8 @@ class DiskConfigXmlTest(DiskConfigJsonTest): class OsNetworksJsonTests(ApiSampleTestBase): - extension_name = ("nova.api.openstack.compute.contrib.os_networks" - ".Os_networks") + extension_name = ("nova.api.openstack.compute.contrib.os_tenant_networks" + ".Os_tenant_networks") def setUp(self): super(OsNetworksJsonTests, self).setUp() @@ -2377,21 +2405,22 @@ class OsNetworksJsonTests(ApiSampleTestBase): self.stubs.Set(nova.quota.QuotaEngine, "rollback", fake) def test_list_networks(self): - response = self._do_get('os-networks') + response = self._do_get('os-tenant-networks') self.assertEqual(response.status, 200) subs = self._get_regexes() return self._verify_response('networks-list-res', subs, response) def test_create_network(self): - response = self._do_post('os-networks', "networks-post-req", {}) + response = self._do_post('os-tenant-networks', "networks-post-req", {}) self.assertEqual(response.status, 200) subs = self._get_regexes() self._verify_response('networks-post-res', subs, response) - def test_delete_networK(self): - response = self._do_post('os-networks', "networks-post-req", {}) + def test_delete_network(self): + response = self._do_post('os-tenant-networks', "networks-post-req", {}) net = json.loads(response.read()) - response = self._do_delete('os-networks/%s' % net["network"]["id"]) + response = self._do_delete('os-tenant-networks/%s' % + net["network"]["id"]) self.assertEqual(response.status, 202) @@ -2406,7 +2435,7 @@ class NetworksAssociateJsonTests(ApiSampleTestBase): f['osapi_compute_extension'] = CONF.osapi_compute_extension[:] # Networks_associate requires Networks to be update f['osapi_compute_extension'].append( - 'nova.api.openstack.compute.contrib.admin_networks.Admin_networks') + 'nova.api.openstack.compute.contrib.os_networks.Os_networks') return f def setUp(self): @@ -2417,28 +2446,28 @@ class NetworksAssociateJsonTests(ApiSampleTestBase): project=NetworksAssociateJsonTests._sentinel): return True - 
self.stubs.Set(api.API, "associate", fake_associate) + self.stubs.Set(network_api.API, "associate", fake_associate) def test_disassociate(self): - response = self._do_post('os-admin-networks/1/action', + response = self._do_post('os-networks/1/action', 'network-disassociate-req', {}) self.assertEqual(response.status, 202) def test_disassociate_host(self): - response = self._do_post('os-admin-networks/1/action', + response = self._do_post('os-networks/1/action', 'network-disassociate-host-req', {}) self.assertEqual(response.status, 202) def test_disassociate_project(self): - response = self._do_post('os-admin-networks/1/action', + response = self._do_post('os-networks/1/action', 'network-disassociate-project-req', {}) self.assertEqual(response.status, 202) def test_associate_host(self): - response = self._do_post('os-admin-networks/1/action', + response = self._do_post('os-networks/1/action', 'network-associate-host-req', {"host": "testHost"}) self.assertEqual(response.status, 202) @@ -2500,3 +2529,63 @@ class QuotaClassesSampleJsonTests(ApiSampleTestBase): class QuotaClassesSampleXmlTests(QuotaClassesSampleJsonTests): ctype = "xml" + + +class CellsSampleJsonTest(ApiSampleTestBase): + extension_name = "nova.api.openstack.compute.contrib.cells.Cells" + + def setUp(self): + # db_check_interval < 0 makes cells manager always hit the DB + self.flags(enable=True, db_check_interval=-1, group='cells') + super(CellsSampleJsonTest, self).setUp() + self._stub_cells() + + def _stub_cells(self, num_cells=5): + self.cells = [] + self.cells_next_id = 1 + + def _fake_cell_get_all(context): + return self.cells + + def _fake_cell_get(context, cell_name): + for cell in self.cells: + if cell['name'] == cell_name: + return cell + raise exception.CellNotFound(cell_name=cell_name) + + for x in xrange(num_cells): + cell = models.Cell() + our_id = self.cells_next_id + self.cells_next_id += 1 + cell.update({'id': our_id, + 'name': 'cell%s' % our_id, + 'username': 'username%s' % our_id, + 
'is_parent': our_id % 2 == 0}) + self.cells.append(cell) + + self.stubs.Set(db, 'cell_get_all', _fake_cell_get_all) + self.stubs.Set(db, 'cell_get', _fake_cell_get) + + def test_cells_empty_list(self): + # Override this + self._stub_cells(num_cells=0) + response = self._do_get('os-cells') + self.assertEqual(response.status, 200) + subs = self._get_regexes() + return self._verify_response('cells-list-empty-resp', subs, response) + + def test_cells_list(self): + response = self._do_get('os-cells') + self.assertEqual(response.status, 200) + subs = self._get_regexes() + return self._verify_response('cells-list-resp', subs, response) + + def test_cells_get(self): + response = self._do_get('os-cells/cell3') + self.assertEqual(response.status, 200) + subs = self._get_regexes() + return self._verify_response('cells-get-resp', subs, response) + + +class CellsSampleXmlTest(CellsSampleJsonTest): + ctype = 'xml' diff --git a/nova/tests/integrated/test_extensions.py b/nova/tests/integrated/test_extensions.py index b6e1adc73..ca5ff8374 100644 --- a/nova/tests/integrated/test_extensions.py +++ b/nova/tests/integrated/test_extensions.py @@ -16,7 +16,6 @@ # under the License. # Import extensions to pull in osapi_compute_extension CONF option used below. 
-from nova.api.openstack.compute import extensions from nova.openstack.common import cfg from nova.openstack.common.log import logging from nova.tests.integrated import integrated_helpers diff --git a/nova/tests/network/test_api.py b/nova/tests/network/test_api.py index 94cccd9d9..959c5a472 100644 --- a/nova/tests/network/test_api.py +++ b/nova/tests/network/test_api.py @@ -17,8 +17,11 @@ """Tests for network API.""" +import itertools import random +import mox + from nova import context from nova import exception from nova import network @@ -37,6 +40,25 @@ class ApiTestCase(test.TestCase): self.context = context.RequestContext('fake-user', 'fake-project') + def test_allocate_for_instance_handles_macs_passed(self): + # If a macs argument is supplied to the 'nova-network' API, it is just + # ignored. This test checks that the call down to the rpcapi layer + # doesn't pass macs down: nova-network doesn't support hypervisor + # mac address limits (today anyhow). + macs = set(['ab:cd:ef:01:23:34']) + self.mox.StubOutWithMock( + self.network_api.network_rpcapi, "allocate_for_instance") + kwargs = dict(zip(['host', 'instance_id', 'instance_uuid', + 'project_id', 'requested_networks', 'rxtx_factor', 'vpn'], + itertools.repeat(mox.IgnoreArg()))) + self.network_api.network_rpcapi.allocate_for_instance( + mox.IgnoreArg(), **kwargs).AndReturn([]) + self.mox.ReplayAll() + instance = dict(id='id', uuid='uuid', project_id='project_id', + host='host', instance_type={'rxtx_factor': 0}) + self.network_api.allocate_for_instance( + 'context', instance, 'vpn', 'requested_networks', macs=macs) + def _do_test_associate_floating_ip(self, orig_instance_uuid): """Test post-association logic.""" diff --git a/nova/tests/network/test_linux_net.py b/nova/tests/network/test_linux_net.py index c0770902d..8a7865b83 100644 --- a/nova/tests/network/test_linux_net.py +++ b/nova/tests/network/test_linux_net.py @@ -469,13 +469,9 @@ class LinuxNetworkTestCase(test.TestCase): '--arp-ip-src', dhcp, '-j', 
'DROP'), ('ebtables', '-I', 'OUTPUT', '-p', 'ARP', '-o', iface, '--arp-ip-src', dhcp, '-j', 'DROP'), - ('iptables-save', '-c', '-t', 'filter'), + ('iptables-save', '-c'), ('iptables-restore', '-c'), - ('iptables-save', '-c', '-t', 'mangle'), - ('iptables-restore', '-c'), - ('iptables-save', '-c', '-t', 'nat'), - ('iptables-restore', '-c'), - ('ip6tables-save', '-c', '-t', 'filter'), + ('ip6tables-save', '-c'), ('ip6tables-restore', '-c'), ] self.assertEqual(executes, expected) @@ -508,13 +504,9 @@ class LinuxNetworkTestCase(test.TestCase): '--arp-ip-dst', dhcp, '-j', 'DROP'), ('ebtables', '-D', 'OUTPUT', '-p', 'ARP', '-o', iface, '--arp-ip-src', dhcp, '-j', 'DROP'), - ('iptables-save', '-c', '-t', 'filter'), - ('iptables-restore', '-c'), - ('iptables-save', '-c', '-t', 'mangle'), - ('iptables-restore', '-c'), - ('iptables-save', '-c', '-t', 'nat'), + ('iptables-save', '-c'), ('iptables-restore', '-c'), - ('ip6tables-save', '-c', '-t', 'filter'), + ('ip6tables-save', '-c'), ('ip6tables-restore', '-c'), ] self.assertEqual(executes, expected) diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py index 385aea1ee..b5b3ec107 100644 --- a/nova/tests/network/test_manager.py +++ b/nova/tests/network/test_manager.py @@ -16,8 +16,8 @@ # License for the specific language governing permissions and limitations # under the License. 
import shutil -import tempfile +import fixtures import mox from nova import context @@ -142,7 +142,7 @@ vifs = [{'id': 0, class FlatNetworkTestCase(test.TestCase): def setUp(self): super(FlatNetworkTestCase, self).setUp() - self.tempdir = tempfile.mkdtemp() + self.tempdir = self.useFixture(fixtures.TempDir()).path self.flags(log_dir=self.tempdir) self.network = network_manager.FlatManager(host=HOST) self.network.instance_dns_domain = '' @@ -150,10 +150,6 @@ class FlatNetworkTestCase(test.TestCase): self.context = context.RequestContext('testuser', 'testproject', is_admin=False) - def tearDown(self): - shutil.rmtree(self.tempdir) - super(FlatNetworkTestCase, self).tearDown() - def test_get_instance_nw_info(self): fake_get_instance_nw_info = fake_network.fake_get_instance_nw_info @@ -186,6 +182,7 @@ class FlatNetworkTestCase(test.TestCase): 'mac': 'DE:AD:BE:EF:00:%02x' % nid, 'rxtx_cap': 30, 'vif_type': net_model.VIF_TYPE_BRIDGE, + 'vif_devname': None, 'vif_uuid': '00000000-0000-0000-0000-00000000000000%02d' % nid, 'should_create_vlan': False, @@ -1628,7 +1625,7 @@ class FloatingIPTestCase(test.TestCase): """Tests nova.network.manager.FloatingIP.""" def setUp(self): super(FloatingIPTestCase, self).setUp() - self.tempdir = tempfile.mkdtemp() + self.tempdir = self.useFixture(fixtures.TempDir()).path self.flags(log_dir=self.tempdir) self.network = TestFloatingIPManager() self.network.db = db @@ -1636,10 +1633,6 @@ class FloatingIPTestCase(test.TestCase): self.context = context.RequestContext('testuser', self.project_id, is_admin=False) - def tearDown(self): - shutil.rmtree(self.tempdir) - super(FloatingIPTestCase, self).tearDown() - def test_disassociate_floating_ip_multi_host_calls(self): floating_ip = { 'fixed_ip_id': 12 @@ -2127,7 +2120,7 @@ class InstanceDNSTestCase(test.TestCase): """Tests nova.network.manager instance DNS.""" def setUp(self): super(InstanceDNSTestCase, self).setUp() - self.tempdir = tempfile.mkdtemp() + self.tempdir = 
self.useFixture(fixtures.TempDir()).path self.flags(log_dir=self.tempdir) self.network = TestFloatingIPManager() self.network.db = db @@ -2135,10 +2128,6 @@ class InstanceDNSTestCase(test.TestCase): self.context = context.RequestContext('testuser', self.project_id, is_admin=False) - def tearDown(self): - shutil.rmtree(self.tempdir) - super(InstanceDNSTestCase, self).tearDown() - def test_dns_domains_private(self): zone1 = 'testzone' domain1 = 'example.org' diff --git a/nova/tests/network/test_quantumv2.py b/nova/tests/network/test_quantumv2.py index 004e76071..c9b2e43b3 100644 --- a/nova/tests/network/test_quantumv2.py +++ b/nova/tests/network/test_quantumv2.py @@ -342,32 +342,37 @@ class TestQuantumv2(test.TestCase): self.assertEquals('my_mac%s' % id_suffix, nw_inf[0]['address']) self.assertEquals(0, len(nw_inf[0]['network']['subnets'])) - def _allocate_for_instance(self, net_idx=1, **kwargs): + def _stub_allocate_for_instance(self, net_idx=1, **kwargs): api = quantumapi.API() self.mox.StubOutWithMock(api, 'get_instance_nw_info') # Net idx is 1-based for compatibility with existing unit tests nets = self.nets[net_idx - 1] - api.get_instance_nw_info(mox.IgnoreArg(), - self.instance, - networks=nets).AndReturn(None) - ports = {} fixed_ips = {} + macs = kwargs.get('macs') + if macs: + macs = set(macs) req_net_ids = [] if 'requested_networks' in kwargs: for id, fixed_ip, port_id in kwargs['requested_networks']: if port_id: self.moxed_client.show_port(port_id).AndReturn( {'port': {'id': 'my_portid1', - 'network_id': 'my_netid1'}}) + 'network_id': 'my_netid1', + 'mac_address': 'my_mac1'}}) ports['my_netid1'] = self.port_data1[0] id = 'my_netid1' + if macs is not None: + macs.discard('my_mac1') else: fixed_ips[id] = fixed_ip req_net_ids.append(id) expected_network_order = req_net_ids else: expected_network_order = [n['id'] for n in nets] + if kwargs.get('_break') == 'pre_list_networks': + self.mox.ReplayAll() + return api search_ids = [net['id'] for net in nets if 
net['id'] in req_net_ids] mox_list_network_params = dict(tenant_id=self.instance['project_id'], @@ -382,8 +387,10 @@ class TestQuantumv2(test.TestCase): mox_list_network_params['id'] = mox.SameElementsAs(search_ids) self.moxed_client.list_networks( **mox_list_network_params).AndReturn({'networks': []}) - for net_id in expected_network_order: + if kwargs.get('_break') == 'net_id2': + self.mox.ReplayAll() + return api port_req_body = { 'port': { 'device_id': self.instance['uuid'], @@ -406,10 +413,23 @@ class TestQuantumv2(test.TestCase): port_req_body['port']['admin_state_up'] = True port_req_body['port']['tenant_id'] = \ self.instance['project_id'] + if macs: + port_req_body['port']['mac_address'] = macs.pop() res_port = {'port': {'id': 'fake'}} self.moxed_client.create_port( MyComparator(port_req_body)).AndReturn(res_port) + + if kwargs.get('_break') == 'pre_get_instance_nw_info': + self.mox.ReplayAll() + return api + api.get_instance_nw_info(mox.IgnoreArg(), + self.instance, + networks=nets).AndReturn(None) self.mox.ReplayAll() + return api + + def _allocate_for_instance(self, net_idx=1, **kwargs): + api = self._stub_allocate_for_instance(net_idx, **kwargs) api.allocate_for_instance(self.context, self.instance, **kwargs) def test_allocate_for_instance_1(self): @@ -420,6 +440,73 @@ class TestQuantumv2(test.TestCase): # Allocate one port in two networks env. self._allocate_for_instance(2) + def test_allocate_for_instance_accepts_macs_kwargs_None(self): + # The macs kwarg should be accepted as None. + self._allocate_for_instance(1, macs=None) + + def test_allocate_for_instance_accepts_macs_kwargs_set(self): + # The macs kwarg should be accepted, as a set, the + # _allocate_for_instance helper checks that the mac is used to create a + # port. 
+ self._allocate_for_instance(1, macs=set(['ab:cd:ef:01:23:45'])) + + def test_allocate_for_instance_not_enough_macs_via_ports(self): + # using a hypervisor MAC via a pre-created port will stop it being + # used to dynamically create a port on a network. We put the network + # first in requested_networks so that if the code were to not pre-check + # requested ports, it would incorrectly assign the mac and not fail. + requested_networks = [ + (self.nets2[1]['id'], None, None), + (None, None, 'my_portid1')] + api = self._stub_allocate_for_instance( + net_idx=2, requested_networks=requested_networks, + macs=set(['my_mac1']), + _break='net_id2') + self.assertRaises(exception.PortNotFree, + api.allocate_for_instance, self.context, + self.instance, requested_networks=requested_networks, + macs=set(['my_mac1'])) + + def test_allocate_for_instance_not_enough_macs(self): + # If not enough MAC addresses are available to allocate to networks, an + # error should be raised. + # We could pass in macs=set(), but that wouldn't tell us that + # allocate_for_instance tracks used macs properly, so we pass in one + # mac, and ask for two networks. + requested_networks = [ + (self.nets2[1]['id'], None, None), + (self.nets2[0]['id'], None, None)] + api = self._stub_allocate_for_instance( + net_idx=2, requested_networks=requested_networks, + macs=set(['my_mac2']), + _break='pre_get_instance_nw_info') + self.assertRaises(exception.PortNotFree, + api.allocate_for_instance, self.context, + self.instance, requested_networks=requested_networks, + macs=set(['my_mac2'])) + + def test_allocate_for_instance_two_macs_two_networks(self): + # If two MACs are available and two networks requested, two new ports + # get made and no exceptions raised. 
+ requested_networks = [ + (self.nets2[1]['id'], None, None), + (self.nets2[0]['id'], None, None)] + self._allocate_for_instance( + net_idx=2, requested_networks=requested_networks, + macs=set(['my_mac2', 'my_mac1'])) + + def test_allocate_for_instance_mac_conflicting_requested_port(self): + # specify only first and last network + requested_networks = [(None, None, 'my_portid1')] + api = self._stub_allocate_for_instance( + net_idx=1, requested_networks=requested_networks, + macs=set(['unknown:mac']), + _break='pre_list_networks') + self.assertRaises(exception.PortNotUsable, + api.allocate_for_instance, self.context, + self.instance, requested_networks=requested_networks, + macs=set(['unknown:mac'])) + def test_allocate_for_instance_with_requested_networks(self): # specify only first and last network requested_networks = [ @@ -435,7 +522,6 @@ class TestQuantumv2(test.TestCase): requested_networks=requested_networks) def test_allocate_for_instance_with_requested_networks_with_port(self): - # specify only first and last network requested_networks = [(None, None, 'myportid1')] self._allocate_for_instance(net_idx=1, requested_networks=requested_networks) @@ -916,6 +1002,54 @@ class TestQuantumv2(test.TestCase): self.mox.ReplayAll() api.disassociate_floating_ip(self.context, self.instance, address) + def test_add_fixed_ip_to_instance(self): + api = quantumapi.API() + network_id = 'my_netid1' + search_opts = {'network_id': network_id} + self.moxed_client.list_subnets( + **search_opts).AndReturn({'subnets': self.subnet_data1}) + + zone = 'compute:%s' % self.instance['availability_zone'] + search_opts = {'device_id': self.instance['uuid'], + 'device_owner': 'compute:nova', + 'network_id': network_id} + self.moxed_client.list_ports( + **search_opts).AndReturn({'ports': self.port_data1}) + port_req_body = { + 'port': { + 'fixed_ips': [{'subnet_id': 'my_subid1'}], + }, + } + port = self.port_data1[0] + port['fixed_ips'] = [{'subnet_id': 'my_subid1'}] + 
self.moxed_client.update_port('my_portid1', + MyComparator(port_req_body)).AndReturn({'port': port}) + + self.mox.ReplayAll() + api.add_fixed_ip_to_instance(self.context, self.instance, network_id) + + def test_remove_fixed_ip_from_instance(self): + api = quantumapi.API() + address = '10.0.0.3' + zone = 'compute:%s' % self.instance['availability_zone'] + search_opts = {'device_id': self.instance['uuid'], + 'device_owner': zone, + 'fixed_ips': 'ip_address=%s' % address} + self.moxed_client.list_ports( + **search_opts).AndReturn({'ports': self.port_data1}) + port_req_body = { + 'port': { + 'fixed_ips': [], + }, + } + port = self.port_data1[0] + port['fixed_ips'] = [] + self.moxed_client.update_port('my_portid1', + MyComparator(port_req_body)).AndReturn({'port': port}) + + self.mox.ReplayAll() + api.remove_fixed_ip_from_instance(self.context, self.instance, address) + class TestQuantumv2ModuleMethods(test.TestCase): def test_ensure_requested_network_ordering_no_preference(self): diff --git a/nova/tests/network/test_rpcapi.py b/nova/tests/network/test_rpcapi.py index 90bffeeaf..5ba7459fb 100644 --- a/nova/tests/network/test_rpcapi.py +++ b/nova/tests/network/test_rpcapi.py @@ -108,8 +108,9 @@ class NetworkRpcAPITestCase(test.TestCase): def test_get_floating_ip(self): self._test_network_api('get_floating_ip', rpc_method='call', id='id') - def test_get_floating_pools(self): - self._test_network_api('get_floating_pools', rpc_method='call') + def test_get_floating_ip_pools(self): + self._test_network_api('get_floating_ip_pools', rpc_method='call', + version="1.7") def test_get_floating_ip_by_address(self): self._test_network_api('get_floating_ip_by_address', rpc_method='call', diff --git a/nova/tests/scheduler/test_chance_scheduler.py b/nova/tests/scheduler/test_chance_scheduler.py index 26cde055b..76fba900d 100644 --- a/nova/tests/scheduler/test_chance_scheduler.py +++ b/nova/tests/scheduler/test_chance_scheduler.py @@ -130,11 +130,11 @@ class 
ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase): # instance 1 ctxt.elevated().AndReturn(ctxt_elevated) self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn([]) - compute_utils.add_instance_fault_from_exc(ctxt, - uuid, mox.IsA(exception.NoValidHost), mox.IgnoreArg()) - db.instance_update_and_get_original(ctxt, uuid, + old_ref, new_ref = db.instance_update_and_get_original(ctxt, uuid, {'vm_state': vm_states.ERROR, 'task_state': None}).AndReturn(({}, {})) + compute_utils.add_instance_fault_from_exc(ctxt, + new_ref, mox.IsA(exception.NoValidHost), mox.IgnoreArg()) self.mox.ReplayAll() self.driver.schedule_run_instance( diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py index 5d8e8236b..2bd2cb85b 100644 --- a/nova/tests/scheduler/test_filter_scheduler.py +++ b/nova/tests/scheduler/test_filter_scheduler.py @@ -58,11 +58,11 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc') self.mox.StubOutWithMock(db, 'instance_update_and_get_original') + old_ref, new_ref = db.instance_update_and_get_original(fake_context, + uuid, {'vm_state': vm_states.ERROR, 'task_state': + None}).AndReturn(({}, {})) compute_utils.add_instance_fault_from_exc(fake_context, - uuid, mox.IsA(exception.NoValidHost), mox.IgnoreArg()) - db.instance_update_and_get_original(fake_context, uuid, - {'vm_state': vm_states.ERROR, - 'task_state': None}).AndReturn(({}, {})) + new_ref, mox.IsA(exception.NoValidHost), mox.IgnoreArg()) self.mox.ReplayAll() sched.schedule_run_instance( fake_context, request_spec, None, None, None, None, {}) @@ -88,11 +88,11 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): 'instance_uuids': [uuid]} self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc') self.mox.StubOutWithMock(db, 'instance_update_and_get_original') + old_ref, new_ref = db.instance_update_and_get_original(fake_context, + uuid, 
{'vm_state': vm_states.ERROR, 'task_state': + None}).AndReturn(({}, {})) compute_utils.add_instance_fault_from_exc(fake_context, - uuid, mox.IsA(exception.NoValidHost), mox.IgnoreArg()) - db.instance_update_and_get_original(fake_context, uuid, - {'vm_state': vm_states.ERROR, - 'task_state': None}).AndReturn(({}, {})) + new_ref, mox.IsA(exception.NoValidHost), mox.IgnoreArg()) self.mox.ReplayAll() sched.schedule_run_instance( fake_context, request_spec, None, None, None, None, {}) diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py index 9f7f189cc..f8b9f9296 100644 --- a/nova/tests/scheduler/test_host_filters.py +++ b/nova/tests/scheduler/test_host_filters.py @@ -22,6 +22,7 @@ from nova import context from nova import db from nova.openstack.common import cfg from nova.openstack.common import jsonutils +from nova.openstack.common import timeutils from nova.scheduler import filters from nova.scheduler.filters import extra_specs_ops from nova.scheduler.filters.trusted_filter import AttestationService @@ -233,11 +234,13 @@ class HostFiltersTestCase(test.TestCase): def fake_oat_request(self, *args, **kwargs): """Stubs out the response from OAT service.""" - return httplib.OK, jsonutils.loads(self.oat_data) + self.oat_attested = True + return httplib.OK, self.oat_data def setUp(self): super(HostFiltersTestCase, self).setUp() self.oat_data = '' + self.oat_attested = False self.stubs = stubout.StubOutForTesting() self.stubs.Set(AttestationService, '_request', self.fake_oat_request) self.context = context.RequestContext('fake', 'fake') @@ -1147,54 +1150,121 @@ class HostFiltersTestCase(test.TestCase): def test_trusted_filter_default_passes(self): self._stub_service_is_up(True) filt_cls = self.class_map['TrustedFilter']() - filter_properties = {'instance_type': {'memory_mb': 1024}} + filter_properties = {'context': self.context.elevated(), + 'instance_type': {'memory_mb': 1024}} host = fakes.FakeHostState('host1', 'node1', {}) 
self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_trusted_filter_trusted_and_trusted_passes(self): - self.oat_data =\ - '{"hosts":[{"host_name":"host1","trust_lvl":"trusted"}]}' + self.oat_data = {"hosts": [{"host_name": "host1", + "trust_lvl": "trusted", + "vtime": timeutils.isotime()}]} self._stub_service_is_up(True) filt_cls = self.class_map['TrustedFilter']() extra_specs = {'trust:trusted_host': 'trusted'} - filter_properties = {'instance_type': {'memory_mb': 1024, + filter_properties = {'context': self.context.elevated(), + 'instance_type': {'memory_mb': 1024, 'extra_specs': extra_specs}} host = fakes.FakeHostState('host1', 'node1', {}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) def test_trusted_filter_trusted_and_untrusted_fails(self): - self.oat_data =\ - '{"hosts":[{"host_name":"host1","trust_lvl":"untrusted"}]}' + self.oat_data = {"hosts": [{"host_name": "host1", + "trust_lvl": "untrusted", + "vtime": timeutils.isotime()}]} self._stub_service_is_up(True) filt_cls = self.class_map['TrustedFilter']() extra_specs = {'trust:trusted_host': 'trusted'} - filter_properties = {'instance_type': {'memory_mb': 1024, + filter_properties = {'context': self.context.elevated(), + 'instance_type': {'memory_mb': 1024, 'extra_specs': extra_specs}} host = fakes.FakeHostState('host1', 'node1', {}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_trusted_filter_untrusted_and_trusted_fails(self): - self.oat_data =\ - '{"hosts":[{"host_name":"host1","trust_lvl":"trusted"}]}' + self.oat_data = {"hosts": [{"host_name": "host1", + "trust_lvl": "trusted", + "vtime": timeutils.isotime()}]} self._stub_service_is_up(True) filt_cls = self.class_map['TrustedFilter']() extra_specs = {'trust:trusted_host': 'untrusted'} - filter_properties = {'instance_type': {'memory_mb': 1024, + filter_properties = {'context': self.context.elevated(), + 'instance_type': {'memory_mb': 1024, 'extra_specs': extra_specs}} host = 
fakes.FakeHostState('host1', 'node1', {}) self.assertFalse(filt_cls.host_passes(host, filter_properties)) def test_trusted_filter_untrusted_and_untrusted_passes(self): - self.oat_data =\ - '{"hosts":[{"host_name":"host1","trust_lvl":"untrusted"}]}' + self.oat_data = {"hosts": [{"host_name": "host1", + "trust_lvl": "untrusted", + "vtime":timeutils.isotime()}]} self._stub_service_is_up(True) filt_cls = self.class_map['TrustedFilter']() extra_specs = {'trust:trusted_host': 'untrusted'} - filter_properties = {'instance_type': {'memory_mb': 1024, + filter_properties = {'context': self.context.elevated(), + 'instance_type': {'memory_mb': 1024, 'extra_specs': extra_specs}} host = fakes.FakeHostState('host1', 'node1', {}) self.assertTrue(filt_cls.host_passes(host, filter_properties)) + def test_trusted_filter_update_cache(self): + self.oat_data = {"hosts": [{"host_name": + "host1", "trust_lvl": "untrusted", + "vtime": timeutils.isotime()}]} + + filt_cls = self.class_map['TrustedFilter']() + extra_specs = {'trust:trusted_host': 'untrusted'} + filter_properties = {'context': self.context.elevated(), + 'instance_type': {'memory_mb': 1024, + 'extra_specs': extra_specs}} + host = fakes.FakeHostState('host1', 'node1', {}) + + filt_cls.host_passes(host, filter_properties) # Fill the caches + + self.oat_attested = False + filt_cls.host_passes(host, filter_properties) + self.assertFalse(self.oat_attested) + + self.oat_attested = False + + timeutils.set_time_override(timeutils.utcnow()) + timeutils.advance_time_seconds( + CONF.trusted_computing.attestation_auth_timeout + 80) + filt_cls.host_passes(host, filter_properties) + self.assertTrue(self.oat_attested) + + timeutils.clear_time_override() + + def test_trusted_filter_update_cache_timezone(self): + self.oat_data = {"hosts": [{"host_name": "host1", + "trust_lvl": "untrusted", + "vtime": "2012-09-09T05:10:40-04:00"}]} + + filt_cls = self.class_map['TrustedFilter']() + extra_specs = {'trust:trusted_host': 'untrusted'} + 
filter_properties = {'context': self.context.elevated(), + 'instance_type': {'memory_mb': 1024, + 'extra_specs': extra_specs}} + host = fakes.FakeHostState('host1', 'node1', {}) + + timeutils.set_time_override( + timeutils.normalize_time( + timeutils.parse_isotime("2012-09-09T09:10:40Z"))) + + filt_cls.host_passes(host, filter_properties) # Fill the caches + + self.oat_attested = False + filt_cls.host_passes(host, filter_properties) + self.assertFalse(self.oat_attested) + + self.oat_attested = False + timeutils.advance_time_seconds( + CONF.trusted_computing.attestation_auth_timeout - 10) + filt_cls.host_passes(host, filter_properties) + self.assertFalse(self.oat_attested) + + timeutils.clear_time_override() + def test_core_filter_passes(self): filt_cls = self.class_map['CoreFilter']() filter_properties = {'instance_type': {'vcpus': 1}} diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index ceea74e70..eb4c3864f 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -111,13 +111,13 @@ class SchedulerManagerTestCase(test.TestCase): def test_show_host_resources(self): host = 'fake_host' - computes = [{'host': host, - 'compute_node': [{'vcpus': 4, - 'vcpus_used': 2, - 'memory_mb': 1024, - 'memory_mb_used': 512, - 'local_gb': 1024, - 'local_gb_used': 512}]}] + compute_node = {'host': host, + 'compute_node': [{'vcpus': 4, + 'vcpus_used': 2, + 'memory_mb': 1024, + 'memory_mb_used': 512, + 'local_gb': 1024, + 'local_gb_used': 512}]} instances = [{'project_id': 'project1', 'vcpus': 1, 'memory_mb': 128, @@ -134,11 +134,11 @@ class SchedulerManagerTestCase(test.TestCase): 'root_gb': 256, 'ephemeral_gb': 0}] - self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host') + self.mox.StubOutWithMock(db, 'service_get_by_compute_host') self.mox.StubOutWithMock(db, 'instance_get_all_by_host') - db.service_get_all_compute_by_host(self.context, host).AndReturn( - computes) + 
db.service_get_by_compute_host(self.context, host).AndReturn( + compute_node) db.instance_get_all_by_host(self.context, host).AndReturn(instances) self.mox.ReplayAll() @@ -183,12 +183,12 @@ class SchedulerManagerTestCase(test.TestCase): self.manager.driver.schedule_run_instance(self.context, request_spec, None, None, None, None, {}).AndRaise( exception.NoValidHost(reason="")) - db.instance_update_and_get_original(self.context, fake_instance_uuid, + old, new_ref = db.instance_update_and_get_original(self.context, + fake_instance_uuid, {"vm_state": vm_states.ERROR, "task_state": None}).AndReturn((inst, inst)) - compute_utils.add_instance_fault_from_exc(self.context, - fake_instance_uuid, mox.IsA(exception.NoValidHost), - mox.IgnoreArg()) + compute_utils.add_instance_fault_from_exc(self.context, new_ref, + mox.IsA(exception.NoValidHost), mox.IgnoreArg()) self.mox.ReplayAll() self.manager.run_instance(self.context, request_spec, @@ -217,12 +217,12 @@ class SchedulerManagerTestCase(test.TestCase): } self.manager.driver.schedule_prep_resize(**kwargs).AndRaise( exception.NoValidHost(reason="")) - db.instance_update_and_get_original(self.context, fake_instance_uuid, + old_ref, new_ref = db.instance_update_and_get_original(self.context, + fake_instance_uuid, {"vm_state": vm_states.ACTIVE, "task_state": None}).AndReturn( (inst, inst)) - compute_utils.add_instance_fault_from_exc(self.context, - fake_instance_uuid, mox.IsA(exception.NoValidHost), - mox.IgnoreArg()) + compute_utils.add_instance_fault_from_exc(self.context, new_ref, + mox.IsA(exception.NoValidHost), mox.IgnoreArg()) self.mox.ReplayAll() self.manager.prep_resize(**kwargs) @@ -254,12 +254,12 @@ class SchedulerManagerTestCase(test.TestCase): "vm_state": "", "task_state": "", } - db.instance_update_and_get_original(self.context, fake_instance_uuid, + old_ref, new_ref = db.instance_update_and_get_original(self.context, + fake_instance_uuid, {"vm_state": vm_states.ERROR, "task_state": None}).AndReturn((inst, inst)) - 
compute_utils.add_instance_fault_from_exc(self.context, - fake_instance_uuid, mox.IsA(test.TestingException), - mox.IgnoreArg()) + compute_utils.add_instance_fault_from_exc(self.context, new_ref, + mox.IsA(test.TestingException), mox.IgnoreArg()) self.mox.ReplayAll() @@ -338,8 +338,6 @@ class SchedulerTestCase(test.TestCase): block_migration = False disk_over_commit = False instance = jsonutils.to_primitive(self._live_migration_instance()) - instance_id = instance['id'] - instance_uuid = instance['uuid'] self.driver._live_migration_src_check(self.context, instance) self.driver._live_migration_dest_check(self.context, instance, dest) @@ -362,7 +360,7 @@ class SchedulerTestCase(test.TestCase): # Test live migration when all checks pass. self.mox.StubOutWithMock(servicegroup.API, 'service_is_up') - self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host') + self.mox.StubOutWithMock(db, 'service_get_by_compute_host') self.mox.StubOutWithMock(db, 'instance_get_all_by_host') self.mox.StubOutWithMock(rpc, 'call') self.mox.StubOutWithMock(rpc, 'cast') @@ -373,34 +371,32 @@ class SchedulerTestCase(test.TestCase): block_migration = True disk_over_commit = True instance = jsonutils.to_primitive(self._live_migration_instance()) - instance_id = instance['id'] - instance_uuid = instance['uuid'] # Source checks - db.service_get_all_compute_by_host(self.context, - instance['host']).AndReturn(['fake_service2']) + db.service_get_by_compute_host(self.context, + instance['host']).AndReturn('fake_service2') self.servicegroup_api.service_is_up('fake_service2').AndReturn(True) # Destination checks (compute is up, enough memory, disk) - db.service_get_all_compute_by_host(self.context, - dest).AndReturn(['fake_service3']) + db.service_get_by_compute_host(self.context, + dest).AndReturn('fake_service3') self.servicegroup_api.service_is_up('fake_service3').AndReturn(True) # assert_compute_node_has_enough_memory() - db.service_get_all_compute_by_host(self.context, dest).AndReturn( - 
[{'compute_node': [{'memory_mb': 2048, - 'hypervisor_version': 1}]}]) + db.service_get_by_compute_host(self.context, dest).AndReturn( + {'compute_node': [{'memory_mb': 2048, + 'hypervisor_version': 1}]}) db.instance_get_all_by_host(self.context, dest).AndReturn( [dict(memory_mb=256), dict(memory_mb=512)]) # Common checks (same hypervisor, etc) - db.service_get_all_compute_by_host(self.context, dest).AndReturn( - [{'compute_node': [{'hypervisor_type': 'xen', - 'hypervisor_version': 1}]}]) - db.service_get_all_compute_by_host(self.context, + db.service_get_by_compute_host(self.context, dest).AndReturn( + {'compute_node': [{'hypervisor_type': 'xen', + 'hypervisor_version': 1}]}) + db.service_get_by_compute_host(self.context, instance['host']).AndReturn( - [{'compute_node': [{'hypervisor_type': 'xen', - 'hypervisor_version': 1, - 'cpu_info': 'fake_cpu_info'}]}]) + {'compute_node': [{'hypervisor_type': 'xen', + 'hypervisor_version': 1, + 'cpu_info': 'fake_cpu_info'}]}) rpc.call(self.context, "compute.fake_host2", {"method": 'check_can_live_migrate_destination', @@ -440,7 +436,7 @@ class SchedulerTestCase(test.TestCase): # Raise exception when src compute node is does not exist. self.mox.StubOutWithMock(servicegroup.API, 'service_is_up') - self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host') + self.mox.StubOutWithMock(db, 'service_get_by_compute_host') dest = 'fake_host2' block_migration = False @@ -448,9 +444,9 @@ class SchedulerTestCase(test.TestCase): instance = self._live_migration_instance() # Compute down - db.service_get_all_compute_by_host(self.context, + db.service_get_by_compute_host(self.context, instance['host']).AndRaise( - exception.NotFound()) + exception.ComputeHostNotFound(host='fake')) self.mox.ReplayAll() self.assertRaises(exception.ComputeServiceUnavailable, @@ -463,7 +459,7 @@ class SchedulerTestCase(test.TestCase): # Raise exception when src compute node is not alive. 
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up') - self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host') + self.mox.StubOutWithMock(db, 'service_get_by_compute_host') dest = 'fake_host2' block_migration = False @@ -471,8 +467,8 @@ class SchedulerTestCase(test.TestCase): instance = self._live_migration_instance() # Compute down - db.service_get_all_compute_by_host(self.context, - instance['host']).AndReturn(['fake_service2']) + db.service_get_by_compute_host(self.context, + instance['host']).AndReturn('fake_service2') self.servicegroup_api.service_is_up('fake_service2').AndReturn(False) self.mox.ReplayAll() @@ -486,7 +482,7 @@ class SchedulerTestCase(test.TestCase): # Raise exception when dest compute node is not alive. self.mox.StubOutWithMock(self.driver, '_live_migration_src_check') - self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host') + self.mox.StubOutWithMock(db, 'service_get_by_compute_host') self.mox.StubOutWithMock(servicegroup.API, 'service_is_up') dest = 'fake_host2' @@ -495,8 +491,8 @@ class SchedulerTestCase(test.TestCase): instance = self._live_migration_instance() self.driver._live_migration_src_check(self.context, instance) - db.service_get_all_compute_by_host(self.context, - dest).AndReturn(['fake_service3']) + db.service_get_by_compute_host(self.context, + dest).AndReturn('fake_service3') # Compute is down self.servicegroup_api.service_is_up('fake_service3').AndReturn(False) @@ -511,17 +507,16 @@ class SchedulerTestCase(test.TestCase): # Confirms exception raises in case dest and src is same host. 
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check') - self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host') + self.mox.StubOutWithMock(db, 'service_get_by_compute_host') self.mox.StubOutWithMock(servicegroup.API, 'service_is_up') block_migration = False - disk_over_commit = False instance = self._live_migration_instance() # make dest same as src dest = instance['host'] self.driver._live_migration_src_check(self.context, instance) - db.service_get_all_compute_by_host(self.context, - dest).AndReturn(['fake_service3']) + db.service_get_by_compute_host(self.context, + dest).AndReturn('fake_service3') self.servicegroup_api.service_is_up('fake_service3').AndReturn(True) self.mox.ReplayAll() @@ -535,7 +530,7 @@ class SchedulerTestCase(test.TestCase): # Confirms exception raises when dest doesn't have enough memory. self.mox.StubOutWithMock(self.driver, '_live_migration_src_check') - self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host') + self.mox.StubOutWithMock(db, 'service_get_by_compute_host') self.mox.StubOutWithMock(servicegroup.API, 'service_is_up') self.mox.StubOutWithMock(self.driver, '_get_compute_info') self.mox.StubOutWithMock(db, 'instance_get_all_by_host') @@ -546,8 +541,8 @@ class SchedulerTestCase(test.TestCase): instance = self._live_migration_instance() self.driver._live_migration_src_check(self.context, instance) - db.service_get_all_compute_by_host(self.context, - dest).AndReturn(['fake_service3']) + db.service_get_by_compute_host(self.context, + dest).AndReturn('fake_service3') self.servicegroup_api.service_is_up('fake_service3').AndReturn(True) self.driver._get_compute_info(self.context, dest).AndReturn( @@ -569,7 +564,7 @@ class SchedulerTestCase(test.TestCase): self.mox.StubOutWithMock(rpc, 'queue_get_for') self.mox.StubOutWithMock(rpc, 'call') self.mox.StubOutWithMock(rpc, 'cast') - self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host') + self.mox.StubOutWithMock(db, 'service_get_by_compute_host') dest = 
'fake_host2' block_migration = False @@ -579,13 +574,13 @@ class SchedulerTestCase(test.TestCase): self.driver._live_migration_src_check(self.context, instance) self.driver._live_migration_dest_check(self.context, instance, dest) - db.service_get_all_compute_by_host(self.context, dest).AndReturn( - [{'compute_node': [{'hypervisor_type': 'xen', - 'hypervisor_version': 1}]}]) - db.service_get_all_compute_by_host(self.context, + db.service_get_by_compute_host(self.context, dest).AndReturn( + {'compute_node': [{'hypervisor_type': 'xen', + 'hypervisor_version': 1}]}) + db.service_get_by_compute_host(self.context, instance['host']).AndReturn( - [{'compute_node': [{'hypervisor_type': 'not-xen', - 'hypervisor_version': 1}]}]) + {'compute_node': [{'hypervisor_type': 'not-xen', + 'hypervisor_version': 1}]}) self.mox.ReplayAll() self.assertRaises(exception.InvalidHypervisorType, @@ -601,7 +596,7 @@ class SchedulerTestCase(test.TestCase): self.mox.StubOutWithMock(rpc, 'queue_get_for') self.mox.StubOutWithMock(rpc, 'call') self.mox.StubOutWithMock(rpc, 'cast') - self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host') + self.mox.StubOutWithMock(db, 'service_get_by_compute_host') dest = 'fake_host2' block_migration = False @@ -611,13 +606,13 @@ class SchedulerTestCase(test.TestCase): self.driver._live_migration_src_check(self.context, instance) self.driver._live_migration_dest_check(self.context, instance, dest) - db.service_get_all_compute_by_host(self.context, dest).AndReturn( - [{'compute_node': [{'hypervisor_type': 'xen', - 'hypervisor_version': 1}]}]) - db.service_get_all_compute_by_host(self.context, + db.service_get_by_compute_host(self.context, dest).AndReturn( + {'compute_node': [{'hypervisor_type': 'xen', + 'hypervisor_version': 1}]}) + db.service_get_by_compute_host(self.context, instance['host']).AndReturn( - [{'compute_node': [{'hypervisor_type': 'xen', - 'hypervisor_version': 2}]}]) + {'compute_node': [{'hypervisor_type': 'xen', + 'hypervisor_version': 2}]}) 
self.mox.ReplayAll() self.assertRaises(exception.DestinationHypervisorTooOld, self.driver.schedule_live_migration, self.context, diff --git a/nova/tests/ssl_cert/ca.crt b/nova/tests/ssl_cert/ca.crt new file mode 100644 index 000000000..9d66ca627 --- /dev/null +++ b/nova/tests/ssl_cert/ca.crt @@ -0,0 +1,35 @@ +-----BEGIN CERTIFICATE----- +MIIGDDCCA/SgAwIBAgIJAPSvwQYk4qI4MA0GCSqGSIb3DQEBBQUAMGExCzAJBgNV +BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMRUwEwYDVQQKEwxPcGVuc3RhY2sg +Q0ExEjAQBgNVBAsTCUdsYW5jZSBDQTESMBAGA1UEAxMJR2xhbmNlIENBMB4XDTEy +MDIwOTE3MTAwMloXDTIyMDIwNjE3MTAwMlowYTELMAkGA1UEBhMCQVUxEzARBgNV +BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ +R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDmf+fapWfzy1Uylus0KGalw4X/5xZ+ltPVOr+IdCPbstvi +RTC5g+O+TvXeOP32V/cnSY4ho/+f2q730za+ZA/cgWO252rcm3Q7KTJn3PoqzJvX +/l3EXe3/TCrbzgZ7lW3QLTCTEE2eEzwYG3wfDTOyoBq+F6ct6ADh+86gmpbIRfYI +N+ixB0hVyz9427PTof97fL7qxxkjAayB28OfwHrkEBl7iblNhUC0RoH+/H9r5GEl +GnWiebxfNrONEHug6PHgiaGq7/Dj+u9bwr7J3/NoS84I08ajMnhlPZxZ8bS/O8If +ceWGZv7clPozyhABT/otDfgVcNH1UdZ4zLlQwc1MuPYN7CwxrElxc8Quf94ttGjb +tfGTl4RTXkDofYdG1qBWW962PsGl2tWmbYDXV0q5JhV/IwbrE1X9f+OksJQne1/+ +dZDxMhdf2Q1V0P9hZZICu4+YhmTMs5Mc9myKVnzp4NYdX5fXoB/uNYph+G7xG5IK +WLSODKhr1wFGTTcuaa8LhOH5UREVenGDJuc6DdgX9a9PzyJGIi2ngQ03TJIkCiU/ +4J/r/vsm81ezDiYZSp2j5JbME+ixW0GBLTUWpOIxUSHgUFwH5f7lQwbXWBOgwXQk +BwpZTmdQx09MfalhBtWeu4/6BnOCOj7e/4+4J0eVxXST0AmVyv8YjJ2nz1F9oQID +AQABo4HGMIHDMB0GA1UdDgQWBBTk7Krj4bEsTjHXaWEtI2GZ5ACQyTCBkwYDVR0j +BIGLMIGIgBTk7Krj4bEsTjHXaWEtI2GZ5ACQyaFlpGMwYTELMAkGA1UEBhMCQVUx +EzARBgNVBAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAG +A1UECxMJR2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0GCCQD0r8EGJOKiODAM +BgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4ICAQA8Zrss/MiwFHGmDlercE0h +UvzA54n/EvKP9nP3jHM2qW/VPfKdnFw99nEPFLhb+lN553vdjOpCYFm+sW0Z5Mi4 +qsFkk4AmXIIEFOPt6zKxMioLYDQ9Sw/BUv6EZGeANWr/bhmaE+dMcKJt5le/0jJm +2ahsVB9fbFu9jBFeYb7Ba/x2aLkEGMxaDLla+6EQhj148fTnS1wjmX9G2cNzJvj/ 
++C2EfKJIuDJDqw2oS2FGVpP37FA2Bz2vga0QatNneLkGKCFI3ZTenBznoN+fmurX +TL3eJE4IFNrANCcdfMpdyLAtXz4KpjcehqpZMu70er3d30zbi1l0Ajz4dU+WKz/a +NQES+vMkT2wqjXHVTjrNwodxw3oLK/EuTgwoxIHJuplx5E5Wrdx9g7Gl1PBIJL8V +xiOYS5N7CakyALvdhP7cPubA2+TPAjNInxiAcmhdASS/Vrmpvrkat6XhGn8h9liv +ysDOpMQmYQkmgZBpW8yBKK7JABGGsJADJ3E6J5MMWBX2RR4kFoqVGAzdOU3oyaTy +I0kz5sfuahaWpdYJVlkO+esc0CRXw8fLDYivabK2tOgUEWeZsZGZ9uK6aV1VxTAY +9Guu3BJ4Rv/KP/hk7mP8rIeCwotV66/2H8nq72ImQhzSVyWcxbFf2rJiFQJ3BFwA +WoRMgEwjGJWqzhJZUYpUAQ== +-----END CERTIFICATE----- diff --git a/nova/tests/ssl_cert/certificate.crt b/nova/tests/ssl_cert/certificate.crt new file mode 100644 index 000000000..3c1aa6363 --- /dev/null +++ b/nova/tests/ssl_cert/certificate.crt @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- +MIIFLjCCAxYCAQEwDQYJKoZIhvcNAQEFBQAwYTELMAkGA1UEBhMCQVUxEzARBgNV +BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ +R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwHhcNMTIwMjA5MTcxMDUzWhcN +MjIwMjA2MTcxMDUzWjBZMQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0 +ZTESMBAGA1UEChMJT3BlbnN0YWNrMQ8wDQYDVQQLEwZHbGFuY2UxEDAOBgNVBAMT +BzAuMC4wLjAwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXpUkQN6pu +avo+gz3o1K4krVdPl1m7NjNJDyD/+ZH0EGNcEN7iag1qPE7JsjqGPNZsQK1dMoXb +Sz+OSi9qvNeJnBcfwUx5qTAtwyAb9AxGkwuMafIU+lWbsclo+dPGsja01ywbXTCZ +bF32iqnpOMYhfxWUdoQYiBkhxxhW9eMPKLS/KkP8/bx+Vaa2XJiAebqkd9nrksAA +BeGc9mlafYBEmiChPdJEPw+1ePA4QVq9aPepDsqAKtGN8JLpmoC3BdxQQTbbwL3Q +8fTXK4tCNUaVk4AbDy/McFq6y0ocQoBPJjihOY35mWG/OLtcI99yPOpWGnps/5aG +/64DDJ2D67Fnaj6gKHV+6TXFO8KZxlnxtgtiZDJBZkneTBt9ArSOv+l6NBsumRz0 +iEJ4o4H1S2TSMnprAvX7WnGtc6Xi9gXahYcDHEelwwYzqAiTBv6hxSp4MZ2dNXa+ +KzOitC7ZbV2qsg0au0wjfE/oSQ3NvsvUr8nOmfutJTvHRAwbC1v4G/tuAsO7O0w2 +0u2B3u+pG06m5+rnEqp+rB9hmukRYTfgEFRRsVIvpFl/cwvPXKRcX03UIMx+lLr9 +Ft+ep7YooBhY3wY2kwCxD4lRYNmbwsCIVywZt40f/4ad98TkufR9NhsfycxGeqbr +mTMFlZ8TTlmP82iohekKCOvoyEuTIWL2+wIDAQABMA0GCSqGSIb3DQEBBQUAA4IC +AQBMUBgV0R+Qltf4Du7u/8IFmGAoKR/mktB7R1gRRAqsvecUt7kIwBexGdavGg1y 
+0pU0+lgUZjJ20N1SlPD8gkNHfXE1fL6fmMjWz4dtYJjzRVhpufHPeBW4tl8DgHPN +rBGAYQ+drDSXaEjiPQifuzKx8WS+DGA3ki4co5mPjVnVH1xvLIdFsk89z3b3YD1k +yCJ/a9K36x6Z/c67JK7s6MWtrdRF9+MVnRKJ2PK4xznd1kBz16V+RA466wBDdARY +vFbtkafbEqOb96QTonIZB7+fAldKDPZYnwPqasreLmaGOaM8sxtlPYAJ5bjDONbc +AaXG8BMRQyO4FyH237otDKlxPyHOFV66BaffF5S8OlwIMiZoIvq+IcTZOdtDUSW2 +KHNLfe5QEDZdKjWCBrfqAfvNuG13m03WqfmcMHl3o/KiPJlx8l9Z4QEzZ9xcyQGL +cncgeHM9wJtzi2cD/rTDNFsx/gxvoyutRmno7I3NRbKmpsXF4StZioU3USRspB07 +hYXOVnG3pS+PjVby7ThT3gvFHSocguOsxClx1epdUJAmJUbmM7NmOp5WVBVtMtC2 +Su4NG/xJciXitKzw+btb7C7RjO6OEqv/1X/oBDzKBWQAwxUC+lqmnM7W6oqWJFEM +YfTLnrjs7Hj6ThMGcEnfvc46dWK3dz0RjsQzUxugPuEkLA== +-----END CERTIFICATE----- diff --git a/nova/tests/ssl_cert/privatekey.key b/nova/tests/ssl_cert/privatekey.key new file mode 100644 index 000000000..b63df3d29 --- /dev/null +++ b/nova/tests/ssl_cert/privatekey.key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEA16VJEDeqbmr6PoM96NSuJK1XT5dZuzYzSQ8g//mR9BBjXBDe +4moNajxOybI6hjzWbECtXTKF20s/jkovarzXiZwXH8FMeakwLcMgG/QMRpMLjGny +FPpVm7HJaPnTxrI2tNcsG10wmWxd9oqp6TjGIX8VlHaEGIgZIccYVvXjDyi0vypD +/P28flWmtlyYgHm6pHfZ65LAAAXhnPZpWn2ARJogoT3SRD8PtXjwOEFavWj3qQ7K +gCrRjfCS6ZqAtwXcUEE228C90PH01yuLQjVGlZOAGw8vzHBaustKHEKATyY4oTmN ++Zlhvzi7XCPfcjzqVhp6bP+Whv+uAwydg+uxZ2o+oCh1fuk1xTvCmcZZ8bYLYmQy +QWZJ3kwbfQK0jr/pejQbLpkc9IhCeKOB9Utk0jJ6awL1+1pxrXOl4vYF2oWHAxxH +pcMGM6gIkwb+ocUqeDGdnTV2viszorQu2W1dqrINGrtMI3xP6EkNzb7L1K/Jzpn7 +rSU7x0QMGwtb+Bv7bgLDuztMNtLtgd7vqRtOpufq5xKqfqwfYZrpEWE34BBUUbFS +L6RZf3MLz1ykXF9N1CDMfpS6/Rbfnqe2KKAYWN8GNpMAsQ+JUWDZm8LAiFcsGbeN +H/+GnffE5Ln0fTYbH8nMRnqm65kzBZWfE05Zj/NoqIXpCgjr6MhLkyFi9vsCAwEA +AQKCAgAA96baQcWr9SLmQOR4NOwLEhQAMWefpWCZhU3amB4FgEVR1mmJjnw868RW +t0v36jH0Dl44us9K6o2Ab+jCi9JTtbWM2Osk6JNkwSlVtsSPVH2KxbbmTTExH50N +sYE3tPj12rlB7isXpRrOzlRwzWZmJBHOtrFlAsdKFYCQc03vdXlKGkBv1BuSXYP/ +8W5ltSYXMspxehkOZvhaIejbFREMPbzDvGlDER1a7Q320qQ7kUr7ISvbY1XJUzj1 +f1HwgEA6w/AhED5Jv6wfgvx+8Yo9hYnflTPbsO1XRS4x7kJxGHTMlFuEsSF1ICYH 
+Bcos0wUiGcBO2N6uAFuhe98BBn+nOwAPZYWwGkmVuK2psm2mXAHx94GT/XqgK/1r +VWGSoOV7Fhjauc2Nv8/vJU18DXT3OY5hc4iXVeEBkuZwRb/NVUtnFoHxVO/Mp5Fh +/W5KZaLWVrLghzvSQ/KUIM0k4lfKDZpY9ZpOdNgWDyZY8tNrXumUZZimzWdXZ9vR +dBssmd8qEKs1AHGFnMDt56IjLGou6j0qnWsLdR1e/WEFsYzGXLVHCv6vXRNkbjqh +WFw5nA+2Dw1YAsy+YkTfgx2pOe+exM/wxsVPa7tG9oZ374dywUi1k6VoHw5dkmJw +1hbXqSLZtx2N51G+SpGmNAV4vLUF0y3dy2wnrzFkFT4uxh1w8QKCAQEA+h6LwHTK +hgcJx6CQQ6zYRqXo4wdvMooY1FcqJOq7LvJUA2CX5OOLs8qN1TyFrOCuAUTurOrM +ABlQ0FpsIaP8TOGz72dHe2eLB+dD6Bqjn10sEFMn54zWd/w9ympQrO9jb5X3ViTh +sCcdYyXVS9Hz8nzbbIF+DaKlxF2Hh71uRDxXpMPxRcGbOIuKZXUj6RkTIulzqT6o +uawlegWxch05QSgzq/1ASxtjTzo4iuDCAii3N45xqxnB+fV9NXEt4R2oOGquBRPJ +LxKcOnaQKBD0YNX4muTq+zPlv/kOb8/ys2WGWDUrNkpyJXqhTve4KONjqM7+iL/U +4WdJuiCjonzk/QKCAQEA3Lc+kNq35FNLxMcnCVcUgkmiCWZ4dyGZZPdqjOPww1+n +bbudGPzY1nxOvE60dZM4or/tm6qlXYfb2UU3+OOJrK9s297EQybZ8DTZu2GHyitc +NSFV3Gl4cgvKdbieGKkk9X2dV9xSNesNvX9lJEnQxuwHDTeo8ubLHtV88Ml1xokn +7W+IFiyEuUIL4e5/fadbrI3EwMrbCF4+9VcfABx4PTNMzdc8LsncCMXE+jFX8AWp +TsT2JezTe5o2WpvBoKMAYhJQNQiaWATn00pDVY/70H1vK3ljomAa1IUdOr/AhAF7 +3jL0MYMgXSHzXZOKAtc7yf+QfFWF1Ls8+sen1clJVwKCAQEAp59rB0r+Iz56RmgL +5t7ifs5XujbURemY5E2aN+18DuVmenD0uvfoO1DnJt4NtCNLWhxpXEdq+jH9H/VJ +fG4a+ydT4IC1vjVRTrWlo9qeh4H4suQX3S1c2kKY4pvHf25blH/Lp9bFzbkZD8Ze +IRcOxxb4MsrBwL+dGnGYD9dbG63ZCtoqSxaKQSX7VS1hKKmeUopj8ivFBdIht5oz +JogBQ/J+Vqg9u1gagRFCrYgdXTcOOtRix0lW336vL+6u0ax/fXe5MjvlW3+8Zc3p +pIBgVrlvh9ccx8crFTIDg9m4DJRgqaLQV+0ifI2np3WK3RQvSQWYPetZ7sm69ltD +bvUGvQKCAQAz5CEhjUqOs8asjOXwnDiGKSmfbCgGWi/mPQUf+rcwN9z1P5a/uTKB +utgIDbj/q401Nkp2vrgCNV7KxitSqKxFnTjKuKUL5KZ4gvRtyZBTR751/1BgcauP +pJYE91K0GZBG5zGG5pWtd4XTd5Af5/rdycAeq2ddNEWtCiRFuBeohbaNbBtimzTZ +GV4R0DDJKf+zoeEQMqEsZnwG0mTHceoS+WylOGU92teQeG7HI7K5C5uymTwFzpgq +ByegRd5QFgKRDB0vWsZuyzh1xI/wHdnmOpdYcUGre0zTijhFB7ALWQ32P6SJv3ps +av78kSNxZ4j3BM7DbJf6W8sKasZazOghAoIBAHekpBcLq9gRv2+NfLYxWN2sTZVB +1ldwioG7rWvk5YQR2akukecI3NRjtC5gG2vverawG852Y4+oLfgRMHxgp0qNStwX +juTykzPkCwZn8AyR+avC3mkrtJyM3IigcYOu4/UoaRDFa0xvCC1EfumpnKXIpHag 
+miSQZf2sVbgqb3/LWvHIg/ceOP9oGJve87/HVfQtBoLaIe5RXCWkqB7mcI/exvTS +8ShaW6v2Fe5Bzdvawj7sbsVYRWe93Aq2tmIgSX320D2RVepb6mjD4nr0IUaM3Yed +TFT7e2ikWXyDLLgVkDTU4Qe8fr3ZKGfanCIDzvgNw6H1gRi+2WQgOmjilMQ= +-----END RSA PRIVATE KEY----- diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py index 829a98334..fb2e76e45 100644 --- a/nova/tests/test_api.py +++ b/nova/tests/test_api.py @@ -309,11 +309,10 @@ class ApiEc2TestCase(test.TestCase): try: self.ec2.create_key_pair('test') except boto_exc.EC2ResponseError, e: - if e.code == 'KeyPairExists': + if e.code == 'InvalidKeyPair.Duplicate': pass else: - self.fail("Unexpected EC2ResponseError: %s " - "(expected KeyPairExists)" % e.code) + self.assertEqual('InvalidKeyPair.Duplicate', e.code) else: self.fail('Exception not raised.') diff --git a/nova/tests/test_availability_zones.py b/nova/tests/test_availability_zones.py new file mode 100644 index 000000000..2c5c06921 --- /dev/null +++ b/nova/tests/test_availability_zones.py @@ -0,0 +1,114 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Netease Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Tests for availability zones +""" + +from nova import availability_zones as az +from nova import context +from nova import db +from nova.openstack.common import cfg +from nova import service +from nova import test + +CONF = cfg.CONF +CONF.import_opt('internal_service_availability_zone', + 'nova.availability_zones') +CONF.import_opt('default_availability_zone', + 'nova.availability_zones') + + +class AvailabilityZoneTestCases(test.TestCase): + """Test case for aggregate based availability zone.""" + + def setUp(self): + super(AvailabilityZoneTestCases, self).setUp() + self.host = 'me' + self.availability_zone = 'nova-test' + self.default_az = CONF.default_availability_zone + self.default_in_az = CONF.internal_service_availability_zone + self.context = context.get_admin_context() + + agg = {'name': 'agg1'} + self.agg = db.aggregate_create(self.context, agg) + + metadata = {'availability_zone': self.availability_zone} + db.aggregate_metadata_add(self.context, self.agg['id'], metadata) + + def tearDown(self): + db.aggregate_delete(self.context, self.agg['id']) + super(AvailabilityZoneTestCases, self).tearDown() + + def _create_service_with_topic(self, topic): + values = { + 'binary': 'bin', + 'host': self.host, + 'topic': topic, + } + return db.service_create(self.context, values) + + def _destroy_service(self, service): + return db.service_destroy(self.context, service['id']) + + def _add_to_aggregate(self, service): + return db.aggregate_host_add(self.context, + self.agg['id'], service['host']) + + def _delete_from_aggregate(self, service): + return db.aggregate_host_delete(self.context, + self.agg['id'], service['host']) + + def test_set_availability_zone_compute_service(self): + """Test for compute service get right availability zone.""" + service = self._create_service_with_topic('compute') + services = db.service_get_all(self.context) + + # The service is not add into aggregate, so confirm it is default + # availability zone. 
+ new_service = az.set_availability_zones(self.context, services)[0] + self.assertEquals(new_service['availability_zone'], + self.default_az) + + # The service is added into aggregate, confirm return the aggregate + # availability zone. + self._add_to_aggregate(service) + new_service = az.set_availability_zones(self.context, services)[0] + self.assertEquals(new_service['availability_zone'], + self.availability_zone) + + self._destroy_service(service) + + def test_set_availability_zone_not_compute_service(self): + """Test not compute service get right availability zone.""" + service = self._create_service_with_topic('network') + services = db.service_get_all(self.context) + new_service = az.set_availability_zones(self.context, services)[0] + self.assertEquals(new_service['availability_zone'], + self.default_in_az) + self._destroy_service(service) + + def test_get_host_availability_zone(self): + """Test get right availability zone by given host.""" + self.assertEquals(self.default_az, + az.get_host_availability_zone(self.context, self.host)) + + service = self._create_service_with_topic('compute') + self._add_to_aggregate(service) + + self.assertEquals(self.availability_zone, + az.get_host_availability_zone(self.context, self.host)) diff --git a/nova/tests/test_cinder.py b/nova/tests/test_cinder.py index 29e2e978b..79b5ae66a 100644 --- a/nova/tests/test_cinder.py +++ b/nova/tests/test_cinder.py @@ -98,13 +98,14 @@ class FakeHTTPClient(cinder.cinder_client.client.HTTPClient): class FakeCinderClient(cinder.cinder_client.Client): def __init__(self, username, password, project_id=None, auth_url=None, - retries=None): + insecure=False, retries=None): super(FakeCinderClient, self).__init__(username, password, project_id=project_id, auth_url=auth_url, + insecure=insecure, retries=retries) self.client = FakeHTTPClient(username, password, project_id, auth_url, - retries=retries) + insecure=insecure, retries=retries) # keep a ref to the clients callstack for factory's 
assert_called self.callstack = self.client.callstack = [] @@ -177,6 +178,15 @@ class CinderTestCase(test.TestCase): self.assertTrue('volume_image_metadata' in volume) self.assertEqual(volume['volume_image_metadata'], _image_metadata) + def test_cinder_api_insecure(self): + # The True/False negation is awkward, but better for the client + # to pass us insecure=True and we check verify_cert == False + self.flags(cinder_api_insecure=True) + volume = self.api.get(self.context, '1234') + self.assert_called('GET', '/volumes/1234') + self.assertEquals( + self.fake_client_factory.client.client.verify_cert, False) + def test_cinder_http_retries(self): retries = 42 self.flags(cinder_http_retries=retries) diff --git a/nova/tests/test_configdrive2.py b/nova/tests/test_configdrive2.py index 260ab28c7..28fa423e0 100644 --- a/nova/tests/test_configdrive2.py +++ b/nova/tests/test_configdrive2.py @@ -67,11 +67,9 @@ class ConfigDriveTestCase(test.TestCase): utils.mkfs('vfat', mox.IgnoreArg(), label='config-2').AndReturn(None) - utils.trycmd('mount', '-o', 'loop', mox.IgnoreArg(), + utils.trycmd('mount', '-o', mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(), run_as_root=True).AndReturn((None, None)) - utils.trycmd('chown', mox.IgnoreArg(), mox.IgnoreArg(), - run_as_root=True).AndReturn((None, None)) utils.execute('umount', mox.IgnoreArg(), run_as_root=True).AndReturn(None) diff --git a/nova/tests/test_crypto.py b/nova/tests/test_crypto.py index 83010cee2..25df336fb 100644 --- a/nova/tests/test_crypto.py +++ b/nova/tests/test_crypto.py @@ -149,3 +149,66 @@ class CertExceptionTests(test.TestCase): self.assertRaises(exception.CryptoCRLFileNotFound, crypto.fetch_crl, project_id='fake') + + +class EncryptionTests(test.TestCase): + pubkey = ("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDArtgrfBu/g2o28o+H2ng/crv" + "zgES91i/NNPPFTOutXelrJ9QiPTPTm+B8yspLsXifmbsmXztNOlBQgQXs6usxb4" + "fnJKNUZ84Vkp5esbqK/L7eyRqwPvqo7btKBMoAMVX/kUyojMpxb7Ssh6M6Y8cpi" + 
"goi+MSDPD7+5yRJ9z4mH9h7MCY6Ejv8KTcNYmVHvRhsFUcVhWcIISlNWUGiG7rf" + "oki060F5myQN3AXcL8gHG5/Qb1RVkQFUKZ5geQ39/wSyYA1Q65QTba/5G2QNbl2" + "0eAIBTyKZhN6g88ak+yARa6BLLDkrlP7L4WctHQMLsuXHohQsUO9AcOlVMARgrg" + "uF test@test") + prikey = """-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAwK7YK3wbv4NqNvKPh9p4P3K784BEvdYvzTTzxUzrrV3payfU +Ij0z05vgfMrKS7F4n5m7Jl87TTpQUIEF7OrrMW+H5ySjVGfOFZKeXrG6ivy+3ska +sD76qO27SgTKADFV/5FMqIzKcW+0rIejOmPHKYoKIvjEgzw+/uckSfc+Jh/YezAm +OhI7/Ck3DWJlR70YbBVHFYVnCCEpTVlBohu636JItOtBeZskDdwF3C/IBxuf0G9U +VZEBVCmeYHkN/f8EsmANUOuUE22v+RtkDW5dtHgCAU8imYTeoPPGpPsgEWugSyw5 +K5T+y+FnLR0DC7Llx6IULFDvQHDpVTAEYK4LhQIDAQABAoIBAF9ibrrgHnBpItx+ +qVUMbriiGK8LUXxUmqdQTljeolDZi6KzPc2RVKWtpazBSvG7skX3+XCediHd+0JP +DNri1HlNiA6B0aUIGjoNsf6YpwsE4YwyK9cR5k5YGX4j7se3pKX2jOdngxQyw1Mh +dkmCeWZz4l67nbSFz32qeQlwrsB56THJjgHB7elDoGCXTX/9VJyjFlCbfxVCsIng +inrNgT0uMSYMNpAjTNOjguJt/DtXpwzei5eVpsERe0TRRVH23ycS0fuq/ancYwI/ +MDr9KSB8r+OVGeVGj3popCxECxYLBxhqS1dAQyJjhQXKwajJdHFzidjXO09hLBBz +FiutpYUCgYEA6OFikTrPlCMGMJjSj+R9woDAOPfvCDbVZWfNo8iupiECvei88W28 +RYFnvUQRjSC0pHe//mfUSmiEaE+SjkNCdnNR+vsq9q+htfrADm84jl1mfeWatg/g +zuGz2hAcZnux3kQMI7ufOwZNNpM2bf5B4yKamvG8tZRRxSkkAL1NV48CgYEA08/Z +Ty9g9XPKoLnUWStDh1zwG+c0q14l2giegxzaUAG5DOgOXbXcw0VQ++uOWD5ARELG +g9wZcbBsXxJrRpUqx+GAlv2Y1bkgiPQS1JIyhsWEUtwfAC/G+uZhCX53aI3Pbsjh +QmkPCSp5DuOuW2PybMaw+wVe+CaI/gwAWMYDAasCgYEA4Fzkvc7PVoU33XIeywr0 +LoQkrb4QyPUrOvt7H6SkvuFm5thn0KJMlRpLfAksb69m2l2U1+HooZd4mZawN+eN +DNmlzgxWJDypq83dYwq8jkxmBj1DhMxfZnIE+L403nelseIVYAfPLOqxUTcbZXVk +vRQFp+nmSXqQHUe5rAy1ivkCgYEAqLu7cclchCxqDv/6mc5NTVhMLu5QlvO5U6fq +HqitgW7d69oxF5X499YQXZ+ZFdMBf19ypTiBTIAu1M3nh6LtIa4SsjXzus5vjKpj +FdQhTBus/hU83Pkymk1MoDOPDEtsI+UDDdSDldmv9pyKGWPVi7H86vusXCLWnwsQ +e6fCXWECgYEAqgpGvva5kJ1ISgNwnJbwiNw0sOT9BMOsdNZBElf0kJIIy6FMPvap +6S1ziw+XWfdQ83VIUOCL5DrwmcYzLIogS0agmnx/monfDx0Nl9+OZRxy6+AI9vkK +86A1+DXdo+IgX3grFK1l1gPhAZPRWJZ+anrEkyR4iLq6ZoPZ3BQn97U= +-----END RSA PRIVATE KEY-----""" + text = "Some text! 
%$*" + + def _ssh_decrypt_text(self, ssh_private_key, text): + with utils.tempdir() as tmpdir: + sshkey = os.path.abspath(os.path.join(tmpdir, 'ssh.key')) + with open(sshkey, 'w') as f: + f.write(ssh_private_key) + try: + dec, _err = utils.execute('openssl', + 'rsautl', + '-decrypt', + '-inkey', sshkey, + process_input=text) + return dec + except exception.ProcessExecutionError as exc: + raise exception.DecryptionFailure(reason=exc.stderr) + + def test_ssh_encrypt_decrypt_text(self): + enc = crypto.ssh_encrypt_text(self.pubkey, self.text) + self.assertNotEqual(enc, self.text) + result = self._ssh_decrypt_text(self.prikey, enc) + self.assertEqual(result, self.text) + + def test_ssh_encrypt_failure(self): + self.assertRaises(exception.EncryptionFailure, + crypto.ssh_encrypt_text, '', self.text) diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py index 7df28bfcb..e43a32c19 100644 --- a/nova/tests/test_db_api.py +++ b/nova/tests/test_db_api.py @@ -261,6 +261,32 @@ class DbApiTestCase(test.TestCase): res = db.floating_ip_disassociate(ctxt, floating) self.assertEqual(res, None) + def test_fixed_ip_get_by_floating_address(self): + ctxt = context.get_admin_context() + values = {'address': 'fixed'} + fixed = db.fixed_ip_create(ctxt, values) + fixed_ip_ref = db.fixed_ip_get_by_address(ctxt, fixed) + values = {'address': 'floating', + 'fixed_ip_id': fixed_ip_ref['id']} + floating = db.floating_ip_create(ctxt, values) + fixed_ip_ref = db.fixed_ip_get_by_floating_address(ctxt, floating) + self.assertEqual(fixed, fixed_ip_ref['address']) + + def test_floating_ip_get_by_fixed_address(self): + ctxt = context.get_admin_context() + values = {'address': 'fixed'} + fixed = db.fixed_ip_create(ctxt, values) + fixed_ip_ref = db.fixed_ip_get_by_address(ctxt, fixed) + values = {'address': 'floating1', + 'fixed_ip_id': fixed_ip_ref['id']} + floating1 = db.floating_ip_create(ctxt, values) + values = {'address': 'floating2', + 'fixed_ip_id': fixed_ip_ref['id']} + floating2 = 
db.floating_ip_create(ctxt, values) + floating_ip_refs = db.floating_ip_get_by_fixed_address(ctxt, fixed) + self.assertEqual(floating1, floating_ip_refs[0]['address']) + self.assertEqual(floating2, floating_ip_refs[1]['address']) + def test_network_create_safe(self): ctxt = context.get_admin_context() values = {'host': 'localhost', 'project_id': 'project1'} @@ -299,27 +325,6 @@ class DbApiTestCase(test.TestCase): self.assertRaises(exception.DuplicateVlan, db.network_create_safe, ctxt, values2) - def test_instance_test_and_set(self): - ctxt = context.get_admin_context() - states = [ - (None, [None, 'some'], 'building'), - (None, [None], 'building'), - ('building', ['building'], 'ready'), - ('building', [None, 'building'], 'ready')] - for st in states: - inst = db.instance_create(ctxt, {'vm_state': st[0]}) - uuid = inst['uuid'] - db.instance_test_and_set(ctxt, uuid, 'vm_state', st[1], st[2]) - inst = db.instance_get_by_uuid(ctxt, uuid) - self.assertEqual(inst["vm_state"], st[2]) - - def test_instance_test_and_set_exception(self): - ctxt = context.get_admin_context() - inst = db.instance_create(ctxt, {'vm_state': 'building'}) - self.assertRaises(exception.InstanceInvalidState, - db.instance_test_and_set, ctxt, - inst['uuid'], 'vm_state', [None, 'disable'], 'run') - def test_instance_update_with_instance_uuid(self): # test instance_update() works when an instance UUID is passed. ctxt = context.get_admin_context() diff --git a/nova/tests/test_driver.py b/nova/tests/test_driver.py new file mode 100644 index 000000000..2dee7725f --- /dev/null +++ b/nova/tests/test_driver.py @@ -0,0 +1,60 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2013 Citrix Systems, Inc. +# Copyright 2013 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import test +from nova.virt import driver + + +class FakeDriver(object): + def __init__(self, *args, **kwargs): + self.args = args + self.kwargs = kwargs + + +class FakeDriver2(FakeDriver): + pass + + +class ToDriverRegistryTestCase(test.TestCase): + + def assertDriverInstance(self, inst, class_, *args, **kwargs): + self.assertEquals(class_, inst.__class__) + self.assertEquals(args, inst.args) + self.assertEquals(kwargs, inst.kwargs) + + def test_driver_dict_from_config(self): + drvs = driver.driver_dict_from_config( + [ + 'key1=nova.tests.test_driver.FakeDriver', + 'key2=nova.tests.test_driver.FakeDriver2', + ], 'arg1', 'arg2', param1='value1', param2='value2' + ) + + self.assertEquals( + sorted(['key1', 'key2']), + sorted(drvs.keys()) + ) + + self.assertDriverInstance( + drvs['key1'], + FakeDriver, 'arg1', 'arg2', param1='value1', + param2='value2') + + self.assertDriverInstance( + drvs['key2'], + FakeDriver2, 'arg1', 'arg2', param1='value1', + param2='value2') diff --git a/nova/tests/test_exception.py b/nova/tests/test_exception.py index 9e34f287c..ad67cff26 100644 --- a/nova/tests/test_exception.py +++ b/nova/tests/test_exception.py @@ -52,23 +52,23 @@ class FakeNotifier(object): self.provided_context = context -def good_function(): +def good_function(self, context): return 99 -def bad_function_exception(blah="a", boo="b", context=None): +def bad_function_exception(self, context, extra, blah="a", boo="b", zoo=None): raise test.TestingException() class WrapExceptionTestCase(test.TestCase): def test_wrap_exception_good_return(self): wrapped = 
exception.wrap_exception() - self.assertEquals(99, wrapped(good_function)()) + self.assertEquals(99, wrapped(good_function)(1, 2)) def test_wrap_exception_throws_exception(self): wrapped = exception.wrap_exception() self.assertRaises(test.TestingException, - wrapped(bad_function_exception)) + wrapped(bad_function_exception), 1, 2, 3) def test_wrap_exception_with_notifier(self): notifier = FakeNotifier() @@ -76,7 +76,7 @@ class WrapExceptionTestCase(test.TestCase): "level") ctxt = context.get_admin_context() self.assertRaises(test.TestingException, - wrapped(bad_function_exception), context=ctxt) + wrapped(bad_function_exception), 1, ctxt, 3, zoo=3) self.assertEquals(notifier.provided_publisher, "publisher") self.assertEquals(notifier.provided_event, "event") self.assertEquals(notifier.provided_priority, "level") @@ -88,7 +88,7 @@ class WrapExceptionTestCase(test.TestCase): notifier = FakeNotifier() wrapped = exception.wrap_exception(notifier) self.assertRaises(test.TestingException, - wrapped(bad_function_exception)) + wrapped(bad_function_exception), 1, 2, 3) self.assertEquals(notifier.provided_publisher, None) self.assertEquals(notifier.provided_event, "bad_function_exception") self.assertEquals(notifier.provided_priority, notifier.ERROR) diff --git a/nova/tests/test_fakelibvirt.py b/nova/tests/test_fakelibvirt.py index fea666f36..32c85a95a 100644 --- a/nova/tests/test_fakelibvirt.py +++ b/nova/tests/test_fakelibvirt.py @@ -53,6 +53,7 @@ def get_vm_xml(name="testname", uuid=None, source_type='file', </interface> <input type='mouse' bus='ps2'/> <graphics type='vnc' port='5901' autoport='yes' keymap='en-us'/> + <graphics type='spice' port='5901' autoport='yes' keymap='en-us'/> </devices> </domain>''' % {'name': name, 'uuid_tag': uuid_tag, diff --git a/nova/tests/test_hypervapi.py b/nova/tests/test_hypervapi.py index f5713c457..9fec9d151 100644 --- a/nova/tests/test_hypervapi.py +++ b/nova/tests/test_hypervapi.py @@ -68,7 +68,8 @@ class 
HyperVAPITestCase(basetestcase.BaseTestCase): self._setup_stubs() self.flags(instances_path=r'C:\Hyper-V\test\instances', - vswitch_name='external') + vswitch_name='external', + network_api_class='nova.network.quantumv2.api.API') self._hypervutils = hypervutils.HyperVUtils() self._conn = driver_hyperv.HyperVDriver(None) @@ -119,6 +120,7 @@ class HyperVAPITestCase(basetestcase.BaseTestCase): from nova.virt.hyperv import hostops from nova.virt.hyperv import livemigrationops from nova.virt.hyperv import snapshotops + from nova.virt.hyperv import vif from nova.virt.hyperv import vmops from nova.virt.hyperv import volumeops from nova.virt.hyperv import volumeutils @@ -129,6 +131,7 @@ class HyperVAPITestCase(basetestcase.BaseTestCase): basevolumeutils, baseops, hostops, + vif, vmops, vmutils, volumeops, @@ -240,6 +243,9 @@ class HyperVAPITestCase(basetestcase.BaseTestCase): self.assertEquals(len(dvd_paths), 0) def test_spawn_no_vswitch_exception(self): + self.flags(network_api_class='nova.network.api.API') + # Reinstantiate driver, as the VIF plugin is loaded during __init__ + self._conn = driver_hyperv.HyperVDriver(None) # Set flag to a non existing vswitch self.flags(vswitch_name=str(uuid.uuid4())) self.assertRaises(vmutils.HyperVException, self._spawn_instance, True) diff --git a/nova/tests/test_imagebackend.py b/nova/tests/test_imagebackend.py index 82b5eb475..495e7c947 100644 --- a/nova/tests/test_imagebackend.py +++ b/nova/tests/test_imagebackend.py @@ -22,6 +22,7 @@ from nova.openstack.common import cfg from nova import test from nova.tests import fake_libvirt_utils from nova.virt.libvirt import imagebackend +from nova.virt.libvirt import utils as libvirt_utils CONF = cfg.CONF @@ -38,12 +39,12 @@ class _ImageTestCase(object): super(_ImageTestCase, self).setUp() self.flags(disable_process_locking=True, instances_path=self.INSTANCES_PATH) - self.INSTANCE = 'instance' + self.INSTANCE = {'name': 'instance'} self.NAME = 'fake.vm' self.TEMPLATE = 'template' - self.PATH 
= os.path.join(CONF.instances_path, self.INSTANCE, - self.NAME) + self.PATH = os.path.join( + libvirt_utils.get_instance_path(self.INSTANCE), self.NAME) self.TEMPLATE_DIR = os.path.join(CONF.instances_path, '_base') self.TEMPLATE_PATH = os.path.join(self.TEMPLATE_DIR, 'template') @@ -215,7 +216,7 @@ class LvmTestCase(_ImageTestCase, test.TestCase): self.image_class = imagebackend.Lvm super(LvmTestCase, self).setUp() self.flags(libvirt_images_volume_group=self.VG) - self.LV = '%s_%s' % (self.INSTANCE, self.NAME) + self.LV = '%s_%s' % (self.INSTANCE['name'], self.NAME) self.PATH = os.path.join('/dev', self.VG, self.LV) self.disk = imagebackend.disk @@ -272,7 +273,7 @@ class LvmTestCase(_ImageTestCase, test.TestCase): cmd = ('dd', 'if=%s' % self.TEMPLATE_PATH, 'of=%s' % self.PATH, 'bs=4M') self.utils.execute(*cmd, run_as_root=True) - self.disk.resize2fs(self.PATH) + self.disk.resize2fs(self.PATH, run_as_root=True) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) @@ -342,7 +343,7 @@ class LvmTestCase(_ImageTestCase, test.TestCase): class BackendTestCase(test.TestCase): - INSTANCE = 'fake-instance' + INSTANCE = {'name': 'fake-instance'} NAME = 'fake-name.suffix' def get_image(self, use_cow, image_type): diff --git a/nova/tests/test_imagecache.py b/nova/tests/test_imagecache.py index eaf244c56..8142312b9 100644 --- a/nova/tests/test_imagecache.py +++ b/nova/tests/test_imagecache.py @@ -721,7 +721,7 @@ class ImageCacheManagerTestCase(test.TestCase): def fq_path(path): return os.path.join('/instance_path/_base/', path) - # Fake base directory existance + # Fake base directory existence orig_exists = os.path.exists def exists(path): @@ -747,7 +747,7 @@ class ImageCacheManagerTestCase(test.TestCase): '/instance_path/_base/%s_sm' % hashed_42]: return False - self.fail('Unexpected path existance check: %s' % path) + self.fail('Unexpected path existence check: %s' % path) self.stubs.Set(os.path, 'exists', lambda x: exists(x)) diff --git 
a/nova/tests/test_instance_types.py b/nova/tests/test_instance_types.py index 4a136cf13..b70b96b7f 100644 --- a/nova/tests/test_instance_types.py +++ b/nova/tests/test_instance_types.py @@ -142,6 +142,67 @@ class InstanceTypeTestCase(test.TestCase): self.assertRaises(exception.InvalidInput, instance_types.create, name, 256, 1, 120, 100, flavorid) + def test_add_instance_type_access(self): + user_id = 'fake' + project_id = 'fake' + ctxt = context.RequestContext(user_id, project_id, is_admin=True) + flavor_id = 'flavor1' + type_ref = instance_types.create('some flavor', 256, 1, 120, 100, + flavorid=flavor_id) + access_ref = instance_types.add_instance_type_access(flavor_id, + project_id, + ctxt=ctxt) + self.assertEqual(access_ref["project_id"], project_id) + self.assertEqual(access_ref["instance_type_id"], type_ref["id"]) + + def test_add_instance_type_access_already_exists(self): + user_id = 'fake' + project_id = 'fake' + ctxt = context.RequestContext(user_id, project_id, is_admin=True) + flavor_id = 'flavor1' + type_ref = instance_types.create('some flavor', 256, 1, 120, 100, + flavorid=flavor_id) + access_ref = instance_types.add_instance_type_access(flavor_id, + project_id, + ctxt=ctxt) + self.assertRaises(exception.FlavorAccessExists, + instance_types.add_instance_type_access, + flavor_id, project_id, ctxt) + + def test_add_instance_type_access_invalid_flavor(self): + user_id = 'fake' + project_id = 'fake' + ctxt = context.RequestContext(user_id, project_id, is_admin=True) + flavor_id = 'no_such_flavor' + self.assertRaises(exception.FlavorNotFound, + instance_types.add_instance_type_access, + flavor_id, project_id, ctxt) + + def test_remove_instance_type_access(self): + user_id = 'fake' + project_id = 'fake' + ctxt = context.RequestContext(user_id, project_id, is_admin=True) + flavor_id = 'flavor1' + it = instance_types + type_ref = it.create('some flavor', 256, 1, 120, 100, + flavorid=flavor_id) + access_ref = it.add_instance_type_access(flavor_id, project_id, 
ctxt) + it.remove_instance_type_access(flavor_id, project_id, ctxt) + + projects = it.get_instance_type_access_by_flavor_id(flavor_id, ctxt) + self.assertEqual([], projects) + + def test_remove_instance_type_access_doesnt_exists(self): + user_id = 'fake' + project_id = 'fake' + ctxt = context.RequestContext(user_id, project_id, is_admin=True) + flavor_id = 'flavor1' + type_ref = instance_types.create('some flavor', 256, 1, 120, 100, + flavorid=flavor_id) + self.assertRaises(exception.FlavorAccessNotFound, + instance_types.remove_instance_type_access, + flavor_id, project_id, ctxt=ctxt) + def test_get_all_instance_types(self): # Ensures that all instance types can be retrieved. session = sql_session.get_session() diff --git a/nova/tests/test_instance_types_extra_specs.py b/nova/tests/test_instance_types_extra_specs.py index f53840b86..f48c2efe8 100644 --- a/nova/tests/test_instance_types_extra_specs.py +++ b/nova/tests/test_instance_types_extra_specs.py @@ -18,6 +18,7 @@ Unit Tests for instance types extra specs code from nova import context from nova import db +from nova import exception from nova import test @@ -87,6 +88,13 @@ class InstanceTypeExtraSpecsTestCase(test.TestCase): self.flavorid) self.assertEquals(expected_specs, actual_specs) + def test_instance_type_extra_specs_update_with_nonexisting_flavor(self): + extra_specs = dict(cpu_arch="x86_64") + nonexisting_flavorid = "some_flavor_that_doesnt_exists" + self.assertRaises(exception.FlavorNotFound, + db.instance_type_extra_specs_update_or_create, + self.context, nonexisting_flavorid, extra_specs) + def test_instance_type_extra_specs_create(self): expected_specs = dict(cpu_arch="x86_64", cpu_model="Nehalem", diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 53bb1b984..75e758cde 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -478,7 +478,8 @@ class CacheConcurrencyTestCase(test.TestCase): wait1 = eventlet.event.Event() done1 = eventlet.event.Event() sig1 = 
eventlet.event.Event() - thr1 = eventlet.spawn(backend.image('instance', 'name').cache, + thr1 = eventlet.spawn(backend.image({'name': 'instance'}, + 'name').cache, _concurrency, 'fname', None, signal=sig1, wait=wait1, done=done1) eventlet.sleep(0) @@ -488,7 +489,8 @@ class CacheConcurrencyTestCase(test.TestCase): wait2 = eventlet.event.Event() done2 = eventlet.event.Event() sig2 = eventlet.event.Event() - thr2 = eventlet.spawn(backend.image('instance', 'name').cache, + thr2 = eventlet.spawn(backend.image({'name': 'instance'}, + 'name').cache, _concurrency, 'fname', None, signal=sig2, wait=wait2, done=done2) @@ -512,7 +514,8 @@ class CacheConcurrencyTestCase(test.TestCase): wait1 = eventlet.event.Event() done1 = eventlet.event.Event() sig1 = eventlet.event.Event() - thr1 = eventlet.spawn(backend.image('instance', 'name').cache, + thr1 = eventlet.spawn(backend.image({'name': 'instance'}, + 'name').cache, _concurrency, 'fname2', None, signal=sig1, wait=wait1, done=done1) eventlet.sleep(0) @@ -522,7 +525,8 @@ class CacheConcurrencyTestCase(test.TestCase): wait2 = eventlet.event.Event() done2 = eventlet.event.Event() sig2 = eventlet.event.Event() - thr2 = eventlet.spawn(backend.image('instance', 'name').cache, + thr2 = eventlet.spawn(backend.image({'name': 'instance'}, + 'name').cache, _concurrency, 'fname1', None, signal=sig2, wait=wait2, done=done2) eventlet.sleep(0) @@ -566,7 +570,6 @@ class LibvirtConnTestCase(test.TestCase): self.context = context.get_admin_context() self.flags(instances_path='') self.flags(libvirt_snapshots_directory='') - self.call_libvirt_dependant_setup = False self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.driver.libvirt_utils', fake_libvirt_utils)) @@ -765,6 +768,164 @@ class LibvirtConnTestCase(test.TestCase): vconfig.LibvirtConfigGuestDisk) self.assertEquals(cfg.devices[3].target_dev, 'vdd') + def test_get_guest_config_with_configdrive(self): + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) + instance_ref = 
db.instance_create(self.context, self.test_instance) + + # make configdrive.enabled_for() return True + instance_ref['config_drive'] = 'ANY_ID' + + cfg = conn.get_guest_config(instance_ref, [], None, None) + + self.assertEquals(type(cfg.devices[2]), + vconfig.LibvirtConfigGuestDisk) + self.assertEquals(cfg.devices[2].target_dev, + conn.default_last_device) + + def test_get_guest_config_with_vnc(self): + self.flags(libvirt_type='kvm', + vnc_enabled=True, + use_usb_tablet=False) + self.flags(enabled=False, group='spice') + + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) + instance_ref = db.instance_create(self.context, self.test_instance) + + cfg = conn.get_guest_config(instance_ref, [], None, None) + self.assertEquals(len(cfg.devices), 5) + self.assertEquals(type(cfg.devices[0]), + vconfig.LibvirtConfigGuestDisk) + self.assertEquals(type(cfg.devices[1]), + vconfig.LibvirtConfigGuestDisk) + self.assertEquals(type(cfg.devices[2]), + vconfig.LibvirtConfigGuestSerial) + self.assertEquals(type(cfg.devices[3]), + vconfig.LibvirtConfigGuestSerial) + self.assertEquals(type(cfg.devices[4]), + vconfig.LibvirtConfigGuestGraphics) + + self.assertEquals(cfg.devices[4].type, "vnc") + + def test_get_guest_config_with_vnc_and_tablet(self): + self.flags(libvirt_type='kvm', + vnc_enabled=True, + use_usb_tablet=True) + self.flags(enabled=False, group='spice') + + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) + instance_ref = db.instance_create(self.context, self.test_instance) + + cfg = conn.get_guest_config(instance_ref, [], None, None) + self.assertEquals(len(cfg.devices), 6) + self.assertEquals(type(cfg.devices[0]), + vconfig.LibvirtConfigGuestDisk) + self.assertEquals(type(cfg.devices[1]), + vconfig.LibvirtConfigGuestDisk) + self.assertEquals(type(cfg.devices[2]), + vconfig.LibvirtConfigGuestSerial) + self.assertEquals(type(cfg.devices[3]), + vconfig.LibvirtConfigGuestSerial) + self.assertEquals(type(cfg.devices[4]), + 
vconfig.LibvirtConfigGuestInput) + self.assertEquals(type(cfg.devices[5]), + vconfig.LibvirtConfigGuestGraphics) + + self.assertEquals(cfg.devices[4].type, "tablet") + self.assertEquals(cfg.devices[5].type, "vnc") + + def test_get_guest_config_with_spice_and_tablet(self): + self.flags(libvirt_type='kvm', + vnc_enabled=False, + use_usb_tablet=True) + self.flags(enabled=True, + agent_enabled=False, + group='spice') + + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) + instance_ref = db.instance_create(self.context, self.test_instance) + + cfg = conn.get_guest_config(instance_ref, [], None, None) + self.assertEquals(len(cfg.devices), 6) + self.assertEquals(type(cfg.devices[0]), + vconfig.LibvirtConfigGuestDisk) + self.assertEquals(type(cfg.devices[1]), + vconfig.LibvirtConfigGuestDisk) + self.assertEquals(type(cfg.devices[2]), + vconfig.LibvirtConfigGuestSerial) + self.assertEquals(type(cfg.devices[3]), + vconfig.LibvirtConfigGuestSerial) + self.assertEquals(type(cfg.devices[4]), + vconfig.LibvirtConfigGuestInput) + self.assertEquals(type(cfg.devices[5]), + vconfig.LibvirtConfigGuestGraphics) + + self.assertEquals(cfg.devices[4].type, "tablet") + self.assertEquals(cfg.devices[5].type, "spice") + + def test_get_guest_config_with_spice_and_agent(self): + self.flags(libvirt_type='kvm', + vnc_enabled=False, + use_usb_tablet=True) + self.flags(enabled=True, + agent_enabled=True, + group='spice') + + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) + instance_ref = db.instance_create(self.context, self.test_instance) + + cfg = conn.get_guest_config(instance_ref, [], None, None) + self.assertEquals(len(cfg.devices), 6) + self.assertEquals(type(cfg.devices[0]), + vconfig.LibvirtConfigGuestDisk) + self.assertEquals(type(cfg.devices[1]), + vconfig.LibvirtConfigGuestDisk) + self.assertEquals(type(cfg.devices[2]), + vconfig.LibvirtConfigGuestSerial) + self.assertEquals(type(cfg.devices[3]), + vconfig.LibvirtConfigGuestSerial) + 
self.assertEquals(type(cfg.devices[4]), + vconfig.LibvirtConfigGuestChannel) + self.assertEquals(type(cfg.devices[5]), + vconfig.LibvirtConfigGuestGraphics) + + self.assertEquals(cfg.devices[4].target_name, "com.redhat.spice.0") + self.assertEquals(cfg.devices[5].type, "spice") + + def test_get_guest_config_with_vnc_and_spice(self): + self.flags(libvirt_type='kvm', + vnc_enabled=True, + use_usb_tablet=True) + self.flags(enabled=True, + agent_enabled=True, + group='spice') + + conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) + instance_ref = db.instance_create(self.context, self.test_instance) + + cfg = conn.get_guest_config(instance_ref, [], None, None) + self.assertEquals(len(cfg.devices), 8) + self.assertEquals(type(cfg.devices[0]), + vconfig.LibvirtConfigGuestDisk) + self.assertEquals(type(cfg.devices[1]), + vconfig.LibvirtConfigGuestDisk) + self.assertEquals(type(cfg.devices[2]), + vconfig.LibvirtConfigGuestSerial) + self.assertEquals(type(cfg.devices[3]), + vconfig.LibvirtConfigGuestSerial) + self.assertEquals(type(cfg.devices[4]), + vconfig.LibvirtConfigGuestInput) + self.assertEquals(type(cfg.devices[5]), + vconfig.LibvirtConfigGuestChannel) + self.assertEquals(type(cfg.devices[6]), + vconfig.LibvirtConfigGuestGraphics) + self.assertEquals(type(cfg.devices[7]), + vconfig.LibvirtConfigGuestGraphics) + + self.assertEquals(cfg.devices[4].type, "tablet") + self.assertEquals(cfg.devices[5].target_name, "com.redhat.spice.0") + self.assertEquals(cfg.devices[6].type, "vnc") + self.assertEquals(cfg.devices[7].type, "spice") + def test_get_guest_cpu_config_none(self): self.flags(libvirt_cpu_mode="none") conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) @@ -904,6 +1065,9 @@ class LibvirtConnTestCase(test.TestCase): cpu.model = "Opteron_G4" cpu.vendor = "AMD" + cpu.features.append(vconfig.LibvirtConfigGuestCPUFeature("tm2")) + cpu.features.append(vconfig.LibvirtConfigGuestCPUFeature("ht")) + caps = vconfig.LibvirtConfigCaps() caps.host = 
vconfig.LibvirtConfigCapsHost() caps.host.cpu = cpu @@ -927,6 +1091,9 @@ class LibvirtConnTestCase(test.TestCase): self.assertEquals(conf.cpu.mode, None) self.assertEquals(conf.cpu.model, "Opteron_G4") self.assertEquals(conf.cpu.vendor, "AMD") + self.assertEquals(len(conf.cpu.features), 2) + self.assertEquals(conf.cpu.features[0].name, "tm2") + self.assertEquals(conf.cpu.features[1].name, "ht") def test_get_guest_cpu_config_custom_old(self): def get_lib_version_stub(self): @@ -2700,11 +2867,11 @@ class LibvirtConnTestCase(test.TestCase): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.mox.StubOutWithMock(conn, "_wrapped_conn") - self.mox.StubOutWithMock(conn._wrapped_conn, "getCapabilities") + self.mox.StubOutWithMock(conn._wrapped_conn, "getLibVersion") self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_code") self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_domain") - conn._wrapped_conn.getCapabilities().AndRaise( + conn._wrapped_conn.getLibVersion().AndRaise( libvirt.libvirtError("fake failure")) libvirt.libvirtError.get_error_code().AndReturn(error) @@ -2932,7 +3099,7 @@ class LibvirtConnTestCase(test.TestCase): self.stubs.Set(conn, 'get_info', fake_get_info) instance = {"name": "instancename", "id": "instanceid", "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"} - # NOTE(vish): verifies destory doesn't raise if the instance disappears + # NOTE(vish): verifies destroy doesn't raise if the instance disappears conn._destroy(instance) def test_available_least_handles_missing(self): @@ -3539,30 +3706,25 @@ class IptablesFirewallTestCase(test.TestCase): fake.FakeVirtAPI(), get_connection=lambda: self.fake_libvirt_connection) - in_nat_rules = [ + in_rules = [ '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011', '*nat', ':PREROUTING ACCEPT [1170:189210]', ':INPUT ACCEPT [844:71028]', ':OUTPUT ACCEPT [5149:405186]', ':POSTROUTING ACCEPT [5063:386098]', - ] - - in_mangle_rules = [ - '# Generated by iptables-save v1.4.12 
on Tue Dec 18 15:50:25 201;', - '*mangle', - ':PREROUTING ACCEPT [241:39722]', - ':INPUT ACCEPT [230:39282]', - ':FORWARD ACCEPT [0:0]', - ':OUTPUT ACCEPT [266:26558]', - ':POSTROUTING ACCEPT [267:26590]', - '-A POSTROUTING -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM ' - '--checksum-fill', - 'COMMIT', - '# Completed on Tue Dec 18 15:50:25 2012', - ] - - in_filter_rules = [ + '# Completed on Tue Dec 18 15:50:25 2012', + '# Generated by iptables-save v1.4.12 on Tue Dec 18 15:50:25 201;', + '*mangle', + ':PREROUTING ACCEPT [241:39722]', + ':INPUT ACCEPT [230:39282]', + ':FORWARD ACCEPT [0:0]', + ':OUTPUT ACCEPT [266:26558]', + ':POSTROUTING ACCEPT [267:26590]', + '-A POSTROUTING -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM ' + '--checksum-fill', + 'COMMIT', + '# Completed on Tue Dec 18 15:50:25 2012', '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010', '*filter', ':INPUT ACCEPT [969615:281627771]', @@ -3657,15 +3819,11 @@ class IptablesFirewallTestCase(test.TestCase): # self.fw.add_instance(instance_ref) def fake_iptables_execute(*cmd, **kwargs): process_input = kwargs.get('process_input', None) - if cmd == ('ip6tables-save', '-c', '-t', 'filter'): + if cmd == ('ip6tables-save', '-c'): return '\n'.join(self.in6_filter_rules), None - if cmd == ('iptables-save', '-c', '-t', 'filter'): - return '\n'.join(self.in_filter_rules), None - if cmd == ('iptables-save', '-c', '-t', 'nat'): - return '\n'.join(self.in_nat_rules), None - if cmd == ('iptables-save', '-c', '-t', 'mangle'): - return '\n'.join(self.in_mangle_rules), None - if cmd == ('iptables-restore', '-c',): + if cmd == ('iptables-save', '-c'): + return '\n'.join(self.in_rules), None + if cmd == ('iptables-restore', '-c'): lines = process_input.split('\n') if '*filter' in lines: self.out_rules = lines @@ -3689,7 +3847,7 @@ class IptablesFirewallTestCase(test.TestCase): self.fw.apply_instance_filter(instance_ref, network_info) in_rules = filter(lambda l: not l.startswith('#'), - 
self.in_filter_rules) + self.in_rules) for rule in in_rules: if not 'nova' in rule: self.assertTrue(rule in self.out_rules, @@ -4436,7 +4594,7 @@ class LibvirtDriverTestCase(test.TestCase): pass def fake_to_xml(instance, network_info, image_meta=None, rescue=None, - block_device_info=None): + block_device_info=None, write_to_disk=False): return "" def fake_plug_vifs(instance, network_info): @@ -4447,7 +4605,7 @@ class LibvirtDriverTestCase(test.TestCase): block_device_info=None): pass - def fake_create_domain(xml, inst_name=''): + def fake_create_domain(xml, instance=None): return None def fake_enable_hairpin(instance): @@ -4493,7 +4651,7 @@ class LibvirtDriverTestCase(test.TestCase): def fake_plug_vifs(instance, network_info): pass - def fake_create_domain(xml, inst_name=''): + def fake_create_domain(xml, instance=None): return None def fake_enable_hairpin(instance): diff --git a/nova/tests/test_libvirt_config.py b/nova/tests/test_libvirt_config.py index 5eafba841..56719de11 100644 --- a/nova/tests/test_libvirt_config.py +++ b/nova/tests/test_libvirt_config.py @@ -539,6 +539,29 @@ class LibvirtConfigGuestConsoleTest(LibvirtConfigBaseTest): <console type="pty"/>""") +class LibvirtConfigGuestChannelTest(LibvirtConfigBaseTest): + def test_config_spice_minimal(self): + obj = config.LibvirtConfigGuestChannel() + obj.type = "spicevmc" + + xml = obj.to_xml() + self.assertXmlEqual(xml, """ + <channel type="spicevmc"> + <target type='virtio'/> + </channel>""") + + def test_config_spice_full(self): + obj = config.LibvirtConfigGuestChannel() + obj.type = "spicevmc" + obj.target_name = "com.redhat.spice.0" + + xml = obj.to_xml() + self.assertXmlEqual(xml, """ + <channel type="spicevmc"> + <target type='virtio' name='com.redhat.spice.0'/> + </channel>""") + + class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest): def test_config_ethernet(self): obj = config.LibvirtConfigGuestInterface() diff --git a/nova/tests/test_libvirt_vif.py b/nova/tests/test_libvirt_vif.py index 
aeebb5742..11ffa020f 100644 --- a/nova/tests/test_libvirt_vif.py +++ b/nova/tests/test_libvirt_vif.py @@ -47,7 +47,8 @@ class LibvirtVifTestCase(test.TestCase): 'gateway_v6': net['gateway_v6'], 'ips': [{'ip': '101.168.1.9'}], 'dhcp_server': '191.168.1.1', - 'vif_uuid': 'vif-xxx-yyy-zzz' + 'vif_uuid': 'vif-xxx-yyy-zzz', + 'vif_devname': 'tap-xxx-yyy-zzz' } instance = { @@ -229,7 +230,7 @@ class LibvirtVifTestCase(test.TestCase): self.assertEqual(node.get("type"), "bridge") br_name = node.find("source").get("bridge") - self.assertEqual(br_name, CONF.libvirt_ovs_bridge) + self.assertEqual(br_name, "br0") mac = node.find("mac").get("address") self.assertEqual(mac, self.mapping['mac']) vp = node.find("virtualport") @@ -257,7 +258,7 @@ class LibvirtVifTestCase(test.TestCase): mac = node.find("mac").get("address") self.assertEqual(mac, self.mapping['mac']) br_name = node.find("source").get("bridge") - self.assertTrue(br_name.startswith("brq")) + self.assertEqual(br_name, "br0") def test_quantum_hybrid_driver(self): d = vif.LibvirtHybridOVSBridgeDriver() diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py index 29e63aba7..f15d71633 100644 --- a/nova/tests/test_metadata.py +++ b/nova/tests/test_metadata.py @@ -342,7 +342,7 @@ class OpenStackMetadataTestCase(test.TestCase): mdinst = fake_InstanceMetadata(self.stubs, inst) # since this instance had no user-data it should not be there. 
- self.assertFalse('user-data' in mdinst.lookup("/openstack/2012-08-10")) + self.assertFalse('user_data' in mdinst.lookup("/openstack/2012-08-10")) self.assertRaises(base.InvalidMetadataPath, mdinst.lookup, "/openstack/2012-08-10/user_data") @@ -362,6 +362,14 @@ class OpenStackMetadataTestCase(test.TestCase): mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json") self.assertFalse("random_seed" in json.loads(mdjson)) + def test_no_dashes_in_metadata(self): + # top level entries in meta_data should not contain '-' in their name + inst = copy(self.instance) + mdinst = fake_InstanceMetadata(self.stubs, inst) + mdjson = json.loads(mdinst.lookup("/openstack/latest/meta_data.json")) + + self.assertEqual([], [k for k in mdjson.keys() if k.find("-") != -1]) + class MetadataHandlerTestCase(test.TestCase): """Test that metadata is returning proper values.""" diff --git a/nova/tests/test_migrations.py b/nova/tests/test_migrations.py index 750326592..3e9da9594 100644 --- a/nova/tests/test_migrations.py +++ b/nova/tests/test_migrations.py @@ -42,37 +42,48 @@ from nova import test LOG = logging.getLogger(__name__) -def _mysql_get_connect_string(user="openstack_citest", - passwd="openstack_citest", - database="openstack_citest"): +def _get_connect_string(backend, + user="openstack_citest", + passwd="openstack_citest", + database="openstack_citest"): """ - Try to get a connection with a very specfic set of values, if we get - these then we'll run the mysql tests, otherwise they are skipped + Try to get a connection with a very specific set of values, if we get + these then we'll run the tests, otherwise they are skipped """ - return "mysql://%(user)s:%(passwd)s@localhost/%(database)s" % locals() + if backend == "postgres": + backend = "postgresql+psycopg2" + return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" + % locals()) -def _is_mysql_avail(user="openstack_citest", - passwd="openstack_citest", - database="openstack_citest"): + +def 
_is_backend_avail(backend, + user="openstack_citest", + passwd="openstack_citest", + database="openstack_citest"): try: - connect_uri = _mysql_get_connect_string( - user=user, passwd=passwd, database=database) + if backend == "mysql": + connect_uri = _get_connect_string("mysql", + user=user, passwd=passwd, database=database) + elif backend == "postgres": + connect_uri = _get_connect_string("postgres", + user=user, passwd=passwd, database=database) engine = sqlalchemy.create_engine(connect_uri) connection = engine.connect() except Exception: # intentionally catch all to handle exceptions even if we don't - # have mysql code loaded at all. + # have any backend code loaded. return False else: connection.close() + engine.dispose() return True def _have_mysql(): present = os.environ.get('NOVA_TEST_MYSQL_PRESENT') if present is None: - return _is_mysql_avail() + return _is_backend_avail('mysql') return present.lower() in ('', 'true') @@ -121,17 +132,10 @@ class TestMigrations(test.TestCase): self._reset_databases() def tearDown(self): - # We destroy the test data store between each test case, # and recreate it, which ensures that we have no side-effects # from the tests self._reset_databases() - - # remove these from the list so they aren't used in the migration tests - if "mysqlcitest" in self.engines: - del self.engines["mysqlcitest"] - if "mysqlcitest" in self.test_databases: - del self.test_databases["mysqlcitest"] super(TestMigrations, self).tearDown() def _reset_databases(self): @@ -142,6 +146,7 @@ class TestMigrations(test.TestCase): for key, engine in self.engines.items(): conn_string = self.test_databases[key] conn_pieces = urlparse.urlparse(conn_string) + engine.dispose() if conn_string.startswith('sqlite'): # We can just delete the SQLite database, which is # the easiest and cleanest solution @@ -172,6 +177,7 @@ class TestMigrations(test.TestCase): database = conn_pieces.path.strip('/') loc_pieces = conn_pieces.netloc.split('@') host = loc_pieces[1] + 
auth_pieces = loc_pieces[0].split(':') user = auth_pieces[0] password = "" @@ -183,7 +189,7 @@ class TestMigrations(test.TestCase): "~/.pgpass && chmod 0600 ~/.pgpass" % locals()) execute_cmd(createpgpass) # note(boris-42): We must create and drop database, we can't - # drop database wich we have connected to, so for such + # drop database which we have connected to, so for such # operations there is a special database template1. sqlcmd = ("psql -w -U %(user)s -h %(host)s -c" " '%(sql)s' -d template1") @@ -207,25 +213,31 @@ class TestMigrations(test.TestCase): Test that we can trigger a mysql connection failure and we fail gracefully to ensure we don't break people without mysql """ - if _is_mysql_avail(user="openstack_cifail"): + if _is_backend_avail('mysql', user="openstack_cifail"): self.fail("Shouldn't have connected") - def test_mysql_innodb(self): + def test_mysql_opportunistically(self): # Test that table creation on mysql only builds InnoDB tables - if not _have_mysql(): + if not _is_backend_avail('mysql'): self.skipTest("mysql not available") # add this to the global lists to make reset work with it, it's removed # automatically in tearDown so no need to clean it up here. 
- connect_string = _mysql_get_connect_string() + connect_string = _get_connect_string("mysql") engine = sqlalchemy.create_engine(connect_string) self.engines["mysqlcitest"] = engine self.test_databases["mysqlcitest"] = connect_string + # Test that we end in an innodb + self._check_mysql_innodb(engine) + # Test IP transition + self._check_mysql_migration_149(engine) + + def _check_mysql_innodb(self, engine): # build a fully populated mysql database with all the tables self._reset_databases() self._walk_versions(engine, False, False) - uri = _mysql_get_connect_string(database="information_schema") + uri = _get_connect_string("mysql", database="information_schema") connection = sqlalchemy.create_engine(uri).connect() # sanity check @@ -242,6 +254,91 @@ class TestMigrations(test.TestCase): count = noninnodb.scalar() self.assertEqual(count, 0, "%d non InnoDB tables created" % count) + def test_migration_149_postgres(self): + """Test updating a table with IPAddress columns.""" + if not _is_backend_avail('postgres'): + self.skipTest("postgres not available") + + connect_string = _get_connect_string("postgres") + engine = sqlalchemy.create_engine(connect_string) + + self.engines["postgrescitest"] = engine + self.test_databases["postgrescitest"] = connect_string + + self._reset_databases() + migration_api.version_control(engine, TestMigrations.REPOSITORY, + migration.INIT_VERSION) + + connection = engine.connect() + + self._migrate_up(engine, 148) + IPS = ("127.0.0.1", "255.255.255.255", "2001:db8::1:2", "::1") + connection.execute("INSERT INTO provider_fw_rules " + " (protocol, from_port, to_port, cidr)" + "VALUES ('tcp', 1234, 1234, '%s'), " + " ('tcp', 1234, 1234, '%s'), " + " ('tcp', 1234, 1234, '%s'), " + " ('tcp', 1234, 1234, '%s')" % IPS) + self.assertEqual('character varying', + connection.execute( + "SELECT data_type FROM INFORMATION_SCHEMA.COLUMNS " + "WHERE table_name='provider_fw_rules' " + "AND table_catalog='openstack_citest' " + "AND 
column_name='cidr'").scalar()) + + self._migrate_up(engine, 149) + self.assertEqual(IPS, + tuple(tup[0] for tup in connection.execute( + "SELECT cidr from provider_fw_rules").fetchall())) + self.assertEqual('inet', + connection.execute( + "SELECT data_type FROM INFORMATION_SCHEMA.COLUMNS " + "WHERE table_name='provider_fw_rules' " + "AND table_catalog='openstack_citest' " + "AND column_name='cidr'").scalar()) + connection.close() + + def _check_mysql_migration_149(self, engine): + """Test updating a table with IPAddress columns.""" + self._reset_databases() + migration_api.version_control(engine, TestMigrations.REPOSITORY, + migration.INIT_VERSION) + + uri = _get_connect_string("mysql", database="openstack_citest") + connection = sqlalchemy.create_engine(uri).connect() + + self._migrate_up(engine, 148) + + IPS = ("127.0.0.1", "255.255.255.255", "2001:db8::1:2", "::1") + connection.execute("INSERT INTO provider_fw_rules " + " (protocol, from_port, to_port, cidr)" + "VALUES ('tcp', 1234, 1234, '%s'), " + " ('tcp', 1234, 1234, '%s'), " + " ('tcp', 1234, 1234, '%s'), " + " ('tcp', 1234, 1234, '%s')" % IPS) + self.assertEqual('varchar(255)', + connection.execute( + "SELECT column_type FROM INFORMATION_SCHEMA.COLUMNS " + "WHERE table_name='provider_fw_rules' " + "AND table_schema='openstack_citest' " + "AND column_name='cidr'").scalar()) + + connection.close() + + self._migrate_up(engine, 149) + + connection = sqlalchemy.create_engine(uri).connect() + + self.assertEqual(IPS, + tuple(tup[0] for tup in connection.execute( + "SELECT cidr from provider_fw_rules").fetchall())) + self.assertEqual('varchar(39)', + connection.execute( + "SELECT column_type FROM INFORMATION_SCHEMA.COLUMNS " + "WHERE table_name='provider_fw_rules' " + "AND table_schema='openstack_citest' " + "AND column_name='cidr'").scalar()) + def _walk_versions(self, engine=None, snake_walk=False, downgrade=True): # Determine latest version script from the repo, then # upgrade from 1 through to the latest, with 
no data diff --git a/nova/tests/test_nova_rootwrap.py b/nova/tests/test_nova_rootwrap.py deleted file mode 100644 index 1029e0c2c..000000000 --- a/nova/tests/test_nova_rootwrap.py +++ /dev/null @@ -1,198 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2011 OpenStack LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import ConfigParser -import logging -import logging.handlers -import os -import subprocess - -from nova.rootwrap import filters -from nova.rootwrap import wrapper -from nova import test - - -class RootwrapTestCase(test.TestCase): - - def setUp(self): - super(RootwrapTestCase, self).setUp() - self.filters = [ - filters.RegExpFilter("/bin/ls", "root", 'ls', '/[a-z]+'), - filters.CommandFilter("/usr/bin/foo_bar_not_exist", "root"), - filters.RegExpFilter("/bin/cat", "root", 'cat', '/[a-z]+'), - filters.CommandFilter("/nonexistent/cat", "root"), - filters.CommandFilter("/bin/cat", "root") # Keep this one last - ] - - def test_RegExpFilter_match(self): - usercmd = ["ls", "/root"] - filtermatch = wrapper.match_filter(self.filters, usercmd) - self.assertFalse(filtermatch is None) - self.assertEqual(filtermatch.get_command(usercmd), - ["/bin/ls", "/root"]) - - def test_RegExpFilter_reject(self): - usercmd = ["ls", "root"] - self.assertRaises(wrapper.NoFilterMatched, - wrapper.match_filter, self.filters, usercmd) - - def test_missing_command(self): - valid_but_missing = ["foo_bar_not_exist"] - invalid = ["foo_bar_not_exist_and_not_matched"] - 
self.assertRaises(wrapper.FilterMatchNotExecutable, - wrapper.match_filter, self.filters, valid_but_missing) - self.assertRaises(wrapper.NoFilterMatched, - wrapper.match_filter, self.filters, invalid) - - def _test_DnsmasqFilter(self, filter_class, config_file_arg): - usercmd = ['env', config_file_arg + '=A', 'NETWORK_ID=foobar', - 'dnsmasq', 'foo'] - f = filter_class("/usr/bin/dnsmasq", "root") - self.assertTrue(f.match(usercmd)) - self.assertEqual(f.get_command(usercmd), ['/usr/bin/dnsmasq', 'foo']) - env = f.get_environment(usercmd) - self.assertEqual(env.get(config_file_arg), 'A') - self.assertEqual(env.get('NETWORK_ID'), 'foobar') - - def test_DnsmasqFilter(self): - self._test_DnsmasqFilter(filters.DnsmasqFilter, 'CONFIG_FILE') - - def test_DeprecatedDnsmasqFilter(self): - self._test_DnsmasqFilter(filters.DeprecatedDnsmasqFilter, 'FLAGFILE') - - def test_KillFilter(self): - if not os.path.exists("/proc/%d" % os.getpid()): - self.skipTest("Test requires /proc filesystem (procfs)") - p = subprocess.Popen(["cat"], stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - try: - f = filters.KillFilter("root", "/bin/cat", "-9", "-HUP") - f2 = filters.KillFilter("root", "/usr/bin/cat", "-9", "-HUP") - usercmd = ['kill', '-ALRM', p.pid] - # Incorrect signal should fail - self.assertFalse(f.match(usercmd) or f2.match(usercmd)) - usercmd = ['kill', p.pid] - # Providing no signal should fail - self.assertFalse(f.match(usercmd) or f2.match(usercmd)) - # Providing matching signal should be allowed - usercmd = ['kill', '-9', p.pid] - self.assertTrue(f.match(usercmd) or f2.match(usercmd)) - - f = filters.KillFilter("root", "/bin/cat") - f2 = filters.KillFilter("root", "/usr/bin/cat") - usercmd = ['kill', os.getpid()] - # Our own PID does not match /bin/sleep, so it should fail - self.assertFalse(f.match(usercmd) or f2.match(usercmd)) - usercmd = ['kill', 999999] - # Nonexistent PID should fail - self.assertFalse(f.match(usercmd) or f2.match(usercmd)) - 
usercmd = ['kill', p.pid] - # Providing no signal should work - self.assertTrue(f.match(usercmd) or f2.match(usercmd)) - finally: - # Terminate the "cat" process and wait for it to finish - p.terminate() - p.wait() - - def test_KillFilter_no_raise(self): - # Makes sure ValueError from bug 926412 is gone. - f = filters.KillFilter("root", "") - # Providing anything other than kill should be False - usercmd = ['notkill', 999999] - self.assertFalse(f.match(usercmd)) - # Providing something that is not a pid should be False - usercmd = ['kill', 'notapid'] - self.assertFalse(f.match(usercmd)) - - def test_KillFilter_deleted_exe(self): - # Makes sure deleted exe's are killed correctly. - # See bug #967931. - def fake_readlink(blah): - return '/bin/commandddddd (deleted)' - - f = filters.KillFilter("root", "/bin/commandddddd") - usercmd = ['kill', 1234] - # Providing no signal should work - self.stubs.Set(os, 'readlink', fake_readlink) - self.assertTrue(f.match(usercmd)) - - def test_ReadFileFilter(self): - goodfn = '/good/file.name' - f = filters.ReadFileFilter(goodfn) - usercmd = ['cat', '/bad/file'] - self.assertFalse(f.match(['cat', '/bad/file'])) - usercmd = ['cat', goodfn] - self.assertEqual(f.get_command(usercmd), ['/bin/cat', goodfn]) - self.assertTrue(f.match(usercmd)) - - def test_exec_dirs_search(self): - # This test supposes you have /bin/cat or /usr/bin/cat locally - f = filters.CommandFilter("cat", "root") - usercmd = ['cat', '/f'] - self.assertTrue(f.match(usercmd)) - self.assertTrue(f.get_command(usercmd, exec_dirs=['/bin', - '/usr/bin']) in (['/bin/cat', '/f'], ['/usr/bin/cat', '/f'])) - - def test_skips(self): - # Check that all filters are skipped and that the last matches - usercmd = ["cat", "/"] - filtermatch = wrapper.match_filter(self.filters, usercmd) - self.assertTrue(filtermatch is self.filters[-1]) - - def test_RootwrapConfig(self): - raw = ConfigParser.RawConfigParser() - - # Empty config should raise ConfigParser.Error - 
self.assertRaises(ConfigParser.Error, wrapper.RootwrapConfig, raw) - - # Check default values - raw.set('DEFAULT', 'filters_path', '/a,/b') - config = wrapper.RootwrapConfig(raw) - self.assertEqual(config.filters_path, ['/a', '/b']) - self.assertEqual(config.exec_dirs, os.environ["PATH"].split(':')) - self.assertFalse(config.use_syslog) - self.assertEqual(config.syslog_log_facility, - logging.handlers.SysLogHandler.LOG_SYSLOG) - self.assertEqual(config.syslog_log_level, logging.ERROR) - - # Check general values - raw.set('DEFAULT', 'exec_dirs', '/a,/x') - config = wrapper.RootwrapConfig(raw) - self.assertEqual(config.exec_dirs, ['/a', '/x']) - - raw.set('DEFAULT', 'use_syslog', 'oui') - self.assertRaises(ValueError, wrapper.RootwrapConfig, raw) - raw.set('DEFAULT', 'use_syslog', 'true') - config = wrapper.RootwrapConfig(raw) - self.assertTrue(config.use_syslog) - - raw.set('DEFAULT', 'syslog_log_facility', 'moo') - self.assertRaises(ValueError, wrapper.RootwrapConfig, raw) - raw.set('DEFAULT', 'syslog_log_facility', 'local0') - config = wrapper.RootwrapConfig(raw) - self.assertEqual(config.syslog_log_facility, - logging.handlers.SysLogHandler.LOG_LOCAL0) - raw.set('DEFAULT', 'syslog_log_facility', 'LOG_AUTH') - config = wrapper.RootwrapConfig(raw) - self.assertEqual(config.syslog_log_facility, - logging.handlers.SysLogHandler.LOG_AUTH) - - raw.set('DEFAULT', 'syslog_log_level', 'bar') - self.assertRaises(ValueError, wrapper.RootwrapConfig, raw) - raw.set('DEFAULT', 'syslog_log_level', 'INFO') - config = wrapper.RootwrapConfig(raw) - self.assertEqual(config.syslog_log_level, logging.INFO) diff --git a/nova/tests/test_periodic_tasks.py b/nova/tests/test_periodic_tasks.py index 5804ea49b..39669967f 100644 --- a/nova/tests/test_periodic_tasks.py +++ b/nova/tests/test_periodic_tasks.py @@ -17,6 +17,7 @@ import fixtures +import time from nova import manager from nova import test @@ -76,6 +77,19 @@ class Manager(test.TestCase): idle = m.periodic_tasks(None) 
self.assertAlmostEqual(60, idle, 1) + def test_periodic_tasks_idle_calculation(self): + class Manager(manager.Manager): + @manager.periodic_task(spacing=10) + def bar(self): + return 'bar' + + m = Manager() + m.periodic_tasks(None) + time.sleep(0.1) + idle = m.periodic_tasks(None) + self.assertTrue(idle > 9.7) + self.assertTrue(idle < 9.9) + def test_periodic_tasks_disabled(self): class Manager(manager.Manager): @manager.periodic_task(spacing=-1) diff --git a/nova/tests/test_pipelib.py b/nova/tests/test_pipelib.py index 85c2ca2cd..5cd715552 100644 --- a/nova/tests/test_pipelib.py +++ b/nova/tests/test_pipelib.py @@ -51,11 +51,11 @@ class PipelibTest(test.TestCase): def test_setup_security_group(self): group_name = "%s%s" % (self.project, CONF.vpn_key_suffix) - # First attemp, does not exist (thus its created) + # First attempt, does not exist (thus its created) res1_group = self.cloudpipe.setup_security_group(self.context) self.assertEqual(res1_group, group_name) - # Second attem, it exists in the DB + # Second attempt, it exists in the DB res2_group = self.cloudpipe.setup_security_group(self.context) self.assertEqual(res1_group, res2_group) @@ -64,10 +64,10 @@ class PipelibTest(test.TestCase): with utils.tempdir() as tmpdir: self.flags(keys_path=tmpdir) - # First attemp, key does not exist (thus it is generated) + # First attempt, key does not exist (thus it is generated) res1_key = self.cloudpipe.setup_key_pair(self.context) self.assertEqual(res1_key, key_name) - # Second attem, it exists in the DB + # Second attempt, it exists in the DB res2_key = self.cloudpipe.setup_key_pair(self.context) self.assertEqual(res2_key, res1_key) diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py index 4873714f3..71beed51e 100644 --- a/nova/tests/test_service.py +++ b/nova/tests/test_service.py @@ -112,7 +112,6 @@ class ServiceTestCase(test.TestCase): self.host = 'foo' self.binary = 'nova-fake' self.topic = 'fake' - self.mox.StubOutWithMock(service, 'db') 
self.mox.StubOutWithMock(db, 'service_create') self.mox.StubOutWithMock(db, 'service_get_by_args') self.flags(use_local=True, group='conductor') diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py index 2c46b27bd..9eab72c5b 100644 --- a/nova/tests/test_utils.py +++ b/nova/tests/test_utils.py @@ -757,3 +757,24 @@ class LastBytesTestCase(test.TestCase): content = '1234567890' flo.write(content) self.assertEqual((content, 0), utils.last_bytes(flo, 1000)) + + +class IntLikeTestCase(test.TestCase): + + def test_is_int_like(self): + self.assertTrue(utils.is_int_like(1)) + self.assertTrue(utils.is_int_like("1")) + self.assertTrue(utils.is_int_like("514")) + self.assertTrue(utils.is_int_like("0")) + + self.assertFalse(utils.is_int_like(1.1)) + self.assertFalse(utils.is_int_like("1.1")) + self.assertFalse(utils.is_int_like("1.1.1")) + self.assertFalse(utils.is_int_like(None)) + self.assertFalse(utils.is_int_like("0.")) + self.assertFalse(utils.is_int_like("aaaaaa")) + self.assertFalse(utils.is_int_like("....")) + self.assertFalse(utils.is_int_like("1g")) + self.assertFalse( + utils.is_int_like("0cc3346e-9fef-4445-abe6-5d2b2690ec64")) + self.assertFalse(utils.is_int_like("a1")) diff --git a/nova/tests/test_virt_drivers.py b/nova/tests/test_virt_drivers.py index 199ae30b1..9747ecccd 100644 --- a/nova/tests/test_virt_drivers.py +++ b/nova/tests/test_virt_drivers.py @@ -446,6 +446,15 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase): self.assertIn('port', vnc_console) @catch_notimplementederror + def test_get_spice_console(self): + instance_ref, network_info = self._get_running_instance() + spice_console = self.connection.get_spice_console(instance_ref) + self.assertIn('internal_access_path', spice_console) + self.assertIn('host', spice_console) + self.assertIn('port', spice_console) + self.assertIn('tlsPort', spice_console) + + @catch_notimplementederror def test_get_console_pool_info(self): instance_ref, network_info = self._get_running_instance() 
console_pool = self.connection.get_console_pool_info(instance_ref) diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py index 86b3a5730..577d227ce 100644 --- a/nova/tests/test_vmwareapi.py +++ b/nova/tests/test_vmwareapi.py @@ -16,7 +16,7 @@ # under the License. """ -Test suite for VMWareAPI. +Test suite for VMwareAPI. """ from nova.compute import power_state @@ -33,11 +33,11 @@ from nova.virt.vmwareapi import driver from nova.virt.vmwareapi import fake as vmwareapi_fake -class VMWareAPIVMTestCase(test.TestCase): +class VMwareAPIVMTestCase(test.TestCase): """Unit tests for Vmware API connection calls.""" def setUp(self): - super(VMWareAPIVMTestCase, self).setUp() + super(VMwareAPIVMTestCase, self).setUp() self.context = context.RequestContext('fake', 'fake', is_admin=False) self.flags(vmwareapi_host_ip='test_url', vmwareapi_host_username='test_username', @@ -48,7 +48,7 @@ class VMWareAPIVMTestCase(test.TestCase): vmwareapi_fake.reset() db_fakes.stub_out_db_instance_api(self.stubs) stubs.set_stubs(self.stubs) - self.conn = driver.VMWareESXDriver(None, False) + self.conn = driver.VMwareESXDriver(None, False) # NOTE(vish): none of the network plugging code is actually # being tested self.network_info = [({'bridge': 'fa0', @@ -78,7 +78,7 @@ class VMWareAPIVMTestCase(test.TestCase): nova.tests.image.fake.stub_out_image_service(self.stubs) def tearDown(self): - super(VMWareAPIVMTestCase, self).tearDown() + super(VMwareAPIVMTestCase, self).tearDown() vmwareapi_fake.cleanup() nova.tests.image.fake.FakeImageService_reset() diff --git a/nova/tests/test_wsgi.py b/nova/tests/test_wsgi.py index b4b25ed97..b04bc3e03 100644 --- a/nova/tests/test_wsgi.py +++ b/nova/tests/test_wsgi.py @@ -21,9 +21,17 @@ import os.path import tempfile +import eventlet + import nova.exception from nova import test import nova.wsgi +import urllib2 +import webob + +SSL_CERT_DIR = os.path.normpath(os.path.join( + os.path.dirname(os.path.abspath(__file__)), + 'ssl_cert')) class 
TestLoaderNothingExists(test.TestCase): @@ -99,3 +107,92 @@ class TestWSGIServer(test.TestCase): self.assertNotEqual(0, server.port) server.stop() server.wait() + + +class TestWSGIServerWithSSL(test.TestCase): + """WSGI server with SSL tests.""" + + def setUp(self): + super(TestWSGIServerWithSSL, self).setUp() + self.flags(enabled_ssl_apis=['fake_ssl'], + ssl_cert_file=os.path.join(SSL_CERT_DIR, 'certificate.crt'), + ssl_key_file=os.path.join(SSL_CERT_DIR, 'privatekey.key')) + + def test_ssl_server(self): + + def test_app(env, start_response): + start_response('200 OK', {}) + return ['PONG'] + + fake_ssl_server = nova.wsgi.Server("fake_ssl", test_app, + host="127.0.0.1", port=0, + use_ssl=True) + fake_ssl_server.start() + self.assertNotEqual(0, fake_ssl_server.port) + + cli = eventlet.connect(("localhost", fake_ssl_server.port)) + cli = eventlet.wrap_ssl(cli, + ca_certs=os.path.join(SSL_CERT_DIR, 'ca.crt')) + + cli.write('POST / HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nContent-length:4\r\n\r\nPING') + response = cli.read(8192) + self.assertEquals(response[-4:], "PONG") + + fake_ssl_server.stop() + fake_ssl_server.wait() + + def test_two_servers(self): + + def test_app(env, start_response): + start_response('200 OK', {}) + return ['PONG'] + + fake_ssl_server = nova.wsgi.Server("fake_ssl", test_app, + host="127.0.0.1", port=0, use_ssl=True) + fake_ssl_server.start() + self.assertNotEqual(0, fake_ssl_server.port) + + fake_server = nova.wsgi.Server("fake", test_app, + host="127.0.0.1", port=0) + fake_server.start() + self.assertNotEquals(0, fake_server.port) + + cli = eventlet.connect(("localhost", fake_ssl_server.port)) + cli = eventlet.wrap_ssl(cli, + ca_certs=os.path.join(SSL_CERT_DIR, 'ca.crt')) + + cli.write('POST / HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nContent-length:4\r\n\r\nPING') + response = cli.read(8192) + self.assertEquals(response[-4:], "PONG") + + cli = eventlet.connect(("localhost", fake_server.port)) + + 
cli.sendall('POST / HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nContent-length:4\r\n\r\nPING') + response = cli.recv(8192) + self.assertEquals(response[-4:], "PONG") + + fake_ssl_server.stop() + fake_ssl_server.wait() + + def test_app_using_ipv6_and_ssl(self): + greetings = 'Hello, World!!!' + + @webob.dec.wsgify + def hello_world(req): + return greetings + + server = nova.wsgi.Server("fake_ssl", + hello_world, + host="::1", + port=0, + use_ssl=True) + server.start() + + response = urllib2.urlopen('https://[::1]:%d/' % server.port) + self.assertEquals(greetings, response.read()) + + server.stop() + server.wait() diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 0b1c5d0e7..067e28a13 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -1822,16 +1822,31 @@ class XenAPIBWCountersTestCase(stubs.XenAPITestBase): # Consider abstracting common code in a base class for firewall driver testing. class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase): - _in_nat_rules = [ + _in_rules = [ '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011', '*nat', ':PREROUTING ACCEPT [1170:189210]', ':INPUT ACCEPT [844:71028]', ':OUTPUT ACCEPT [5149:405186]', ':POSTROUTING ACCEPT [5063:386098]', - ] - - _in_filter_rules = [ + '# Completed on Mon Dec 6 11:54:13 2010', + '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010', + '*mangle', + ':INPUT ACCEPT [969615:281627771]', + ':FORWARD ACCEPT [0:0]', + ':OUTPUT ACCEPT [915599:63811649]', + ':nova-block-ipv4 - [0:0]', + '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ', + '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED' + ',ESTABLISHED -j ACCEPT ', + '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ', + '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ', + '[0:0] -A FORWARD -o virbr0 -j REJECT ' + '--reject-with icmp-port-unreachable ', + '[0:0] -A FORWARD -i virbr0 -j REJECT ' + '--reject-with 
icmp-port-unreachable ', + 'COMMIT', + '# Completed on Mon Dec 6 11:54:13 2010', '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010', '*filter', ':INPUT ACCEPT [969615:281627771]', @@ -1916,7 +1931,7 @@ class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase): def _validate_security_group(self): in_rules = filter(lambda l: not l.startswith('#'), - self._in_filter_rules) + self._in_rules) for rule in in_rules: if not 'nova' in rule: self.assertTrue(rule in self._out_rules, diff --git a/nova/tests/utils.py b/nova/tests/utils.py index 00b70ceb3..6437f9537 100644 --- a/nova/tests/utils.py +++ b/nova/tests/utils.py @@ -20,6 +20,7 @@ import nova.context import nova.db from nova.image import glance from nova.network import minidns +from nova.network import model as network_model from nova.openstack.common import cfg CONF = cfg.CONF @@ -91,6 +92,8 @@ def get_test_network_info(count=1): 'bridge_interface': fake_bridge_interface, 'injected': False} mapping = {'mac': fake, + 'vif_type': network_model.VIF_TYPE_BRIDGE, + 'vif_uuid': 'vif-xxx-yyy-zzz', 'dhcp_server': fake, 'dns': ['fake1', 'fake2'], 'gateway': fake, diff --git a/nova/tests/virt/xenapi/test_volumeops.py b/nova/tests/virt/xenapi/test_volumeops.py index 7cc5c70da..844ae8459 100644 --- a/nova/tests/virt/xenapi/test_volumeops.py +++ b/nova/tests/virt/xenapi/test_volumeops.py @@ -21,6 +21,13 @@ from nova.virt.xenapi import volumeops class VolumeAttachTestCase(test.TestCase): def test_detach_volume_call(self): + registered_calls = [] + + def regcall(label): + def side_effect(*args, **kwargs): + registered_calls.append(label) + return side_effect + ops = volumeops.VolumeOps('session') self.mox.StubOutWithMock(volumeops.vm_utils, 'vm_ref_or_raise') self.mox.StubOutWithMock(volumeops.vm_utils, 'find_vbd_by_number') @@ -45,10 +52,12 @@ class VolumeAttachTestCase(test.TestCase): volumeops.vm_utils.unplug_vbd('session', 'vbdref') - volumeops.vm_utils.destroy_vbd('session', 'vbdref') + 
volumeops.vm_utils.destroy_vbd('session', 'vbdref').WithSideEffects( + regcall('destroy_vbd')) volumeops.volume_utils.find_sr_from_vbd( - 'session', 'vbdref').AndReturn('srref') + 'session', 'vbdref').WithSideEffects( + regcall('find_sr_from_vbd')).AndReturn('srref') volumeops.volume_utils.purge_sr('session', 'srref') @@ -58,6 +67,9 @@ class VolumeAttachTestCase(test.TestCase): dict(driver_volume_type='iscsi', data='conn_data'), 'instance_1', 'mountpoint') + self.assertEquals( + ['find_sr_from_vbd', 'destroy_vbd'], registered_calls) + def test_attach_volume_call(self): ops = volumeops.VolumeOps('session') self.mox.StubOutWithMock(ops, '_connect_volume') diff --git a/nova/tests/vmwareapi/stubs.py b/nova/tests/vmwareapi/stubs.py index 494b201d0..0330246e2 100644 --- a/nova/tests/vmwareapi/stubs.py +++ b/nova/tests/vmwareapi/stubs.py @@ -21,31 +21,31 @@ Stubouts for the test suite from nova.virt.vmwareapi import driver from nova.virt.vmwareapi import fake -from nova.virt.vmwareapi import network_utils +from nova.virt.vmwareapi import network_util from nova.virt.vmwareapi import vmops from nova.virt.vmwareapi import vmware_images def fake_get_vim_object(arg): - """Stubs out the VMWareAPISession's get_vim_object method.""" + """Stubs out the VMwareAPISession's get_vim_object method.""" return fake.FakeVim() def fake_is_vim_object(arg, module): - """Stubs out the VMWareAPISession's is_vim_object method.""" + """Stubs out the VMwareAPISession's is_vim_object method.""" return isinstance(module, fake.FakeVim) def set_stubs(stubs): """Set the stubs.""" - stubs.Set(vmops.VMWareVMOps, 'plug_vifs', fake.fake_plug_vifs) - stubs.Set(network_utils, 'get_network_with_the_name', + stubs.Set(vmops.VMwareVMOps, 'plug_vifs', fake.fake_plug_vifs) + stubs.Set(network_util, 'get_network_with_the_name', fake.fake_get_network) stubs.Set(vmware_images, 'fetch_image', fake.fake_fetch_image) stubs.Set(vmware_images, 'get_vmdk_size_and_properties', fake.fake_get_vmdk_size_and_properties) 
stubs.Set(vmware_images, 'upload_image', fake.fake_upload_image) - stubs.Set(driver.VMWareAPISession, "_get_vim_object", + stubs.Set(driver.VMwareAPISession, "_get_vim_object", fake_get_vim_object) - stubs.Set(driver.VMWareAPISession, "_is_vim_object", + stubs.Set(driver.VMwareAPISession, "_is_vim_object", fake_is_vim_object) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 85c85b5e2..fa214b23e 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -208,12 +208,10 @@ class FakeSessionForFirewallTests(FakeSessionForVMTests): def __init__(self, uri, test_case=None): super(FakeSessionForFirewallTests, self).__init__(uri) - if hasattr(test_case, '_in_filter_rules'): - self._in_filter_rules = test_case._in_filter_rules + if hasattr(test_case, '_in_rules'): + self._in_rules = test_case._in_rules if hasattr(test_case, '_in6_filter_rules'): self._in6_filter_rules = test_case._in6_filter_rules - if hasattr(test_case, '_in_nat_rules'): - self._in_nat_rules = test_case._in_nat_rules self._test_case = test_case def host_call_plugin(self, _1, _2, plugin, method, args): @@ -230,12 +228,10 @@ class FakeSessionForFirewallTests(FakeSessionForVMTests): else: output = '' process_input = args.get('process_input', None) - if cmd == ['ip6tables-save', '-c', '-t', 'filter']: + if cmd == ['ip6tables-save', '-c']: output = '\n'.join(self._in6_filter_rules) - if cmd == ['iptables-save', '-c', '-t', 'filter']: - output = '\n'.join(self._in_filter_rules) - if cmd == ['iptables-save', '-c', '-t', 'nat']: - output = '\n'.join(self._in_nat_rules) + if cmd == ['iptables-save', '-c']: + output = '\n'.join(self._in_rules) if cmd == ['iptables-restore', '-c', ]: lines = process_input.split('\n') if '*filter' in lines: diff --git a/nova/utils.py b/nova/utils.py index 115791b64..75cba0a7c 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -859,6 +859,14 @@ def bool_from_str(val): val.lower() == 'y' +def is_int_like(val): + """Check if a value looks like an 
int.""" + try: + return str(int(val)) == str(val) + except Exception: + return False + + def is_valid_boolstr(val): """Check if the provided string is a valid bool string or not.""" val = str(val).lower() diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py index 462e0c444..9904fdcd4 100644 --- a/nova/virt/baremetal/driver.py +++ b/nova/virt/baremetal/driver.py @@ -188,13 +188,28 @@ class BareMetalDriver(driver.ComputeDriver): l.append(inst['name']) return l - def spawn(self, context, instance, image_meta, injected_files, - admin_password, network_info=None, block_device_info=None): + def _require_node(self, instance): + """Get a node_id out of a manager instance dict. + The compute manager is meant to know the node id, so a missing node is + a significant issue - it may mean we've been passed someone elses data. + """ node_id = instance.get('node') if not node_id: raise exception.NovaException(_( - "Baremetal node id not supplied to driver")) + "Baremetal node id not supplied to driver for %r") + % instance['uuid']) + return node_id + + def macs_for_instance(self, instance): + context = nova_context.get_admin_context() + node_id = self._require_node(instance) + return set(iface['address'] for iface in + db.bm_interface_get_all_by_bm_node_id(context, node_id)) + + def spawn(self, context, instance, image_meta, injected_files, + admin_password, network_info=None, block_device_info=None): + node_id = self._require_node(instance) # NOTE(deva): this db method will raise an exception if the node is # already in use. 
We call it here to ensure no one else @@ -324,10 +339,9 @@ class BareMetalDriver(driver.ComputeDriver): return self.volume_driver.attach_volume(connection_info, instance, mountpoint) - @exception.wrap_exception() - def detach_volume(self, connection_info, instance, mountpoint): + def detach_volume(self, connection_info, instance_name, mountpoint): return self.volume_driver.detach_volume(connection_info, - instance, mountpoint) + instance_name, mountpoint) def get_info(self, instance): # NOTE(deva): compute/manager.py expects to get NotFound exception diff --git a/nova/virt/baremetal/ipmi.py b/nova/virt/baremetal/ipmi.py index 97c158727..393b3657b 100644 --- a/nova/virt/baremetal/ipmi.py +++ b/nova/virt/baremetal/ipmi.py @@ -126,7 +126,7 @@ class IPMI(base.PowerManager): args.append(pwfile) args.extend(command.split(" ")) out, err = utils.execute(*args, attempts=3) - LOG.debug(_("ipmitool stdout: '%(out)s', stderr: '%(err)%s'"), + LOG.debug(_("ipmitool stdout: '%(out)s', stderr: '%(err)s'"), locals()) return out, err finally: diff --git a/nova/virt/baremetal/net-dhcp.ubuntu.template b/nova/virt/baremetal/net-dhcp.ubuntu.template index e8824a88d..34a9e8be7 100644 --- a/nova/virt/baremetal/net-dhcp.ubuntu.template +++ b/nova/virt/baremetal/net-dhcp.ubuntu.template @@ -10,9 +10,6 @@ iface lo inet loopback #for $ifc in $interfaces auto ${ifc.name} iface ${ifc.name} inet dhcp -#if $ifc.hwaddress - hwaddress ether ${ifc.hwaddress} -#end if #if $use_ipv6 iface ${ifc.name} inet6 dhcp diff --git a/nova/virt/baremetal/net-static.ubuntu.template b/nova/virt/baremetal/net-static.ubuntu.template index f14f0ce8c..1fe5a1ab8 100644 --- a/nova/virt/baremetal/net-static.ubuntu.template +++ b/nova/virt/baremetal/net-static.ubuntu.template @@ -16,9 +16,6 @@ iface ${ifc.name} inet static #if $ifc.dns dns-nameservers ${ifc.dns} #end if -#if $ifc.hwaddress - hwaddress ether ${ifc.hwaddress} -#end if #if $use_ipv6 iface ${ifc.name} inet6 static diff --git a/nova/virt/baremetal/pxe.py 
b/nova/virt/baremetal/pxe.py index b94ac9032..0daac1d46 100644 --- a/nova/virt/baremetal/pxe.py +++ b/nova/virt/baremetal/pxe.py @@ -121,7 +121,6 @@ def build_network_config(network_info): gateway_v6 = mapping['gateway_v6'] interface = { 'name': 'eth%d' % id, - 'hwaddress': mapping['mac'], 'address': mapping['ips'][0]['ip'], 'gateway': mapping['gateway'], 'netmask': mapping['ips'][0]['netmask'], @@ -219,7 +218,7 @@ def get_tftp_image_info(instance): missing_labels = [] for label in image_info.keys(): (uuid, path) = image_info[label] - if uuid is None: + if not uuid: missing_labels.append(label) else: image_info[label][1] = os.path.join(CONF.baremetal.tftp_root, @@ -238,27 +237,12 @@ class PXE(base.NodeDriver): super(PXE, self).__init__() def _collect_mac_addresses(self, context, node): - macs = [] - macs.append(db.bm_node_get(context, node['id'])['prov_mac_address']) + macs = set() + macs.add(db.bm_node_get(context, node['id'])['prov_mac_address']) for nic in db.bm_interface_get_all_by_bm_node_id(context, node['id']): if nic['address']: - macs.append(nic['address']) - macs.sort() - return macs - - def _generate_udev_rules(self, context, node): - # TODO(deva): fix assumption that device names begin with "eth" - # and fix assumption of ordering - macs = self._collect_mac_addresses(context, node) - rules = '' - for (i, mac) in enumerate(macs): - rules += 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ' \ - 'ATTR{address}=="%(mac)s", ATTR{dev_id}=="0x0", ' \ - 'ATTR{type}=="1", KERNEL=="eth*", NAME="%(name)s"\n' \ - % {'mac': mac.lower(), - 'name': 'eth%d' % i, - } - return rules + macs.add(nic['address']) + return sorted(macs) def _cache_tftp_images(self, context, instance, image_info): """Fetch the necessary kernels and ramdisks for the instance.""" @@ -330,9 +314,6 @@ class PXE(base.NodeDriver): injected_files = [] net_config = build_network_config(network_info) - udev_rules = self._generate_udev_rules(context, node) - injected_files.append( - 
('/etc/udev/rules.d/70-persistent-net.rules', udev_rules)) if instance['hostname']: injected_files.append(('/etc/hostname', instance['hostname'])) @@ -385,7 +366,6 @@ class PXE(base.NodeDriver): config ./pxelinux.cfg/ {mac} -> ../{uuid}/config - """ image_info = get_tftp_image_info(instance) (root_mb, swap_mb) = get_partition_sizes(instance) diff --git a/nova/virt/baremetal/volume_driver.py b/nova/virt/baremetal/volume_driver.py index 570cea1d8..0a05dfedd 100644 --- a/nova/virt/baremetal/volume_driver.py +++ b/nova/virt/baremetal/volume_driver.py @@ -31,7 +31,7 @@ opts = [ cfg.BoolOpt('use_unsafe_iscsi', default=False, help='Do not set this out of dev/test environments. ' - 'If a node does not have an fixed PXE IP address, ' + 'If a node does not have a fixed PXE IP address, ' 'volumes are exported with globally opened ACL'), cfg.StrOpt('iscsi_iqn_prefix', default='iqn.2010-10.org.openstack.baremetal', @@ -246,7 +246,6 @@ class LibvirtVolumeDriver(VolumeDriver): # TODO(NTTdocomo): support CHAP _allow_iscsi_tgtadm(tid, 'ALL') - @exception.wrap_exception() def detach_volume(self, connection_info, instance, mountpoint): mount_device = mountpoint.rpartition("/")[2] try: diff --git a/nova/virt/configdrive.py b/nova/virt/configdrive.py index d4352c5e6..886136460 100644 --- a/nova/virt/configdrive.py +++ b/nova/virt/configdrive.py @@ -130,20 +130,16 @@ class ConfigDriveBuilder(object): try: mountdir = tempfile.mkdtemp(dir=CONF.config_drive_tempdir, prefix='cd_mnt_') - _out, err = utils.trycmd('mount', '-o', 'loop', path, mountdir, + _out, err = utils.trycmd('mount', '-o', + 'loop,uid=%d,gid=%d' % (os.getuid(), + os.getgid()), + path, mountdir, run_as_root=True) if err: raise exception.ConfigDriveMountFailed(operation='mount', error=err) mounted = True - _out, err = utils.trycmd('chown', - '%s.%s' % (os.getuid(), os.getgid()), - mountdir, run_as_root=True) - if err: - raise exception.ConfigDriveMountFailed(operation='chown', - error=err) - # NOTE(mikal): I can't just use 
shutils.copytree here, because the # destination directory already exists. This is annoying. for ent in os.listdir(self.tempdir): diff --git a/nova/virt/disk/api.py b/nova/virt/disk/api.py index 26fb86f1e..d080f6d36 100644 --- a/nova/virt/disk/api.py +++ b/nova/virt/disk/api.py @@ -96,9 +96,13 @@ def mkfs(os_type, fs_label, target): utils.execute(*mkfs_command.split()) -def resize2fs(image, check_exit_code=False): - utils.execute('e2fsck', '-fp', image, check_exit_code=check_exit_code) - utils.execute('resize2fs', image, check_exit_code=check_exit_code) +def resize2fs(image, check_exit_code=False, run_as_root=False): + utils.execute('e2fsck', '-fp', image, + check_exit_code=check_exit_code, + run_as_root=run_as_root) + utils.execute('resize2fs', image, + check_exit_code=check_exit_code, + run_as_root=run_as_root) def get_disk_size(path): diff --git a/nova/virt/driver.py b/nova/virt/driver.py index e396de6a0..747b60714 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -49,6 +49,17 @@ CONF.register_opts(driver_opts) LOG = logging.getLogger(__name__) +def driver_dict_from_config(named_driver_config, *args, **kwargs): + driver_registry = dict() + + for driver_str in named_driver_config: + driver_type, _sep, driver = driver_str.partition('=') + driver_class = importutils.import_class(driver) + driver_registry[driver_type] = driver_class(*args, **kwargs) + + return driver_registry + + def block_device_info_get_root(block_device_info): block_device_info = block_device_info or {} return block_device_info.get('root_device_name') @@ -258,6 +269,10 @@ class ComputeDriver(object): # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() + def get_spice_console(self, instance): + # TODO(Vek): Need to pass context in for access to auth_token + raise NotImplementedError() + def get_diagnostics(self, instance): """Return data about VM diagnostics.""" # TODO(Vek): Need to pass context in for access to auth_token @@ -443,7 +458,8 @@ class 
ComputeDriver(object): def post_live_migration_at_destination(self, ctxt, instance_ref, network_info, - block_migration=False): + block_migration=False, + block_device_info=None): """Post operation of live migration at destination host. :param ctxt: security context @@ -732,6 +748,35 @@ class ComputeDriver(object): # related helpers. raise NotImplementedError(self.legacy_nwinfo) + def macs_for_instance(self, instance): + """What MAC addresses must this instance have? + + Some hypervisors (such as bare metal) cannot do freeform virtualisation + of MAC addresses. This method allows drivers to return a set of MAC + addresses that the instance is to have. allocate_for_instance will take + this into consideration when provisioning networking for the instance. + + Mapping of MAC addresses to actual networks (or permitting them to be + freeform) is up to the network implementation layer. For instance, + with openflow switches, fixed MAC addresses can still be virtualised + onto any L2 domain, with arbitrary VLANs etc, but regular switches + require pre-configured MAC->network mappings that will match the + actual configuration. + + Most hypervisors can use the default implementation which returns None. + Hypervisors with MAC limits should return a set of MAC addresses, which + will be supplied to the allocate_for_instance call by the compute + manager, and it is up to that call to ensure that all assigned network + details are compatible with the set of MAC addresses. + + This is called during spawn_instance by the compute manager. + + :return: None, or a set of MAC ids (e.g. set(['12:34:56:78:90:ab'])). + None means 'no constraints', a set means 'these and only these + MAC addresses'. + """ + return None + def manage_image_cache(self, context, all_instances): """ Manage the driver's local image cache. 
diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 0a29a6d67..04eeded72 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -166,6 +166,12 @@ class FakeDriver(driver.ComputeDriver): block_device_info=None): pass + def post_live_migration_at_destination(self, context, instance, + network_info, + block_migration=False, + block_device_info=None): + pass + def power_off(self, instance): pass @@ -271,6 +277,12 @@ class FakeDriver(driver.ComputeDriver): 'host': 'fakevncconsole.com', 'port': 6969} + def get_spice_console(self, instance): + return {'internal_access_path': 'FAKE', + 'host': 'fakespiceconsole.com', + 'port': 6969, + 'tlsPort': 6970} + def get_console_pool_info(self, console_type): return {'address': '127.0.0.1', 'username': 'fakeuser', diff --git a/nova/virt/firewall.py b/nova/virt/firewall.py index bbc6034bd..ad38cd9a4 100644 --- a/nova/virt/firewall.py +++ b/nova/virt/firewall.py @@ -146,7 +146,7 @@ class IptablesFirewallDriver(FirewallDriver): self.iptables = linux_net.iptables_manager self.instances = {} self.network_infos = {} - self.basicly_filtered = False + self.basically_filtered = False self.iptables.ipv4['filter'].add_chain('sg-fallback') self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP') diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py index 799ef7172..9316b2598 100644 --- a/nova/virt/hyperv/driver.py +++ b/nova/virt/hyperv/driver.py @@ -164,7 +164,7 @@ class HyperVDriver(driver.ComputeDriver): block_device_info, network_info) def post_live_migration_at_destination(self, ctxt, instance_ref, - network_info, block_migration): + network_info, block_migration, block_device_info=None): self._livemigrationops.post_live_migration_at_destination(ctxt, instance_ref, network_info, block_migration) diff --git a/nova/virt/hyperv/vif.py b/nova/virt/hyperv/vif.py new file mode 100644 index 000000000..a898d3ac2 --- /dev/null +++ b/nova/virt/hyperv/vif.py @@ -0,0 +1,133 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 
+# +# Copyright 2013 Cloudbase Solutions Srl +# Copyright 2013 Pedro Navarro Perez +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import sys +import uuid + +# Check needed for unit testing on Unix +if sys.platform == 'win32': + import wmi + +from abc import abstractmethod +from nova.openstack.common import cfg +from nova.openstack.common import log as logging +from nova.virt.hyperv import vmutils + +hyperv_opts = [ + cfg.StrOpt('vswitch_name', + default=None, + help='External virtual switch Name, ' + 'if not provided, the first external virtual ' + 'switch is used'), +] + +CONF = cfg.CONF +CONF.register_opts(hyperv_opts) + +LOG = logging.getLogger(__name__) + + +class HyperVBaseVIFDriver(object): + @abstractmethod + def plug(self, instance, vif): + pass + + @abstractmethod + def unplug(self, instance, vif): + pass + + +class HyperVQuantumVIFDriver(HyperVBaseVIFDriver): + """Quantum VIF driver.""" + + def plug(self, instance, vif): + # Quantum takes care of plugging the port + pass + + def unplug(self, instance, vif): + # Quantum takes care of unplugging the port + pass + + +class HyperVNovaNetworkVIFDriver(HyperVBaseVIFDriver): + """Nova network VIF driver.""" + + def __init__(self): + self._vmutils = vmutils.VMUtils() + self._conn = wmi.WMI(moniker='//./root/virtualization') + + def _find_external_network(self): + """Find the vswitch that is connected to the physical nic. 
+ Assumes only one physical nic on the host + """ + #If there are no physical nics connected to networks, return. + LOG.debug(_("Attempting to bind NIC to %s ") + % CONF.vswitch_name) + if CONF.vswitch_name: + LOG.debug(_("Attempting to bind NIC to %s ") + % CONF.vswitch_name) + bound = self._conn.Msvm_VirtualSwitch( + ElementName=CONF.vswitch_name) + else: + LOG.debug(_("No vSwitch specified, attaching to default")) + self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE') + if len(bound) == 0: + return None + if CONF.vswitch_name: + return self._conn.Msvm_VirtualSwitch( + ElementName=CONF.vswitch_name)[0]\ + .associators(wmi_result_class='Msvm_SwitchPort')[0]\ + .associators(wmi_result_class='Msvm_VirtualSwitch')[0] + else: + return self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')\ + .associators(wmi_result_class='Msvm_SwitchPort')[0]\ + .associators(wmi_result_class='Msvm_VirtualSwitch')[0] + + def plug(self, instance, vif): + extswitch = self._find_external_network() + if extswitch is None: + raise vmutils.HyperVException(_('Cannot find vSwitch')) + + vm_name = instance['name'] + + nic_data = self._conn.Msvm_SyntheticEthernetPortSettingData( + ElementName=vif['id'])[0] + + switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0] + #Create a port on the vswitch. 
+ (new_port, ret_val) = switch_svc.CreateSwitchPort( + Name=str(uuid.uuid4()), + FriendlyName=vm_name, + ScopeOfResidence="", + VirtualSwitch=extswitch.path_()) + if ret_val != 0: + LOG.error(_('Failed creating a port on the external vswitch')) + raise vmutils.HyperVException(_('Failed creating port for %s') % + vm_name) + ext_path = extswitch.path_() + LOG.debug(_("Created switch port %(vm_name)s on switch %(ext_path)s") + % locals()) + + vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name) + vm = vms[0] + + nic_data.Connection = [new_port] + self._vmutils.modify_virt_resource(self._conn, nic_data, vm) + + def unplug(self, instance, vif): + #TODO(alepilotti) Not implemented + pass diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py index 1fba15506..3d8958266 100644 --- a/nova/virt/hyperv/vmops.py +++ b/nova/virt/hyperv/vmops.py @@ -24,6 +24,7 @@ import uuid from nova.api.metadata import base as instance_metadata from nova import exception from nova.openstack.common import cfg +from nova.openstack.common import importutils from nova.openstack.common import lockutils from nova.openstack.common import log as logging from nova import utils @@ -35,10 +36,6 @@ from nova.virt.hyperv import vmutils LOG = logging.getLogger(__name__) hyperv_opts = [ - cfg.StrOpt('vswitch_name', - default=None, - help='Default vSwitch Name, ' - 'if none provided first external is used'), cfg.BoolOpt('limit_cpu_features', default=False, help='Required for live migration among ' @@ -59,14 +56,32 @@ hyperv_opts = [ CONF = cfg.CONF CONF.register_opts(hyperv_opts) CONF.import_opt('use_cow_images', 'nova.virt.driver') +CONF.import_opt('network_api_class', 'nova.network') class VMOps(baseops.BaseOps): + _vif_driver_class_map = { + 'nova.network.quantumv2.api.API': + 'nova.virt.hyperv.vif.HyperVQuantumVIFDriver', + 'nova.network.api.API': + 'nova.virt.hyperv.vif.HyperVNovaNetworkVIFDriver', + } + def __init__(self, volumeops): super(VMOps, self).__init__() self._vmutils = 
vmutils.VMUtils() self._volumeops = volumeops + self._load_vif_driver_class() + + def _load_vif_driver_class(self): + try: + class_name = self._vif_driver_class_map[CONF.network_api_class] + self._vif_driver = importutils.import_object(class_name) + except KeyError: + raise TypeError(_("VIF driver not found for " + "network_api_class: %s") % + CONF.network_api_class) def list_instances(self): """Return the names of all the instances known to Hyper-V.""" @@ -158,8 +173,8 @@ class VMOps(baseops.BaseOps): self._create_scsi_controller(instance['name']) for vif in network_info: - mac_address = vif['address'].replace(':', '') - self._create_nic(instance['name'], mac_address) + self._create_nic(instance['name'], vif) + self._vif_driver.plug(instance, vif) if configdrive.required_by(instance): self._create_config_drive(instance, injected_files, @@ -367,46 +382,28 @@ class VMOps(baseops.BaseOps): LOG.info(_('Created drive type %(drive_type)s for %(vm_name)s') % locals()) - def _create_nic(self, vm_name, mac): + def _create_nic(self, vm_name, vif): """Create a (synthetic) nic and attach it to the vm.""" LOG.debug(_('Creating nic for %s '), vm_name) - #Find the vswitch that is connected to the physical nic. - vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name) - extswitch = self._find_external_network() - if extswitch is None: - raise vmutils.HyperVException(_('Cannot find vSwitch')) - vm = vms[0] - switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0] - #Find the default nic and clone it to create a new nic for the vm. - #Use Msvm_SyntheticEthernetPortSettingData for Windows or Linux with - #Linux Integration Components installed. + #Create a new nic syntheticnics_data = self._conn.Msvm_SyntheticEthernetPortSettingData() default_nic_data = [n for n in syntheticnics_data if n.InstanceID.rfind('Default') > 0] new_nic_data = self._vmutils.clone_wmi_obj(self._conn, 'Msvm_SyntheticEthernetPortSettingData', default_nic_data[0]) - #Create a port on the vswitch. 
- (new_port, ret_val) = switch_svc.CreateSwitchPort( - Name=str(uuid.uuid4()), - FriendlyName=vm_name, - ScopeOfResidence="", - VirtualSwitch=extswitch.path_()) - if ret_val != 0: - LOG.error(_('Failed creating a port on the external vswitch')) - raise vmutils.HyperVException(_('Failed creating port for %s') % - vm_name) - ext_path = extswitch.path_() - LOG.debug(_("Created switch port %(vm_name)s on switch %(ext_path)s") - % locals()) - #Connect the new nic to the new port. - new_nic_data.Connection = [new_port] - new_nic_data.ElementName = vm_name + ' nic' - new_nic_data.Address = mac + + #Configure the nic + new_nic_data.ElementName = vif['id'] + new_nic_data.Address = vif['address'].replace(':', '') new_nic_data.StaticMacAddress = 'True' new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}'] - #Add the new nic to the vm. + + #Add the new nic to the vm + vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name) + vm = vms[0] + new_resources = self._vmutils.add_virt_resource(self._conn, new_nic_data, vm) if new_resources is None: @@ -414,33 +411,6 @@ class VMOps(baseops.BaseOps): vm_name) LOG.info(_("Created nic for %s "), vm_name) - def _find_external_network(self): - """Find the vswitch that is connected to the physical nic. - Assumes only one physical nic on the host - """ - #If there are no physical nics connected to networks, return. 
- LOG.debug(_("Attempting to bind NIC to %s ") - % CONF.vswitch_name) - if CONF.vswitch_name: - LOG.debug(_("Attempting to bind NIC to %s ") - % CONF.vswitch_name) - bound = self._conn.Msvm_VirtualSwitch( - ElementName=CONF.vswitch_name) - else: - LOG.debug(_("No vSwitch specified, attaching to default")) - self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE') - if len(bound) == 0: - return None - if CONF.vswitch_name: - return self._conn.Msvm_VirtualSwitch( - ElementName=CONF.vswitch_name)[0]\ - .associators(wmi_result_class='Msvm_SwitchPort')[0]\ - .associators(wmi_result_class='Msvm_VirtualSwitch')[0] - else: - return self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')\ - .associators(wmi_result_class='Msvm_SwitchPort')[0]\ - .associators(wmi_result_class='Msvm_VirtualSwitch')[0] - def reboot(self, instance, network_info, reboot_type): """Reboot the specified instance.""" vm = self._vmutils.lookup(self._conn, instance['name']) diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py index bae8a1f1a..d899f977d 100644 --- a/nova/virt/hyperv/vmutils.py +++ b/nova/virt/hyperv/vmutils.py @@ -130,7 +130,7 @@ class VMUtils(object): return newinst def add_virt_resource(self, conn, res_setting_data, target_vm): - """Add a new resource (disk/nic) to the VM.""" + """Adds a new resource to the VM.""" vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0] (job, new_resources, ret_val) = vs_man_svc.\ AddVirtualSystemResources([res_setting_data.GetText_(1)], @@ -145,8 +145,20 @@ class VMUtils(object): else: return None + def modify_virt_resource(self, conn, res_setting_data, target_vm): + """Updates a VM resource.""" + vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0] + (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources( + ResourceSettingData=[res_setting_data.GetText_(1)], + ComputerSystem=target_vm.path_()) + if ret_val == constants.WMI_JOB_STATUS_STARTED: + success = self.check_job_status(job) + else: + success = (ret_val == 0) + return 
success + def remove_virt_resource(self, conn, res_setting_data, target_vm): - """Add a new resource (disk/nic) to the VM.""" + """Removes a VM resource.""" vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0] (job, ret_val) = vs_man_svc.\ RemoveVirtualSystemResources([res_setting_data.path_()], diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py index 200236233..b69cf7bf1 100644 --- a/nova/virt/hyperv/volumeops.py +++ b/nova/virt/hyperv/volumeops.py @@ -37,7 +37,7 @@ hyper_volumeops_opts = [ help='The number of times we retry on attaching volume '), cfg.IntOpt('hyperv_wait_between_attach_retry', default=5, - help='The seconds to wait between an volume attachment attempt'), + help='The seconds to wait between a volume attachment attempt'), cfg.BoolOpt('force_volumeutils_v1', default=False, help='Force volumeutils v1'), @@ -183,7 +183,7 @@ class VolumeOps(baseops.BaseOps): "SELECT * FROM Msvm_ResourceAllocationSettingData \ WHERE ResourceSubType LIKE 'Microsoft Physical Disk Drive'\ AND Parent = '" + scsi_controller.path_() + "'") - #Slots starts from 0, so the lenght of the disks gives us the free slot + #Slots starts from 0, so the length of the disks gives us the free slot return len(volumes) def detach_volume(self, connection_info, instance_name, mountpoint): diff --git a/nova/virt/images.py b/nova/virt/images.py index f80c19999..018badecf 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -123,7 +123,7 @@ class QemuImgInfo(object): if len(line_pieces) != 6: break else: - # Check against this pattern occuring in the final position + # Check against this pattern in the final position # "%02d:%02d:%02d.%03d" date_pieces = line_pieces[5].split(":") if len(date_pieces) != 3: @@ -175,7 +175,7 @@ class QemuImgInfo(object): def qemu_img_info(path): - """Return a object containing the parsed output from qemu-img info.""" + """Return an object containing the parsed output from qemu-img info.""" if not os.path.exists(path): return 
QemuImgInfo() diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py index 6785c8823..ed5b21c79 100644 --- a/nova/virt/libvirt/config.py +++ b/nova/virt/libvirt/config.py @@ -648,21 +648,34 @@ class LibvirtConfigGuestGraphics(LibvirtConfigGuestDevice): return dev -class LibvirtConfigGuestChar(LibvirtConfigGuestDevice): +class LibvirtConfigGuestCharBase(LibvirtConfigGuestDevice): def __init__(self, **kwargs): - super(LibvirtConfigGuestChar, self).__init__(**kwargs) + super(LibvirtConfigGuestCharBase, self).__init__(**kwargs) self.type = "pty" self.source_path = None - self.target_port = None def format_dom(self): - dev = super(LibvirtConfigGuestChar, self).format_dom() + dev = super(LibvirtConfigGuestCharBase, self).format_dom() dev.set("type", self.type) if self.type == "file": dev.append(etree.Element("source", path=self.source_path)) + + return dev + + +class LibvirtConfigGuestChar(LibvirtConfigGuestCharBase): + + def __init__(self, **kwargs): + super(LibvirtConfigGuestChar, self).__init__(**kwargs) + + self.target_port = None + + def format_dom(self): + dev = super(LibvirtConfigGuestChar, self).format_dom() + if self.target_port is not None: dev.append(etree.Element("target", port=str(self.target_port))) @@ -683,6 +696,26 @@ class LibvirtConfigGuestConsole(LibvirtConfigGuestChar): **kwargs) +class LibvirtConfigGuestChannel(LibvirtConfigGuestCharBase): + + def __init__(self, **kwargs): + super(LibvirtConfigGuestChannel, self).__init__(root_name="channel", + **kwargs) + + self.target_type = "virtio" + self.target_name = None + + def format_dom(self): + dev = super(LibvirtConfigGuestChannel, self).format_dom() + + target = etree.Element("target", type=self.target_type) + if self.target_name is not None: + target.set("name", self.target_name) + dev.append(target) + + return dev + + class LibvirtConfigGuest(LibvirtConfigObject): def __init__(self, **kwargs): diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 
42d9dd99b..e4da5cbde 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -196,6 +196,7 @@ CONF.import_opt('default_ephemeral_format', 'nova.virt.driver') CONF.import_opt('use_cow_images', 'nova.virt.driver') CONF.import_opt('live_migration_retry_count', 'nova.compute.manager') CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc') +CONF.import_opt('server_proxyclient_address', 'nova.spice', group='spice') DEFAULT_FIREWALL_DRIVER = "%s.%s" % ( libvirt_firewall.__name__, @@ -276,17 +277,17 @@ class LibvirtDriver(driver.ComputeDriver): self._host_state = None self._initiator = None self._wrapped_conn = None + self._caps = None self.read_only = read_only self.firewall_driver = firewall.load_driver( DEFAULT_FIREWALL_DRIVER, self.virtapi, get_connection=self._get_connection) self.vif_driver = importutils.import_object(CONF.libvirt_vif_driver) - self.volume_drivers = {} - for driver_str in CONF.libvirt_volume_drivers: - driver_type, _sep, driver = driver_str.partition('=') - driver_class = importutils.import_class(driver) - self.volume_drivers[driver_type] = driver_class(self) + + self.volume_drivers = driver.driver_dict_from_config( + CONF.libvirt_volume_drivers, self) + self._host_state = None disk_prefix_map = {"lxc": "", "uml": "ubd", "xen": "sd"} @@ -361,7 +362,7 @@ class LibvirtDriver(driver.ComputeDriver): def _test_connection(self): try: - self._wrapped_conn.getCapabilities() + self._wrapped_conn.getLibVersion() return True except libvirt.libvirtError as e: if (e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and @@ -425,10 +426,10 @@ class LibvirtDriver(driver.ComputeDriver): """Efficient override of base instance_exists method.""" return self._conn.numOfDomains() - def instance_exists(self, instance_id): + def instance_exists(self, instance_name): """Efficient override of base instance_exists method.""" try: - self._lookup_by_name(instance_id) + self._lookup_by_name(instance_name) return True except exception.NovaException: 
return False @@ -586,7 +587,7 @@ class LibvirtDriver(driver.ComputeDriver): mount_device) if destroy_disks: - target = os.path.join(CONF.instances_path, instance['name']) + target = libvirt_utils.get_instance_path(instance) LOG.info(_('Deleting instance files %(target)s') % locals(), instance=instance) if os.path.exists(target): @@ -642,8 +643,7 @@ class LibvirtDriver(driver.ComputeDriver): } def _cleanup_resize(self, instance, network_info): - target = os.path.join(CONF.instances_path, - instance['name'] + "_resize") + target = libvirt_utils.get_instance_path(instance) + "_resize" if os.path.exists(target): shutil.rmtree(target) @@ -661,7 +661,6 @@ class LibvirtDriver(driver.ComputeDriver): method = getattr(driver, method_name) return method(connection_info, *args, **kwargs) - @exception.wrap_exception() def attach_volume(self, connection_info, instance, mountpoint): instance_name = instance['name'] virt_dom = self._lookup_by_name(instance_name) @@ -707,7 +706,8 @@ class LibvirtDriver(driver.ComputeDriver): if child.get('dev') == device: return etree.tostring(node) - def _get_domain_xml(self, instance, network_info, block_device_info=None): + def _get_existing_domain_xml(self, instance, network_info, + block_device_info=None): try: virt_dom = self._lookup_by_name(instance['name']) xml = virt_dom.XMLDesc(0) @@ -716,7 +716,6 @@ class LibvirtDriver(driver.ComputeDriver): block_device_info=block_device_info) return xml - @exception.wrap_exception() def detach_volume(self, connection_info, instance, mountpoint): instance_name = instance['name'] mount_device = mountpoint.rpartition("/")[2] @@ -749,7 +748,6 @@ class LibvirtDriver(driver.ComputeDriver): connection_info, mount_device) - @exception.wrap_exception() def snapshot(self, context, instance, image_href, update_task_state): """Create snapshot from a running VM instance. 
@@ -810,7 +808,7 @@ class LibvirtDriver(driver.ComputeDriver): # NOTE(dkang): managedSave does not work for LXC if CONF.libvirt_type != 'lxc': - if state == power_state.RUNNING: + if state == power_state.RUNNING or state == power_state.PAUSED: virt_dom.managedSave(0) # Make the snapshot @@ -834,6 +832,9 @@ class LibvirtDriver(driver.ComputeDriver): if CONF.libvirt_type != 'lxc': if state == power_state.RUNNING: self._create_domain(domain=virt_dom) + elif state == power_state.PAUSED: + self._create_domain(domain=virt_dom, + launch_flags=libvirt.VIR_DOMAIN_START_PAUSED) # Upload that image to the image service @@ -845,7 +846,6 @@ class LibvirtDriver(driver.ComputeDriver): metadata, image_file) - @exception.wrap_exception() def reboot(self, instance, network_info, reboot_type='SOFT', block_device_info=None): """Reboot a virtual machine, given an instance reference.""" @@ -858,8 +858,7 @@ class LibvirtDriver(driver.ComputeDriver): else: LOG.warn(_("Failed to soft reboot instance."), instance=instance) - return self._hard_reboot(instance, network_info, - block_device_info=block_device_info) + return self._hard_reboot(instance, network_info, block_device_info) def _soft_reboot(self, instance): """Attempt to shutdown and restart the instance gracefully. @@ -898,8 +897,7 @@ class LibvirtDriver(driver.ComputeDriver): greenthread.sleep(1) return False - def _hard_reboot(self, instance, network_info, xml=None, - block_device_info=None): + def _hard_reboot(self, instance, network_info, block_device_info=None): """Reboot a virtual machine, given an instance reference. Performs a Libvirt reset (if supported) on the domain. @@ -912,11 +910,10 @@ class LibvirtDriver(driver.ComputeDriver): existing domain. 
""" - if not xml: - xml = self._get_domain_xml(instance, network_info, - block_device_info) - self._destroy(instance) + xml = self.to_xml(instance, network_info, + block_device_info=block_device_info, + write_to_disk=True) self._create_domain_and_network(xml, instance, network_info, block_device_info) @@ -932,24 +929,20 @@ class LibvirtDriver(driver.ComputeDriver): timer = utils.FixedIntervalLoopingCall(_wait_for_reboot) timer.start(interval=0.5).wait() - @exception.wrap_exception() def pause(self, instance): """Pause VM instance.""" dom = self._lookup_by_name(instance['name']) dom.suspend() - @exception.wrap_exception() def unpause(self, instance): """Unpause paused VM instance.""" dom = self._lookup_by_name(instance['name']) dom.resume() - @exception.wrap_exception() def power_off(self, instance): """Power off the specified instance.""" self._destroy(instance) - @exception.wrap_exception() def power_on(self, instance): """Power on the specified instance.""" dom = self._lookup_by_name(instance['name']) @@ -958,28 +951,44 @@ class LibvirtDriver(driver.ComputeDriver): instance) timer.start(interval=0.5).wait() - @exception.wrap_exception() def suspend(self, instance): """Suspend the specified instance.""" dom = self._lookup_by_name(instance['name']) dom.managedSave(0) - @exception.wrap_exception() def resume(self, instance, network_info, block_device_info=None): """resume the specified instance.""" - xml = self._get_domain_xml(instance, network_info, block_device_info) + xml = self._get_existing_domain_xml(instance, network_info, + block_device_info) self._create_domain_and_network(xml, instance, network_info, block_device_info) - @exception.wrap_exception() def resume_state_on_host_boot(self, context, instance, network_info, block_device_info=None): """resume guest state when a host is booted.""" - xml = self._get_domain_xml(instance, network_info, block_device_info) + xml = self._get_existing_domain_xml(instance, network_info, + block_device_info) 
self._create_domain_and_network(xml, instance, network_info, block_device_info) - @exception.wrap_exception() + # Check if the instance is running already and avoid doing + # anything if it is. + if self.instance_exists(instance['name']): + domain = self._lookup_by_name(instance['name']) + state = LIBVIRT_POWER_STATE[domain.info()[0]] + + ignored_states = (power_state.RUNNING, + power_state.SUSPENDED, + power_state.PAUSED) + + if state in ignored_states: + return + + # Instance is not up and could be in an unknown state. + # Be as absolute as possible about getting it back into + # a known and running state. + self._hard_reboot(instance, network_info, block_device_info) + def rescue(self, context, instance, network_info, image_meta, rescue_password): """Loads a VM using rescue images. @@ -990,11 +999,9 @@ class LibvirtDriver(driver.ComputeDriver): data recovery. """ - - unrescue_xml = self._get_domain_xml(instance, network_info) - unrescue_xml_path = os.path.join(CONF.instances_path, - instance['name'], - 'unrescue.xml') + instance_dir = libvirt_utils.get_instance_path(instance) + unrescue_xml = self._get_existing_domain_xml(instance, network_info) + unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml') libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml) rescue_images = { @@ -1010,24 +1017,20 @@ class LibvirtDriver(driver.ComputeDriver): self._destroy(instance) self._create_domain(xml) - @exception.wrap_exception() def unrescue(self, instance, network_info): """Reboot the VM which is being rescued back into primary images. 
""" - unrescue_xml_path = os.path.join(CONF.instances_path, - instance['name'], - 'unrescue.xml') + instance_dir = libvirt_utils.get_instance_path(instance) + unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml') xml = libvirt_utils.load_file(unrescue_xml_path) virt_dom = self._lookup_by_name(instance['name']) self._destroy(instance) self._create_domain(xml, virt_dom) libvirt_utils.file_delete(unrescue_xml_path) - rescue_files = os.path.join(CONF.instances_path, instance['name'], - "*.rescue") + rescue_files = os.path.join(instance_dir, "*.rescue") for rescue_file in glob.iglob(rescue_files): libvirt_utils.file_delete(rescue_file) - @exception.wrap_exception() def poll_rebooting_instances(self, timeout, instances): pass @@ -1042,7 +1045,6 @@ class LibvirtDriver(driver.ComputeDriver): # NOTE(ilyaalekseyev): Implementation like in multinics # for xenapi(tr3buchet) - @exception.wrap_exception() def spawn(self, context, instance, image_meta, injected_files, admin_password, network_info=None, block_device_info=None): xml = self.to_xml(instance, network_info, image_meta, @@ -1083,7 +1085,6 @@ class LibvirtDriver(driver.ComputeDriver): fp.write(data) return fpath - @exception.wrap_exception() def get_console_output(self, instance): virt_dom = self._lookup_by_name(instance['name']) xml = virt_dom.XMLDesc(0) @@ -1134,9 +1135,9 @@ class LibvirtDriver(driver.ComputeDriver): msg = _("Guest does not have a console available") raise exception.NovaException(msg) - self._chown_console_log_for_instance(instance['name']) + self._chown_console_log_for_instance(instance) data = self._flush_libvirt_console(pty) - console_log = self._get_console_log_path(instance['name']) + console_log = self._get_console_log_path(instance) fpath = self._append_to_file(data, console_log) with libvirt_utils.file_open(fpath, 'rb') as fp: @@ -1150,7 +1151,6 @@ class LibvirtDriver(driver.ComputeDriver): def get_host_ip_addr(): return CONF.my_ip - @exception.wrap_exception() def 
get_vnc_console(self, instance): def get_vnc_port_for_instance(instance_name): virt_dom = self._lookup_by_name(instance_name) @@ -1167,6 +1167,27 @@ class LibvirtDriver(driver.ComputeDriver): return {'host': host, 'port': port, 'internal_access_path': None} + @exception.wrap_exception() + def get_spice_console(self, instance): + def get_spice_ports_for_instance(instance_name): + virt_dom = self._lookup_by_name(instance_name) + xml = virt_dom.XMLDesc(0) + # TODO(sleepsonthefloor): use etree instead of minidom + dom = minidom.parseString(xml) + + for graphic in dom.getElementsByTagName('graphics'): + if graphic.getAttribute('type') == 'spice': + return (graphic.getAttribute('port'), + graphic.getAttribute('tlsPort')) + + return (None, None) + + ports = get_spice_ports_for_instance(instance['name']) + host = CONF.spice.server_proxyclient_address + + return {'host': host, 'port': ports[0], + 'tlsPort': ports[1], 'internal_access_path': None} + @staticmethod def _supports_direct_io(dirpath): @@ -1227,11 +1248,12 @@ class LibvirtDriver(driver.ComputeDriver): utils.mkfs('swap', target) @staticmethod - def _get_console_log_path(instance_name): - return os.path.join(CONF.instances_path, instance_name, 'console.log') + def _get_console_log_path(instance): + return os.path.join(libvirt_utils.get_instance_path(instance), + 'console.log') - def _chown_console_log_for_instance(self, instance_name): - console_log = self._get_console_log_path(instance_name) + def _chown_console_log_for_instance(self, instance): + console_log = self._get_console_log_path(instance) if os.path.exists(console_log): libvirt_utils.chown(console_log, os.getuid()) @@ -1243,12 +1265,11 @@ class LibvirtDriver(driver.ComputeDriver): # syntactic nicety def basepath(fname='', suffix=suffix): - return os.path.join(CONF.instances_path, - instance['name'], + return os.path.join(libvirt_utils.get_instance_path(instance), fname + suffix) def image(fname, image_type=CONF.libvirt_images_type): - return 
self.image_backend.image(instance['name'], + return self.image_backend.image(instance, fname + suffix, image_type) def raw(fname): @@ -1261,11 +1282,11 @@ class LibvirtDriver(driver.ComputeDriver): libvirt_utils.write_to_file(basepath('libvirt.xml'), libvirt_xml) # NOTE(dprince): for rescue console.log may already exist... chown it. - self._chown_console_log_for_instance(instance['name']) + self._chown_console_log_for_instance(instance) # NOTE(vish): No need add the suffix to console.log libvirt_utils.write_to_file( - self._get_console_log_path(instance['name']), '', 007) + self._get_console_log_path(instance), '', 007) if not disk_images: disk_images = {'image_id': instance['image_ref'], @@ -1443,11 +1464,11 @@ class LibvirtDriver(driver.ComputeDriver): def get_host_capabilities(self): """Returns an instance of config.LibvirtConfigCaps representing the capabilities of the host""" - xmlstr = self._conn.getCapabilities() - - caps = vconfig.LibvirtConfigCaps() - caps.parse_str(xmlstr) - return caps + if not self._caps: + xmlstr = self._conn.getCapabilities() + self._caps = vconfig.LibvirtConfigCaps() + self._caps.parse_str(xmlstr) + return self._caps def get_host_uuid(self): """Returns a UUID representing the host.""" @@ -1472,6 +1493,7 @@ class LibvirtDriver(driver.ComputeDriver): for hostfeat in hostcpu.features: guestfeat = vconfig.LibvirtConfigGuestCPUFeature(hostfeat.name) guestfeat.policy = "require" + guestcpu.features.append(guestfeat) return guestcpu @@ -1537,9 +1559,8 @@ class LibvirtDriver(driver.ComputeDriver): if CONF.libvirt_type == "lxc": fs = vconfig.LibvirtConfigGuestFilesys() fs.source_type = "mount" - fs.source_dir = os.path.join(CONF.instances_path, - instance['name'], - 'rootfs') + fs.source_dir = os.path.join( + libvirt_utils.get_instance_path(instance), 'rootfs') devices.append(fs) else: if image_meta and image_meta.get('disk_format') == 'iso': @@ -1557,8 +1578,7 @@ class LibvirtDriver(driver.ComputeDriver): def disk_info(name, disk_dev, 
disk_bus=default_disk_bus, device_type="disk"): - image = self.image_backend.image(instance['name'], - name) + image = self.image_backend.image(instance, name) return image.libvirt_info(disk_bus, disk_dev, device_type, @@ -1645,9 +1665,8 @@ class LibvirtDriver(driver.ComputeDriver): diskconfig.source_type = "file" diskconfig.driver_format = "raw" diskconfig.driver_cache = self.disk_cachemode - diskconfig.source_path = os.path.join(CONF.instances_path, - instance['name'], - "disk.config") + diskconfig.source_path = os.path.join( + libvirt_utils.get_instance_path(instance), "disk.config") diskconfig.target_dev = self.default_last_device diskconfig.target_bus = default_disk_bus devices.append(diskconfig) @@ -1675,6 +1694,7 @@ class LibvirtDriver(driver.ComputeDriver): 'kernel_id' if a kernel is needed for the rescue image. """ inst_type = instance['instance_type'] + inst_path = libvirt_utils.get_instance_path(instance) guest = vconfig.LibvirtConfigGuest() guest.virt_type = CONF.libvirt_type @@ -1733,9 +1753,7 @@ class LibvirtDriver(driver.ComputeDriver): if rescue: if rescue.get('kernel_id'): - guest.os_kernel = os.path.join(CONF.instances_path, - instance['name'], - "kernel.rescue") + guest.os_kernel = os.path.join(inst_path, "kernel.rescue") if CONF.libvirt_type == "xen": guest.os_cmdline = "ro" else: @@ -1743,22 +1761,16 @@ class LibvirtDriver(driver.ComputeDriver): (root_device_name or "/dev/vda",)) if rescue.get('ramdisk_id'): - guest.os_initrd = os.path.join(CONF.instances_path, - instance['name'], - "ramdisk.rescue") + guest.os_initrd = os.path.join(inst_path, "ramdisk.rescue") elif instance['kernel_id']: - guest.os_kernel = os.path.join(CONF.instances_path, - instance['name'], - "kernel") + guest.os_kernel = os.path.join(inst_path, "kernel") if CONF.libvirt_type == "xen": guest.os_cmdline = "ro" else: guest.os_cmdline = ("root=%s console=ttyS0" % (root_device_name or "/dev/vda",)) if instance['ramdisk_id']: - guest.os_initrd = os.path.join(CONF.instances_path, 
- instance['name'], - "ramdisk") + guest.os_initrd = os.path.join(inst_path, "ramdisk") else: guest.os_boot_dev = "hd" @@ -1806,8 +1818,7 @@ class LibvirtDriver(driver.ComputeDriver): # to configure two separate consoles. consolelog = vconfig.LibvirtConfigGuestSerial() consolelog.type = "file" - consolelog.source_path = self._get_console_log_path( - instance['name']) + consolelog.source_path = self._get_console_log_path(instance) guest.add_device(consolelog) consolepty = vconfig.LibvirtConfigGuestSerial() @@ -1818,27 +1829,64 @@ class LibvirtDriver(driver.ComputeDriver): consolepty.type = "pty" guest.add_device(consolepty) - if CONF.vnc_enabled and CONF.libvirt_type not in ('lxc', 'uml'): - if CONF.use_usb_tablet and guest.os_type == vm_mode.HVM: - tablet = vconfig.LibvirtConfigGuestInput() - tablet.type = "tablet" - tablet.bus = "usb" - guest.add_device(tablet) + # We want a tablet if VNC is enabled, + # or SPICE is enabled and the SPICE agent is disabled + # NB: this implies that if both SPICE + VNC are enabled + # at the same time, we'll get the tablet whether the + # SPICE agent is used or not. + need_usb_tablet = False + if CONF.vnc_enabled: + need_usb_tablet = CONF.use_usb_tablet + elif CONF.spice.enabled and not CONF.spice.agent_enabled: + need_usb_tablet = CONF.use_usb_tablet + + if need_usb_tablet and guest.os_type == vm_mode.HVM: + tablet = vconfig.LibvirtConfigGuestInput() + tablet.type = "tablet" + tablet.bus = "usb" + guest.add_device(tablet) + + if CONF.spice.enabled and CONF.spice.agent_enabled and \ + CONF.libvirt_type not in ('lxc', 'uml', 'xen'): + channel = vconfig.LibvirtConfigGuestChannel() + channel.target_name = "com.redhat.spice.0" + guest.add_device(channel) + + # NB some versions of libvirt support both SPICE and VNC + # at the same time. We're not trying to second guess which + # those versions are. We'll just let libvirt report the + # errors appropriately if the user enables both. 
+ if CONF.vnc_enabled and CONF.libvirt_type not in ('lxc', 'uml'): graphics = vconfig.LibvirtConfigGuestGraphics() graphics.type = "vnc" graphics.keymap = CONF.vnc_keymap graphics.listen = CONF.vncserver_listen guest.add_device(graphics) + if CONF.spice.enabled and \ + CONF.libvirt_type not in ('lxc', 'uml', 'xen'): + graphics = vconfig.LibvirtConfigGuestGraphics() + graphics.type = "spice" + graphics.keymap = CONF.spice.keymap + graphics.listen = CONF.spice.server_listen + guest.add_device(graphics) + return guest def to_xml(self, instance, network_info, image_meta=None, rescue=None, - block_device_info=None): + block_device_info=None, write_to_disk=False): LOG.debug(_('Starting toXML method'), instance=instance) conf = self.get_guest_config(instance, network_info, image_meta, rescue, block_device_info) xml = conf.to_xml() + + if write_to_disk: + instance_dir = os.path.join(CONF.instances_path, + instance["name"]) + xml_path = os.path.join(instance_dir, 'libvirt.xml') + libvirt_utils.write_to_file(xml_path, xml) + LOG.debug(_('Finished toXML method'), instance=instance) return xml @@ -1877,18 +1925,23 @@ class LibvirtDriver(driver.ComputeDriver): 'cpu_time': cpu_time} def _create_domain(self, xml=None, domain=None, - inst_name='', launch_flags=0): + instance=None, launch_flags=0): """Create a domain. Either domain or xml must be passed in. If both are passed, then the domain definition is overwritten from the xml. 
""" + inst_path = None + if instance: + inst_path = libvirt_utils.get_instance_path(instance) + if CONF.libvirt_type == 'lxc': - container_dir = os.path.join(CONF.instances_path, - inst_name, - 'rootfs') + if not inst_path: + inst_path = None + + container_dir = os.path.join(inst_path, 'rootfs') fileutils.ensure_tree(container_dir) - image = self.image_backend.image(inst_name, 'disk') + image = self.image_backend.image(instance, 'disk') disk.setup_container(image.path, container_dir=container_dir, use_cow=CONF.use_cow_images) @@ -1902,9 +1955,7 @@ class LibvirtDriver(driver.ComputeDriver): # namespace and so there is no need to keep the container rootfs # mounted in the host namespace if CONF.libvirt_type == 'lxc': - container_dir = os.path.join(CONF.instances_path, - inst_name, - 'rootfs') + container_dir = os.path.join(inst_path, 'rootfs') disk.teardown_container(container_dir=container_dir) return domain @@ -1926,7 +1977,7 @@ class LibvirtDriver(driver.ComputeDriver): self.plug_vifs(instance, network_info) self.firewall_driver.setup_basic_filtering(instance, network_info) self.firewall_driver.prepare_instance_filter(instance, network_info) - domain = self._create_domain(xml, inst_name=instance['name']) + domain = self._create_domain(xml, instance=instance) self.firewall_driver.apply_instance_filter(instance, network_info) return domain @@ -1971,7 +2022,7 @@ class LibvirtDriver(driver.ComputeDriver): def get_interfaces(self, xml): """ - Note that this function takes an domain xml. + Note that this function takes a domain xml. Returns a list of all network interfaces for this instance. 
""" @@ -2615,7 +2666,7 @@ class LibvirtDriver(driver.ComputeDriver): def _fetch_instance_kernel_ramdisk(self, context, instance): """Download kernel and ramdisk for instance in instance directory.""" - instance_dir = os.path.join(CONF.instances_path, instance['name']) + instance_dir = libvirt_utils.get_instance_path(instance) if instance['kernel_id']: libvirt_utils.fetch_image(context, os.path.join(instance_dir, 'kernel'), @@ -2645,14 +2696,13 @@ class LibvirtDriver(driver.ComputeDriver): if is_volume_backed and not (is_block_migration or is_shared_storage): # Create the instance directory on destination compute node. - instance_dir = os.path.join(CONF.instances_path, - instance_ref['name']) + instance_dir = libvirt_utils.get_instance_path(instance_ref) if os.path.exists(instance_dir): raise exception.DestinationDiskExists(path=instance_dir) os.mkdir(instance_dir) # Touch the console.log file, required by libvirt. - console_file = self._get_console_log_path(instance_ref['name']) + console_file = self._get_console_log_path(instance_ref) libvirt_utils.file_open(console_file, 'a').close() # if image has kernel and ramdisk, just download @@ -2701,7 +2751,7 @@ class LibvirtDriver(driver.ComputeDriver): disk_info = jsonutils.loads(disk_info_json) # make instance directory - instance_dir = os.path.join(CONF.instances_path, instance['name']) + instance_dir = libvirt_utils.get_instance_path(instance) if os.path.exists(instance_dir): raise exception.DestinationDiskExists(path=instance_dir) os.mkdir(instance_dir) @@ -2720,7 +2770,7 @@ class LibvirtDriver(driver.ComputeDriver): # Remove any size tags which the cache manages cache_name = cache_name.split('_')[0] - image = self.image_backend.image(instance['name'], + image = self.image_backend.image(instance, instance_disk, CONF.libvirt_images_type) image.cache(fetch_func=libvirt_utils.fetch_image, @@ -2738,29 +2788,24 @@ class LibvirtDriver(driver.ComputeDriver): def post_live_migration_at_destination(self, ctxt, instance_ref, 
network_info, - block_migration): + block_migration, + block_device_info=None): """Post operation of live migration at destination host. :param ctxt: security context :param instance_ref: nova.db.sqlalchemy.models.Instance object instance object that is migrated. - :param network_info: instance network infomation + :param network_info: instance network information :param block_migration: if true, post operation of block_migraiton. """ # Define migrated instance, otherwise, suspend/destroy does not work. dom_list = self._conn.listDefinedDomains() if instance_ref["name"] not in dom_list: - instance_dir = os.path.join(CONF.instances_path, - instance_ref["name"]) - xml_path = os.path.join(instance_dir, 'libvirt.xml') # In case of block migration, destination does not have # libvirt.xml - if not os.path.isfile(xml_path): - xml = self.to_xml(instance_ref, network_info=network_info) - f = open(os.path.join(instance_dir, 'libvirt.xml'), 'w+') - f.write(xml) - f.close() + self.to_xml(instance_ref, network_info, block_device_info, + write_to_disk=True) # libvirt.xml should be made by to_xml(), but libvirt # does not accept to_xml() result, since uuid is not # included in to_xml() result. @@ -2891,7 +2936,6 @@ class LibvirtDriver(driver.ComputeDriver): except Exception: pass - @exception.wrap_exception() def migrate_disk_and_power_off(self, context, instance, dest, instance_type, network_info, block_device_info=None): @@ -2915,7 +2959,7 @@ class LibvirtDriver(driver.ComputeDriver): # rename instance dir to +_resize at first for using # shared storage for instance dir (eg. NFS). 
same_host = (dest == self.get_host_ip_addr()) - inst_base = "%s/%s" % (CONF.instances_path, instance['name']) + inst_base = libvirt_utils.get_instance_path(instance) inst_base_resize = inst_base + "_resize" try: utils.execute('mv', inst_base, inst_base_resize) @@ -2957,7 +3001,6 @@ class LibvirtDriver(driver.ComputeDriver): LOG.info(_("Instance running successfully."), instance=instance) raise utils.LoopingCallDone() - @exception.wrap_exception() def finish_migration(self, context, migration, instance, disk_info, network_info, image_meta, resize_instance, block_device_info=None): @@ -3010,13 +3053,12 @@ class LibvirtDriver(driver.ComputeDriver): instance) timer.start(interval=0.5).wait() - @exception.wrap_exception() def finish_revert_migration(self, instance, network_info, block_device_info=None): LOG.debug(_("Starting finish_revert_migration"), instance=instance) - inst_base = "%s/%s" % (CONF.instances_path, instance['name']) + inst_base = libvirt_utils.get_instance_path(instance) inst_base_resize = inst_base + "_resize" utils.execute('mv', inst_base_resize, inst_base) @@ -3122,12 +3164,10 @@ class LibvirtDriver(driver.ComputeDriver): def instance_on_disk(self, instance): # ensure directories exist and are writable - instance_path = os.path.join(CONF.instances_path, instance["name"]) - + instance_path = libvirt_utils.get_instance_path(instance) LOG.debug(_('Checking instance files accessability' '%(instance_path)s') % locals()) - return os.access(instance_path, os.W_OK) diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index c47056ff2..3323b8f1d 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -228,11 +228,11 @@ class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver): def setup_basic_filtering(self, instance, network_info): """Set up provider rules and basic NWFilter.""" self.nwfilter.setup_basic_filtering(instance, network_info) - if not self.basicly_filtered: + if not self.basically_filtered: 
LOG.debug(_('iptables firewall: Setup Basic Filtering'), instance=instance) self.refresh_provider_fw_rules() - self.basicly_filtered = True + self.basically_filtered = True def apply_instance_filter(self, instance, network_info): """No-op. Everything is done in prepare_instance_filter.""" diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py index f4c41f539..0815c142f 100644 --- a/nova/virt/libvirt/imagebackend.py +++ b/nova/virt/libvirt/imagebackend.py @@ -142,8 +142,9 @@ class Raw(Image): def __init__(self, instance=None, name=None, path=None): super(Raw, self).__init__("file", "raw", is_block_dev=False) - self.path = path or os.path.join(CONF.instances_path, - instance, name) + self.path = (path or + os.path.join(libvirt_utils.get_instance_path(instance), + name)) def create_image(self, prepare_template, base, size, *args, **kwargs): @lockutils.synchronized(base, 'nova-', external=True, @@ -170,8 +171,9 @@ class Qcow2(Image): def __init__(self, instance=None, name=None, path=None): super(Qcow2, self).__init__("file", "qcow2", is_block_dev=False) - self.path = path or os.path.join(CONF.instances_path, - instance, name) + self.path = (path or + os.path.join(libvirt_utils.get_instance_path(instance), + name)) def create_image(self, prepare_template, base, size, *args, **kwargs): @lockutils.synchronized(base, 'nova-', external=True, @@ -208,7 +210,7 @@ class Lvm(Image): ' libvirt_images_volume_group' ' flag to use LVM images.')) self.vg = CONF.libvirt_images_volume_group - self.lv = '%s_%s' % (self.escape(instance), + self.lv = '%s_%s' % (self.escape(instance['name']), self.escape(name)) self.path = os.path.join('/dev', self.vg, self.lv) @@ -226,7 +228,7 @@ class Lvm(Image): cmd = ('dd', 'if=%s' % base, 'of=%s' % self.path, 'bs=4M') utils.execute(*cmd, run_as_root=True) if resize: - disk.resize2fs(self.path) + disk.resize2fs(self.path, run_as_root=True) generated = 'ephemeral_size' in kwargs diff --git a/nova/virt/libvirt/imagecache.py 
b/nova/virt/libvirt/imagecache.py index 50fac9bb4..8f677b482 100644 --- a/nova/virt/libvirt/imagecache.py +++ b/nova/virt/libvirt/imagecache.py @@ -77,7 +77,7 @@ CONF.import_opt('instances_path', 'nova.compute.manager') def get_info_filename(base_path): - """Construct a filename for storing addtional information about a base + """Construct a filename for storing additional information about a base image. Returns a filename. diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py index 9c8d192c7..4b3517da7 100644 --- a/nova/virt/libvirt/utils.py +++ b/nova/virt/libvirt/utils.py @@ -30,6 +30,7 @@ from nova import utils from nova.virt import images CONF = cfg.CONF +CONF.import_opt('instances_path', 'nova.compute.manager') LOG = logging.getLogger(__name__) @@ -498,3 +499,19 @@ def get_fs_info(path): def fetch_image(context, target, image_id, user_id, project_id): """Grab image.""" images.fetch_to_raw(context, image_id, target, user_id, project_id) + + +def get_instance_path(instance): + """Determine the correct path for instance storage. + + This used to be calculated all over the place. This method centralizes + this into one location, which will make it easier to change the + algorithm used to name instance storage directories. 
+ + :param instance: the instance we want a path for + + :returns: a path to store information about that instance + """ + # TODO(mikal): we should use UUID instead of name, as name isn't + # necessarily unique + return os.path.join(CONF.instances_path, instance['name']) diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index 0cf1b1658..83d43a6db 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -21,6 +21,7 @@ from nova import exception from nova.network import linux_net +from nova.network import model as network_model from nova.openstack.common import cfg from nova.openstack.common import log as logging from nova import utils @@ -31,9 +32,11 @@ from nova.virt import netutils LOG = logging.getLogger(__name__) libvirt_vif_opts = [ + # quantum_ovs_bridge is used, if Quantum provides Nova + # the 'vif_type' portbinding field cfg.StrOpt('libvirt_ovs_bridge', - default='br-int', - help='Name of Integration Bridge used by Open vSwitch'), + default='br-int', + help='Name of Integration Bridge used by Open vSwitch'), cfg.BoolOpt('libvirt_use_virtio_for_bridges', default=True, help='Use virtio for bridge interfaces with KVM/QEMU'), @@ -44,11 +47,14 @@ CONF.register_opts(libvirt_vif_opts) CONF.import_opt('libvirt_type', 'nova.virt.libvirt.driver') CONF.import_opt('use_ipv6', 'nova.netconf') -LINUX_DEV_LEN = 14 - class LibvirtBaseVIFDriver(object): + def get_vif_devname(self, mapping): + if 'vif_devname' in mapping: + return mapping['vif_devname'] + return ("nic" + mapping['vif_uuid'])[:network_model.NIC_NAME_LEN] + def get_config(self, instance, network, mapping): conf = vconfig.LibvirtConfigGuestInterface() model = None @@ -70,6 +76,9 @@ class LibvirtBaseVIFDriver(object): class LibvirtBridgeDriver(LibvirtBaseVIFDriver): """VIF driver for Linux bridge.""" + def get_bridge_name(self, network): + return network['bridge'] + def get_config(self, instance, network, mapping): """Get VIF configurations for bridge type.""" @@ -81,7 +90,8 @@ 
class LibvirtBridgeDriver(LibvirtBaseVIFDriver): mapping) designer.set_vif_host_backend_bridge_config( - conf, network['bridge'], None) + conf, self.get_bridge_name(network), + self.get_vif_devname(mapping)) name = "nova-instance-" + instance['name'] + "-" + mac_id primary_addr = mapping['ips'][0]['ip'] @@ -111,18 +121,18 @@ class LibvirtBridgeDriver(LibvirtBaseVIFDriver): iface = CONF.vlan_interface or network['bridge_interface'] LOG.debug(_('Ensuring vlan %(vlan)s and bridge %(bridge)s'), {'vlan': network['vlan'], - 'bridge': network['bridge']}, + 'bridge': self.get_bridge_name(network)}, instance=instance) linux_net.LinuxBridgeInterfaceDriver.ensure_vlan_bridge( network['vlan'], - network['bridge'], + self.get_bridge_name(network), iface) else: iface = CONF.flat_interface or network['bridge_interface'] - LOG.debug(_("Ensuring bridge %s"), network['bridge'], - instance=instance) + LOG.debug(_("Ensuring bridge %s"), + self.get_bridge_name(network), instance=instance) linux_net.LinuxBridgeInterfaceDriver.ensure_bridge( - network['bridge'], + self.get_bridge_name(network), iface) def unplug(self, instance, vif): @@ -137,11 +147,11 @@ class LibvirtOpenVswitchDriver(LibvirtBaseVIFDriver): OVS virtual port XML (0.9.10 or earlier). 
""" - def get_dev_name(self, iface_id): - return ("tap" + iface_id)[:LINUX_DEV_LEN] + def get_bridge_name(self, network): + return network.get('bridge') or CONF.libvirt_ovs_bridge def get_config(self, instance, network, mapping): - dev = self.get_dev_name(mapping['vif_uuid']) + dev = self.get_vif_devname(mapping) conf = super(LibvirtOpenVswitchDriver, self).get_config(instance, @@ -152,25 +162,25 @@ class LibvirtOpenVswitchDriver(LibvirtBaseVIFDriver): return conf - def create_ovs_vif_port(self, dev, iface_id, mac, instance_id): + def create_ovs_vif_port(self, bridge, dev, iface_id, mac, instance_id): utils.execute('ovs-vsctl', '--', '--may-exist', 'add-port', - CONF.libvirt_ovs_bridge, dev, - '--', 'set', 'Interface', dev, - 'external-ids:iface-id=%s' % iface_id, - 'external-ids:iface-status=active', - 'external-ids:attached-mac=%s' % mac, - 'external-ids:vm-uuid=%s' % instance_id, - run_as_root=True) - - def delete_ovs_vif_port(self, dev): - utils.execute('ovs-vsctl', 'del-port', CONF.libvirt_ovs_bridge, - dev, run_as_root=True) + bridge, dev, + '--', 'set', 'Interface', dev, + 'external-ids:iface-id=%s' % iface_id, + 'external-ids:iface-status=active', + 'external-ids:attached-mac=%s' % mac, + 'external-ids:vm-uuid=%s' % instance_id, + run_as_root=True) + + def delete_ovs_vif_port(self, bridge, dev): + utils.execute('ovs-vsctl', 'del-port', bridge, dev, + run_as_root=True) utils.execute('ip', 'link', 'delete', dev, run_as_root=True) def plug(self, instance, vif): network, mapping = vif iface_id = mapping['vif_uuid'] - dev = self.get_dev_name(iface_id) + dev = self.get_vif_devname(mapping) if not linux_net.device_exists(dev): # Older version of the command 'ip' from the iproute2 package # don't have support for the tuntap option (lp:882568). 
If it @@ -185,14 +195,16 @@ class LibvirtOpenVswitchDriver(LibvirtBaseVIFDriver): utils.execute('tunctl', '-b', '-t', dev, run_as_root=True) utils.execute('ip', 'link', 'set', dev, 'up', run_as_root=True) - self.create_ovs_vif_port(dev, iface_id, mapping['mac'], + self.create_ovs_vif_port(self.get_bridge_name(network), + dev, iface_id, mapping['mac'], instance['uuid']) def unplug(self, instance, vif): """Unplug the VIF by deleting the port from the bridge.""" try: network, mapping = vif - self.delete_ovs_vif_port(self.get_dev_name(mapping['vif_uuid'])) + self.delete_ovs_vif_port(self.get_bridge_name(network), + self.get_vif_devname(mapping)) except exception.ProcessExecutionError: LOG.exception(_("Failed while unplugging vif"), instance=instance) @@ -208,11 +220,14 @@ class LibvirtHybridOVSBridgeDriver(LibvirtBridgeDriver, """ def get_br_name(self, iface_id): - return ("qbr" + iface_id)[:LINUX_DEV_LEN] + return ("qbr" + iface_id)[:network_model.NIC_NAME_LEN] def get_veth_pair_names(self, iface_id): - return (("qvb%s" % iface_id)[:LINUX_DEV_LEN], - ("qvo%s" % iface_id)[:LINUX_DEV_LEN]) + return (("qvb%s" % iface_id)[:network_model.NIC_NAME_LEN], + ("qvo%s" % iface_id)[:network_model.NIC_NAME_LEN]) + + def get_bridge_name(self, network): + return network.get('bridge') or CONF.libvirt_ovs_bridge def get_config(self, instance, network, mapping): br_name = self.get_br_name(mapping['vif_uuid']) @@ -243,7 +258,8 @@ class LibvirtHybridOVSBridgeDriver(LibvirtBridgeDriver, linux_net._create_veth_pair(v1_name, v2_name) utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True) utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True) - self.create_ovs_vif_port(v2_name, iface_id, mapping['mac'], + self.create_ovs_vif_port(self.get_bridge_name(network), + v2_name, iface_id, mapping['mac'], instance['uuid']) def unplug(self, instance, vif): @@ -263,7 +279,7 @@ class LibvirtHybridOVSBridgeDriver(LibvirtBridgeDriver, run_as_root=True) utils.execute('brctl', 
'delbr', br_name, run_as_root=True) - self.delete_ovs_vif_port(v2_name) + self.delete_ovs_vif_port(self.get_bridge_name(network), v2_name) except exception.ProcessExecutionError: LOG.exception(_("Failed while unplugging vif"), instance=instance) @@ -272,6 +288,9 @@ class LibvirtOpenVswitchVirtualPortDriver(LibvirtBaseVIFDriver): """VIF driver for Open vSwitch that uses integrated libvirt OVS virtual port XML (introduced in libvirt 0.9.11).""" + def get_bridge_name(self, network): + return network.get('bridge') or CONF.libvirt_ovs_bridge + def get_config(self, instance, network, mapping): """Pass data required to create OVS virtual port element.""" conf = super(LibvirtOpenVswitchVirtualPortDriver, @@ -280,7 +299,8 @@ class LibvirtOpenVswitchVirtualPortDriver(LibvirtBaseVIFDriver): mapping) designer.set_vif_host_backend_ovs_config( - conf, CONF.libvirt_ovs_bridge, mapping['vif_uuid']) + conf, self.get_bridge_name(network), mapping['vif_uuid'], + self.get_vif_devname(mapping)) return conf @@ -295,19 +315,15 @@ class LibvirtOpenVswitchVirtualPortDriver(LibvirtBaseVIFDriver): class QuantumLinuxBridgeVIFDriver(LibvirtBaseVIFDriver): """VIF driver for Linux Bridge when running Quantum.""" - def get_bridge_name(self, network_id): - return ("brq" + network_id)[:LINUX_DEV_LEN] - - def get_dev_name(self, iface_id): - return ("tap" + iface_id)[:LINUX_DEV_LEN] + def get_bridge_name(self, network): + def_bridge = ("brq" + network['id'])[:network_model.NIC_NAME_LEN] + return network.get('bridge') or def_bridge def get_config(self, instance, network, mapping): - iface_id = mapping['vif_uuid'] - dev = self.get_dev_name(iface_id) - - bridge = self.get_bridge_name(network['id']) - linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(bridge, None, - filtering=False) + linux_net.LinuxBridgeInterfaceDriver.ensure_bridge( + self.get_bridge_name(network), + None, + filtering=False) conf = super(QuantumLinuxBridgeVIFDriver, self).get_config(instance, @@ -315,7 +331,8 @@ class 
QuantumLinuxBridgeVIFDriver(LibvirtBaseVIFDriver): mapping) designer.set_vif_host_backend_bridge_config( - conf, bridge, dev) + conf, self.get_bridge_name(network), + self.get_vif_devname(mapping)) return conf diff --git a/nova/virt/powervm/operator.py b/nova/virt/powervm/operator.py index b25a96159..5a4a2938b 100644 --- a/nova/virt/powervm/operator.py +++ b/nova/virt/powervm/operator.py @@ -55,7 +55,7 @@ def get_powervm_disk_adapter(): class PowerVMOperator(object): """PowerVM main operator. - The PowerVMOperator is intented to wrapper all operations + The PowerVMOperator is intended to wrap all operations from the driver and handle either IVM or HMC managed systems. """ diff --git a/nova/virt/vmwareapi/__init__.py b/nova/virt/vmwareapi/__init__.py index fa6f6ceb5..66e7d9b02 100644 --- a/nova/virt/vmwareapi/__init__.py +++ b/nova/virt/vmwareapi/__init__.py @@ -18,4 +18,4 @@ :mod:`vmwareapi` -- Nova support for VMware ESX/ESXi Server through VMware API. """ # NOTE(sdague) for nicer compute_driver specification -from nova.virt.vmwareapi.driver import VMWareESXDriver +from nova.virt.vmwareapi.driver import VMwareESXDriver diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py index 8734df1f6..986c4ef28 100644 --- a/nova/virt/vmwareapi/driver.py +++ b/nova/virt/vmwareapi/driver.py @@ -52,29 +52,29 @@ LOG = logging.getLogger(__name__) vmwareapi_opts = [ cfg.StrOpt('vmwareapi_host_ip', default=None, - help='URL for connection to VMWare ESX host.Required if ' - 'compute_driver is vmwareapi.VMWareESXDriver.'), + help='URL for connection to VMware ESX host.Required if ' + 'compute_driver is vmwareapi.VMwareESXDriver.'), cfg.StrOpt('vmwareapi_host_username', default=None, - help='Username for connection to VMWare ESX host. ' + help='Username for connection to VMware ESX host. 
' 'Used only if compute_driver is ' - 'vmwareapi.VMWareESXDriver.'), + 'vmwareapi.VMwareESXDriver.'), cfg.StrOpt('vmwareapi_host_password', default=None, - help='Password for connection to VMWare ESX host. ' + help='Password for connection to VMware ESX host. ' 'Used only if compute_driver is ' - 'vmwareapi.VMWareESXDriver.'), + 'vmwareapi.VMwareESXDriver.'), cfg.FloatOpt('vmwareapi_task_poll_interval', default=5.0, help='The interval used for polling of remote tasks. ' 'Used only if compute_driver is ' - 'vmwareapi.VMWareESXDriver.'), + 'vmwareapi.VMwareESXDriver.'), cfg.IntOpt('vmwareapi_api_retry_count', default=10, help='The number of times we retry on failures, e.g., ' 'socket error, etc. ' 'Used only if compute_driver is ' - 'vmwareapi.VMWareESXDriver.'), + 'vmwareapi.VMwareESXDriver.'), ] CONF = cfg.CONF @@ -93,11 +93,11 @@ class Failure(Exception): return str(self.details) -class VMWareESXDriver(driver.ComputeDriver): +class VMwareESXDriver(driver.ComputeDriver): """The ESX host connection object.""" def __init__(self, virtapi, read_only=False, scheme="https"): - super(VMWareESXDriver, self).__init__(virtapi) + super(VMwareESXDriver, self).__init__(virtapi) host_ip = CONF.vmwareapi_host_ip host_username = CONF.vmwareapi_host_username @@ -107,11 +107,11 @@ class VMWareESXDriver(driver.ComputeDriver): raise Exception(_("Must specify vmwareapi_host_ip," "vmwareapi_host_username " "and vmwareapi_host_password to use" - "compute_driver=vmwareapi.VMWareESXDriver")) + "compute_driver=vmwareapi.VMwareESXDriver")) - session = VMWareAPISession(host_ip, host_username, host_password, + session = VMwareAPISession(host_ip, host_username, host_password, api_retry_count, scheme=scheme) - self._vmops = vmops.VMWareVMOps(session) + self._vmops = vmops.VMwareVMOps(session) def init_host(self, host): """Do the initialization that needs to be done.""" @@ -209,7 +209,7 @@ class VMWareESXDriver(driver.ComputeDriver): self._vmops.unplug_vifs(instance, network_info) -class 
VMWareAPISession(object): +class VMwareAPISession(object): """ Sets up a session with the ESX host and handles all the calls made to the host. diff --git a/nova/virt/vmwareapi/fake.py b/nova/virt/vmwareapi/fake.py index fdf85dc8b..3f5041c22 100644 --- a/nova/virt/vmwareapi/fake.py +++ b/nova/virt/vmwareapi/fake.py @@ -16,7 +16,7 @@ # under the License. """ -A fake VMWare VI API implementation. +A fake VMware VI API implementation. """ import pprint diff --git a/nova/virt/vmwareapi/network_utils.py b/nova/virt/vmwareapi/network_util.py index a3b20137d..d2bdad0c1 100644 --- a/nova/virt/vmwareapi/network_utils.py +++ b/nova/virt/vmwareapi/network_util.py @@ -38,7 +38,7 @@ def get_network_with_the_name(session, network_name="vmnet0"): vm_networks_ret = hostsystems[0].propSet[0].val # Meaning there are no networks on the host. suds responds with a "" # in the parent property field rather than a [] in the - # ManagedObjectRefernce property field of the parent + # ManagedObjectReference property field of the parent if not vm_networks_ret: return None vm_networks = vm_networks_ret.ManagedObjectReference diff --git a/nova/virt/vmwareapi/read_write_util.py b/nova/virt/vmwareapi/read_write_util.py index 52d095ad3..39ea8e2e8 100644 --- a/nova/virt/vmwareapi/read_write_util.py +++ b/nova/virt/vmwareapi/read_write_util.py @@ -108,8 +108,8 @@ class VMwareHTTPFile(object): raise NotImplementedError -class VMWareHTTPWriteFile(VMwareHTTPFile): - """VMWare file write handler class.""" +class VMwareHTTPWriteFile(VMwareHTTPFile): + """VMware file write handler class.""" def __init__(self, host, data_center_name, datastore_name, cookies, file_path, file_size, scheme="https"): @@ -140,12 +140,12 @@ class VMWareHTTPWriteFile(VMwareHTTPFile): self.conn.getresponse() except Exception, excep: LOG.debug(_("Exception during HTTP connection close in " - "VMWareHTTpWrite. Exception is %s") % excep) - super(VMWareHTTPWriteFile, self).close() + "VMwareHTTpWrite. 
Exception is %s") % excep) + super(VMwareHTTPWriteFile, self).close() -class VmWareHTTPReadFile(VMwareHTTPFile): - """VMWare file read handler class.""" +class VMwareHTTPReadFile(VMwareHTTPFile): + """VMware file read handler class.""" def __init__(self, host, data_center_name, datastore_name, cookies, file_path, scheme="https"): diff --git a/nova/virt/vmwareapi/vif.py b/nova/virt/vmwareapi/vif.py index 4d53e266d..c5b524186 100644 --- a/nova/virt/vmwareapi/vif.py +++ b/nova/virt/vmwareapi/vif.py @@ -15,12 +15,12 @@ # License for the specific language governing permissions and limitations # under the License. -"""VIF drivers for VMWare.""" +"""VIF drivers for VMware.""" from nova import exception from nova.openstack.common import cfg from nova.openstack.common import log as logging -from nova.virt.vmwareapi import network_utils +from nova.virt.vmwareapi import network_util LOG = logging.getLogger(__name__) @@ -44,28 +44,28 @@ def ensure_vlan_bridge(self, session, network): # Check if the vlan_interface physical network adapter exists on the # host. - if not network_utils.check_if_vlan_interface_exists(session, + if not network_util.check_if_vlan_interface_exists(session, vlan_interface): raise exception.NetworkAdapterNotFound(adapter=vlan_interface) # Get the vSwitch associated with the Physical Adapter - vswitch_associated = network_utils.get_vswitch_for_vlan_interface( + vswitch_associated = network_util.get_vswitch_for_vlan_interface( session, vlan_interface) if vswitch_associated is None: raise exception.SwitchNotFoundForNetworkAdapter( adapter=vlan_interface) # Check whether bridge already exists and retrieve the the ref of the # network whose name_label is "bridge" - network_ref = network_utils.get_network_with_the_name(session, bridge) + network_ref = network_util.get_network_with_the_name(session, bridge) if network_ref is None: # Create a port group on the vSwitch associated with the # vlan_interface corresponding physical network adapter on the ESX # host. 
- network_utils.create_port_group(session, bridge, + network_util.create_port_group(session, bridge, vswitch_associated, vlan_num) else: # Get the vlan id and vswitch corresponding to the port group - _get_pg_info = network_utils.get_vlanid_and_vswitch_for_portgroup + _get_pg_info = network_util.get_vlanid_and_vswitch_for_portgroup pg_vlanid, pg_vswitch = _get_pg_info(session, bridge) # Check if the vswitch associated is proper diff --git a/nova/virt/vmwareapi/vim.py b/nova/virt/vmwareapi/vim.py index 194b78a1d..83d120df5 100644 --- a/nova/virt/vmwareapi/vim.py +++ b/nova/virt/vmwareapi/vim.py @@ -1,5 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright (c) 2012 VMware, Inc. # Copyright (c) 2011 Citrix Systems, Inc. # Copyright 2011 OpenStack LLC. # @@ -101,69 +102,65 @@ class Vim: def __getattr__(self, attr_name): """Makes the API calls and gets the result.""" - try: - return getattr(self, attr_name) - except AttributeError: - - def vim_request_handler(managed_object, **kwargs): - """ - Builds the SOAP message and parses the response for fault - checking and other errors. 
- - managed_object : Managed Object Reference or Managed - Object Name - **kwargs : Keyword arguments of the call - """ - # Dynamic handler for VI SDK Calls - try: - request_mo = self._request_managed_object_builder( - managed_object) - request = getattr(self.client.service, attr_name) - response = request(request_mo, **kwargs) - # To check for the faults that are part of the message body - # and not returned as Fault object response from the ESX - # SOAP server - if hasattr(error_util.FaultCheckers, - attr_name.lower() + "_fault_checker"): - fault_checker = getattr(error_util.FaultCheckers, - attr_name.lower() + "_fault_checker") - fault_checker(response) - return response - # Catch the VimFaultException that is raised by the fault - # check of the SOAP response - except error_util.VimFaultException, excep: - raise - except suds.WebFault, excep: - doc = excep.document - detail = doc.childAtPath("/Envelope/Body/Fault/detail") - fault_list = [] - for child in detail.getChildren(): - fault_list.append(child.get("type")) - raise error_util.VimFaultException(fault_list, excep) - except AttributeError, excep: - raise error_util.VimAttributeError(_("No such SOAP method " - "'%s' provided by VI SDK") % (attr_name), excep) - except (httplib.CannotSendRequest, - httplib.ResponseNotReady, - httplib.CannotSendHeader), excep: - raise error_util.SessionOverLoadException(_("httplib " - "error in %s: ") % (attr_name), excep) - except Exception, excep: - # Socket errors which need special handling for they - # might be caused by ESX API call overload - if (str(excep).find(ADDRESS_IN_USE_ERROR) != -1 or - str(excep).find(CONN_ABORT_ERROR)) != -1: - raise error_util.SessionOverLoadException(_("Socket " - "error in %s: ") % (attr_name), excep) - # Type error that needs special handling for it might be - # caused by ESX host API call overload - elif str(excep).find(RESP_NOT_XML_ERROR) != -1: - raise error_util.SessionOverLoadException(_("Type " - "error in %s: ") % (attr_name), excep) 
- else: - raise error_util.VimException( - _("Exception in %s ") % (attr_name), excep) - return vim_request_handler + def vim_request_handler(managed_object, **kwargs): + """ + Builds the SOAP message and parses the response for fault + checking and other errors. + + managed_object : Managed Object Reference or Managed + Object Name + **kwargs : Keyword arguments of the call + """ + # Dynamic handler for VI SDK Calls + try: + request_mo = self._request_managed_object_builder( + managed_object) + request = getattr(self.client.service, attr_name) + response = request(request_mo, **kwargs) + # To check for the faults that are part of the message body + # and not returned as Fault object response from the ESX + # SOAP server + if hasattr(error_util.FaultCheckers, + attr_name.lower() + "_fault_checker"): + fault_checker = getattr(error_util.FaultCheckers, + attr_name.lower() + "_fault_checker") + fault_checker(response) + return response + # Catch the VimFaultException that is raised by the fault + # check of the SOAP response + except error_util.VimFaultException, excep: + raise + except suds.WebFault, excep: + doc = excep.document + detail = doc.childAtPath("/Envelope/Body/Fault/detail") + fault_list = [] + for child in detail.getChildren(): + fault_list.append(child.get("type")) + raise error_util.VimFaultException(fault_list, excep) + except AttributeError, excep: + raise error_util.VimAttributeError(_("No such SOAP method " + "'%s' provided by VI SDK") % (attr_name), excep) + except (httplib.CannotSendRequest, + httplib.ResponseNotReady, + httplib.CannotSendHeader), excep: + raise error_util.SessionOverLoadException(_("httplib " + "error in %s: ") % (attr_name), excep) + except Exception, excep: + # Socket errors which need special handling for they + # might be caused by ESX API call overload + if (str(excep).find(ADDRESS_IN_USE_ERROR) != -1 or + str(excep).find(CONN_ABORT_ERROR)) != -1: + raise error_util.SessionOverLoadException(_("Socket " + "error in %s: ") % 
(attr_name), excep) + # Type error that needs special handling for it might be + # caused by ESX host API call overload + elif str(excep).find(RESP_NOT_XML_ERROR) != -1: + raise error_util.SessionOverLoadException(_("Type " + "error in %s: ") % (attr_name), excep) + else: + raise error_util.VimException( + _("Exception in %s ") % (attr_name), excep) + return vim_request_handler def _request_managed_object_builder(self, managed_object): """Builds the request managed object.""" diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py index 740355679..e03b88804 100644 --- a/nova/virt/vmwareapi/vm_util.py +++ b/nova/virt/vmwareapi/vm_util.py @@ -26,7 +26,7 @@ def build_datastore_path(datastore_name, path): def split_datastore_path(datastore_path): """ - Split the VMWare style datastore path to get the Datastore + Split the VMware style datastore path to get the Datastore name and the entity path. """ spl = datastore_path.split('[', 1)[1].split(']', 1) diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index e591245e2..883e751a8 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -32,7 +32,7 @@ from nova import exception from nova.openstack.common import cfg from nova.openstack.common import importutils from nova.openstack.common import log as logging -from nova.virt.vmwareapi import network_utils +from nova.virt.vmwareapi import network_util from nova.virt.vmwareapi import vif as vmwarevif from nova.virt.vmwareapi import vim_util from nova.virt.vmwareapi import vm_util @@ -49,7 +49,7 @@ VMWARE_POWER_STATES = { 'suspended': power_state.PAUSED} -class VMWareVMOps(object): +class VMwareVMOps(object): """Management class for VM-related tasks.""" def __init__(self, session): @@ -157,7 +157,7 @@ class VMWareVMOps(object): vm_folder_mor, res_pool_mor = _get_vmfolder_and_res_pool_mors() def _check_if_network_bridge_exists(network_name): - network_ref = network_utils.get_network_with_the_name( + network_ref = 
network_util.get_network_with_the_name( self._session, network_name) if network_ref is None: raise exception.NetworkNotFoundForBridge(bridge=network_name) diff --git a/nova/virt/vmwareapi/vmware_images.py b/nova/virt/vmwareapi/vmware_images.py index 15237fd5b..7c4480ea0 100644 --- a/nova/virt/vmwareapi/vmware_images.py +++ b/nova/virt/vmwareapi/vmware_images.py @@ -50,11 +50,11 @@ def start_transfer(context, read_file_handle, data_size, # to read. read_thread = io_util.IOThread(read_file_handle, thread_safe_pipe) - # In case of Glance - VMWare transfer, we just need a handle to the - # HTTP Connection that is to send transfer data to the VMWare datastore. + # In case of Glance - VMware transfer, we just need a handle to the + # HTTP Connection that is to send transfer data to the VMware datastore. if write_file_handle: write_thread = io_util.IOThread(thread_safe_pipe, write_file_handle) - # In case of VMWare - Glance transfer, we relinquish VMWare HTTP file read + # In case of VMware - Glance transfer, we relinquish VMware HTTP file read # handle to Glance Client instance, but to be sure of the transfer we need # to be sure of the status of the image on glnace changing to active. # The GlanceWriteThread handles the same for us. 
@@ -96,7 +96,7 @@ def fetch_image(context, image, instance, **kwargs): f = StringIO.StringIO() image_service.download(context, image_id, f) read_file_handle = read_write_util.GlanceFileRead(f) - write_file_handle = read_write_util.VMWareHTTPWriteFile( + write_file_handle = read_write_util.VMwareHTTPWriteFile( kwargs.get("host"), kwargs.get("data_center_name"), kwargs.get("datastore_name"), @@ -113,7 +113,7 @@ def upload_image(context, image, instance, **kwargs): """Upload the snapshotted vm disk file to Glance image server.""" LOG.debug(_("Uploading image %s to the Glance image server") % image, instance=instance) - read_file_handle = read_write_util.VmWareHTTPReadFile( + read_file_handle = read_write_util.VMwareHTTPReadFile( kwargs.get("host"), kwargs.get("data_center_name"), kwargs.get("datastore_name"), diff --git a/nova/virt/xenapi/agent.py b/nova/virt/xenapi/agent.py index 61cfa9631..ef08edbc1 100644 --- a/nova/virt/xenapi/agent.py +++ b/nova/virt/xenapi/agent.py @@ -21,6 +21,9 @@ import os import time import uuid +from nova.api.metadata import password +from nova import context +from nova import crypto from nova.openstack.common import cfg from nova.openstack.common import jsonutils from nova.openstack.common import log as logging @@ -207,6 +210,12 @@ class XenAPIBasedAgent(object): LOG.error(msg, instance=self.instance) raise Exception(msg) + sshkey = self.instance.get('key_data') + if sshkey: + enc = crypto.ssh_encrypt_text(sshkey, new_pass) + password.set_password(context.get_admin_context(), + self.instance['uuid'], base64.b64encode(enc)) + return resp['message'] def inject_file(self, path, contents): diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py index 0acc360e8..a894e95b9 100644 --- a/nova/virt/xenapi/driver.py +++ b/nova/virt/xenapi/driver.py @@ -499,14 +499,15 @@ class XenAPIDriver(driver.ComputeDriver): pass def post_live_migration_at_destination(self, ctxt, instance_ref, - network_info, block_migration): + network_info, 
block_migration, + block_device_info=None): """Post operation of live migration at destination host. :params ctxt: security context :params instance_ref: nova.db.sqlalchemy.models.Instance object instance object that is migrated. - :params network_info: instance network infomation + :params network_info: instance network information :params : block_migration: if true, post operation of block_migraiton. """ # TODO(JohnGarbutt) look at moving/downloading ramdisk and kernel diff --git a/nova/virt/xenapi/pool_states.py b/nova/virt/xenapi/pool_states.py index 5bf326117..138f84831 100644 --- a/nova/virt/xenapi/pool_states.py +++ b/nova/virt/xenapi/pool_states.py @@ -19,10 +19,10 @@ A pool may be 'created', in which case the admin has triggered its creation, but the underlying hypervisor pool has not actually being set up -yet. An pool may be 'changing', meaning that the underlying hypervisor -pool is being setup. An pool may be 'active', in which case the underlying -hypervisor pool is up and running. An pool may be 'dismissed' when it has -no hosts and it has been deleted. An pool may be in 'error' in all other +yet. A pool may be 'changing', meaning that the underlying hypervisor +pool is being setup. A pool may be 'active', in which case the underlying +hypervisor pool is up and running. A pool may be 'dismissed' when it has +no hosts and it has been deleted. A pool may be in 'error' in all other cases. A 'created' pool becomes 'changing' during the first request of adding a host. During a 'changing' status no other requests will be accepted; @@ -34,7 +34,7 @@ All other operations (e.g. add/remove hosts) that succeed will keep the pool in the 'active' state. If a number of continuous requests fail, an 'active' pool goes into an 'error' state. To recover from such a state, admin intervention is required. Currently an error state is irreversible, -that is, in order to recover from it an pool must be deleted. +that is, in order to recover from it a pool must be deleted. 
""" CREATED = 'created' diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 6a0116098..52a5f37b2 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -883,11 +883,6 @@ def generate_configdrive(session, instance, vm_ref, userdevice, try: with vdi_attached_here(session, vdi_ref, read_only=False) as dev: - dev_path = utils.make_dev_path(dev) - - # NOTE(mikal): libvirt supports injecting the admin password as - # well. This is not currently implemented for xenapi as it is not - # supported by the existing file injection extra_md = {} if admin_password: extra_md['admin_pass'] = admin_password @@ -899,6 +894,7 @@ def generate_configdrive(session, instance, vm_ref, userdevice, tmp_file = os.path.join(tmp_path, 'configdrive') cdb.make_drive(tmp_file) + dev_path = utils.make_dev_path(dev) utils.execute('dd', 'if=%s' % tmp_file, 'of=%s' % dev_path, @@ -1514,7 +1510,7 @@ def fetch_bandwidth(session): def compile_metrics(start_time, stop_time=None): """Compile bandwidth usage, cpu, and disk metrics for all VMs on this host. - Note that some stats, like bandwith, do not seem to be very + Note that some stats, like bandwidth, do not seem to be very accurate in some of the data from XenServer (mdragon). 
""" start_time = int(start_time) diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 5f79b6c3a..c2d717cfd 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -125,6 +125,7 @@ class VolumeOps(object): try: vbd_ref = vm_utils.find_vbd_by_number(self._session, vm_ref, device_number) + sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref) except volume_utils.StorageError, exc: LOG.exception(exc) raise Exception(_('Unable to locate volume %s') % mountpoint) @@ -143,7 +144,6 @@ class VolumeOps(object): # Forget SR only if no other volumes on this host are using it try: - sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref) volume_utils.purge_sr(self._session, sr_ref) except volume_utils.StorageError, exc: LOG.exception(exc) diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py index 514295605..3e1ccc66b 100644 --- a/nova/volume/cinder.py +++ b/nova/volume/cinder.py @@ -42,9 +42,15 @@ cinder_opts = [ default=None, help='Override service catalog lookup with template for cinder ' 'endpoint e.g. 
http://localhost:8776/v1/%(project_id)s'), + cfg.StrOpt('os_region_name', + default=None, + help='region name of this node'), cfg.IntOpt('cinder_http_retries', default=3, help='Number of cinderclient retries on failed http calls'), + cfg.BoolOpt('cinder_api_insecure', + default=False, + help='Allow to perform insecure SSL requests to cinder'), ] CONF = cfg.CONF @@ -66,7 +72,16 @@ def cinderclient(context): else: info = CONF.cinder_catalog_info service_type, service_name, endpoint_type = info.split(':') - url = sc.url_for(service_type=service_type, + # extract the region if set in configuration + if CONF.os_region_name: + attr = 'region' + filter_value = CONF.os_region_name + else: + attr = None + filter_value = None + url = sc.url_for(attr=attr, + filter_value=filter_value, + service_type=service_type, service_name=service_name, endpoint_type=endpoint_type) @@ -76,6 +91,7 @@ def cinderclient(context): context.auth_token, project_id=context.project_id, auth_url=url, + insecure=CONF.cinder_api_insecure, retries=CONF.cinder_http_retries) # noauth extracts user_id:project_id from auth_token c.client.auth_token = context.auth_token or '%s:%s' % (context.user_id, diff --git a/nova/wsgi.py b/nova/wsgi.py index c103526da..0a7570b6c 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -28,6 +28,7 @@ import eventlet.wsgi import greenlet from paste import deploy import routes.middleware +import ssl import webob.dec import webob.exc @@ -45,7 +46,21 @@ wsgi_opts = [ help='A python format string that is used as the template to ' 'generate log lines. 
The following values can be formatted ' 'into it: client_ip, date_time, request_line, status_code, ' - 'body_length, wall_seconds.') + 'body_length, wall_seconds.'), + cfg.StrOpt('ssl_ca_file', + default=None, + help="CA certificate file to use to verify " + "connecting clients"), + cfg.StrOpt('ssl_cert_file', + default=None, + help="SSL certificate of API server"), + cfg.StrOpt('ssl_key_file', + default=None, + help="SSL private key of API server"), + cfg.IntOpt('tcp_keepidle', + default=600, + help="Sets the value of TCP_KEEPIDLE in seconds for each " + "server socket. Not supported on OS X.") ] CONF = cfg.CONF CONF.register_opts(wsgi_opts) @@ -59,7 +74,8 @@ class Server(object): default_pool_size = 1000 def __init__(self, name, app, host='0.0.0.0', port=0, pool_size=None, - protocol=eventlet.wsgi.HttpProtocol, backlog=128): + protocol=eventlet.wsgi.HttpProtocol, backlog=128, + use_ssl=False): """Initialize, but do not start, a WSGI server. :param name: Pretty name for logging. @@ -78,18 +94,27 @@ class Server(object): self._pool = eventlet.GreenPool(pool_size or self.default_pool_size) self._logger = logging.getLogger("nova.%s.wsgi.server" % self.name) self._wsgi_logger = logging.WritableLogger(self._logger) + self._use_ssl = use_ssl if backlog < 1: raise exception.InvalidInput( reason='The backlog must be more than 1') + bind_addr = (host, port) + # TODO(dims): eventlet's green dns/socket module does not actually + # support IPv6 in getaddrinfo(). 
We need to get around this in the + # future or monitor upstream for a fix try: - socket.inet_pton(socket.AF_INET6, host) - family = socket.AF_INET6 + info = socket.getaddrinfo(bind_addr[0], + bind_addr[1], + socket.AF_UNSPEC, + socket.SOCK_STREAM)[0] + family = info[0] + bind_addr = info[-1] except Exception: family = socket.AF_INET - self._socket = eventlet.listen((host, port), family, backlog=backlog) + self._socket = eventlet.listen(bind_addr, family, backlog=backlog) (self.host, self.port) = self._socket.getsockname()[0:2] LOG.info(_("%(name)s listening on %(host)s:%(port)s") % self.__dict__) @@ -98,6 +123,60 @@ class Server(object): :returns: None """ + if self._use_ssl: + try: + ca_file = CONF.ssl_ca_file + cert_file = CONF.ssl_cert_file + key_file = CONF.ssl_key_file + + if cert_file and not os.path.exists(cert_file): + raise RuntimeError( + _("Unable to find cert_file : %s") % cert_file) + + if ca_file and not os.path.exists(ca_file): + raise RuntimeError( + _("Unable to find ca_file : %s") % ca_file) + + if key_file and not os.path.exists(key_file): + raise RuntimeError( + _("Unable to find key_file : %s") % key_file) + + if self._use_ssl and (not cert_file or not key_file): + raise RuntimeError( + _("When running server in SSL mode, you must " + "specify both a cert_file and key_file " + "option value in your configuration file")) + ssl_kwargs = { + 'server_side': True, + 'certfile': cert_file, + 'keyfile': key_file, + 'cert_reqs': ssl.CERT_NONE, + } + + if CONF.ssl_ca_file: + ssl_kwargs['ca_certs'] = ca_file + ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED + + self._socket = eventlet.wrap_ssl(self._socket, + **ssl_kwargs) + + self._socket.setsockopt(socket.SOL_SOCKET, + socket.SO_REUSEADDR, 1) + # sockets can hang around forever without keepalive + self._socket.setsockopt(socket.SOL_SOCKET, + socket.SO_KEEPALIVE, 1) + + # This option isn't available in the OS X version of eventlet + if hasattr(socket, 'TCP_KEEPIDLE'): + 
self._socket.setsockopt(socket.IPPROTO_TCP, + socket.TCP_KEEPIDLE, + CONF.tcp_keepidle) + + except Exception: + LOG.error(_("Failed to start %(name)s on %(host)s" + ":%(port)s with SSL support") % self.__dict__) + raise + self._server = eventlet.spawn(eventlet.wsgi.server, self._socket, self.app, diff --git a/openstack-common.conf b/openstack-common.conf index ea33ab235..a0b14e651 100644 --- a/openstack-common.conf +++ b/openstack-common.conf @@ -1,7 +1,7 @@ [DEFAULT] # The list of modules to copy from openstack-common -modules=cfg,cliutils,context,excutils,eventlet_backdoor,fileutils,gettextutils,importutils,iniparser,jsonutils,local,lockutils,log,network_utils,notifier,plugin,policy,setup,timeutils,rpc,uuidutils +modules=cfg,cliutils,context,excutils,eventlet_backdoor,fileutils,gettextutils,importutils,iniparser,jsonutils,local,lockutils,log,network_utils,notifier,plugin,policy,rootwrap,setup,timeutils,rpc,uuidutils # The base module to hold the copy of openstack.common base=nova diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration index 35316a9b8..b9e9da2e2 100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration @@ -16,7 +16,7 @@ # under the License. """ -XenAPI Plugin for transfering data between host nodes +XenAPI Plugin for transferring data between host nodes """ import utils diff --git a/run_tests.sh b/run_tests.sh index 1a54c1bef..238f5e194 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -81,15 +81,19 @@ function run_tests { if [ $coverage -eq 1 ]; then # Do not test test_coverage_ext when gathering coverage. 
if [ "x$testrargs" = "x" ]; then - testrargs="^(?!.*test_coverage_ext).*$" + testrargs="^(?!.*test.*coverage).*$" fi - export PYTHON="${wrapper} coverage run --source nova --parallel-mode" + TESTRTESTS="$TESTRTESTS --coverage" + else + TESTRTESTS="$TESTRTESTS --slowest" fi + # Just run the test suites in current environment set +e - TESTRTESTS="$TESTRTESTS $testrargs" + testrargs=`echo "$testrargs" | sed -e's/^\s*\(.*\)\s*$/\1/'` + TESTRTESTS="$TESTRTESTS --testr-args='$testrargs'" echo "Running \`${wrapper} $TESTRTESTS\`" - ${wrapper} $TESTRTESTS + bash -c "${wrapper} $TESTRTESTS" RESULT=$? set -e @@ -121,8 +125,14 @@ function run_pep8 { srcfiles+=" setup.py" # Until all these issues get fixed, ignore. - ignore='--ignore=E12,E711,E721,E712' + ignore='--ignore=E12,E711,E721,E712,N403,N404' + + # First run the hacking selftest, to make sure it's right + echo "Running hacking.py self test" + ${wrapper} python tools/hacking.py --doctest + # Then actually run it + echo "Running pep8" ${wrapper} python tools/hacking.py ${ignore} ${srcfiles} # NOTE(sdague): as of grizzly-2 these are passing however leaving the comment @@ -137,7 +147,7 @@ function run_pep8 { } -TESTRTESTS="testr run --parallel $testropts" +TESTRTESTS="python setup.py testr $testropts" if [ $never_venv -eq 0 ] then @@ -66,6 +66,7 @@ setuptools.setup(name='nova', 'bin/nova-objectstore', 'bin/nova-rootwrap', 'bin/nova-scheduler', + 'bin/nova-spicehtml5proxy', 'bin/nova-xvpvncproxy', ], py_modules=[]) diff --git a/tools/conf/extract_opts.py b/tools/conf/extract_opts.py index 3185cb93d..4dde53335 100644 --- a/tools/conf/extract_opts.py +++ b/tools/conf/extract_opts.py @@ -2,7 +2,6 @@ # Copyright 2012 SINA Corporation # All Rights Reserved. -# Author: Zhongyue Luo <lzyeval@gmail.com> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -15,6 +14,9 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +# +# @author: Zhongyue Luo, SINA Corporation. +# """Extracts OpenStack config option info from module(s).""" @@ -35,6 +37,15 @@ FLOATOPT = "FloatOpt" LISTOPT = "ListOpt" MULTISTROPT = "MultiStrOpt" +OPT_TYPES = { + STROPT: 'string value', + BOOLOPT: 'boolean value', + INTOPT: 'integer value', + FLOATOPT: 'floating point value', + LISTOPT: 'list value', + MULTISTROPT: 'multi valued', +} + OPTION_COUNT = 0 OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT, FLOATOPT, LISTOPT, @@ -63,10 +74,6 @@ def main(srcfiles): # The options list is a list of (module, options) tuples opts_by_group = {'DEFAULT': []} - opts_by_group['DEFAULT'].append( - (cfg.__name__ + ':' + cfg.CommonConfigOpts.__name__, - _list_opts(cfg.CommonConfigOpts)[0][1])) - for pkg_name in pkg_names: mods = mods_by_pkg.get(pkg_name) mods.sort() @@ -187,33 +194,19 @@ def _get_my_ip(): return None -MY_IP = _get_my_ip() -HOST = socket.getfqdn() - - def _sanitize_default(s): """Set up a reasonably sensible default for pybasedir, my_ip and host.""" if s.startswith(BASEDIR): return s.replace(BASEDIR, '/usr/lib/python/site-packages') - elif s == MY_IP: + elif s == _get_my_ip(): return '10.0.0.1' - elif s == HOST: + elif s == socket.getfqdn(): return 'nova' elif s.strip() != s: return '"%s"' % s return s -OPT_TYPES = { - 'StrOpt': 'string value', - 'BoolOpt': 'boolean value', - 'IntOpt': 'integer value', - 'FloatOpt': 'floating point value', - 'ListOpt': 'list value', - 'MultiStrOpt': 'multi valued', -} - - def _print_opt(opt): opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help if not opt_help: diff --git a/tools/hacking.py b/tools/hacking.py index 7322fd071..56f6694bd 100755 --- a/tools/hacking.py +++ b/tools/hacking.py @@ -21,7 +21,6 @@ built on top of pep8.py """ -import fnmatch 
import inspect import logging import os @@ -46,16 +45,15 @@ logging.disable('LOG') #N8xx git commit messages IMPORT_EXCEPTIONS = ['sqlalchemy', 'migrate', 'nova.db.sqlalchemy.session'] -DOCSTRING_TRIPLE = ['"""', "'''"] +START_DOCSTRING_TRIPLE = ['u"""', 'r"""', '"""', "u'''", "r'''", "'''"] +END_DOCSTRING_TRIPLE = ['"""', "'''"] VERBOSE_MISSING_IMPORT = os.getenv('HACKING_VERBOSE_MISSING_IMPORT', 'False') # Monkey patch broken excluded filter in pep8 # See https://github.com/jcrocholl/pep8/pull/111 def excluded(self, filename): - """ - Check if options.exclude contains a pattern that matches filename. - """ + """Check if options.exclude contains a pattern that matches filename.""" basename = os.path.basename(filename) return any((pep8.filename_match(filename, self.options.exclude, default=False), @@ -110,68 +108,85 @@ def nova_todo_format(physical_line): nova HACKING guide recommendation for TODO: Include your name with TODOs as in "#TODO(termie)" - N101 + + Okay: #TODO(sdague) + N101: #TODO fail """ + # TODO(sdague): TODO check shouldn't fail inside of space pos = physical_line.find('TODO') pos1 = physical_line.find('TODO(') pos2 = physical_line.find('#') # make sure it's a comment - if (pos != pos1 and pos2 >= 0 and pos2 < pos): - return pos, "NOVA N101: Use TODO(NAME)" + # TODO(sdague): should be smarter on this test + this_test = physical_line.find('N101: #TODO fail') + if pos != pos1 and pos2 >= 0 and pos2 < pos and this_test == -1: + return pos, "N101: Use TODO(NAME)" def nova_except_format(logical_line): - """Check for 'except:'. + r"""Check for 'except:'. 
nova HACKING guide recommends not using except: Do not write "except:", use "except Exception:" at the very least - N201 + + Okay: except Exception: + N201: except: """ if logical_line.startswith("except:"): - yield 6, "NOVA N201: no 'except:' at least use 'except Exception:'" + yield 6, "N201: no 'except:' at least use 'except Exception:'" def nova_except_format_assert(logical_line): - """Check for 'assertRaises(Exception'. + r"""Check for 'assertRaises(Exception'. nova HACKING guide recommends not using assertRaises(Exception...): Do not use overly broad Exception type - N202 + + Okay: self.assertRaises(NovaException) + N202: self.assertRaises(Exception) """ if logical_line.startswith("self.assertRaises(Exception"): - yield 1, "NOVA N202: assertRaises Exception too broad" + yield 1, "N202: assertRaises Exception too broad" def nova_one_import_per_line(logical_line): - """Check for import format. + r"""Check for import format. nova HACKING guide recommends one import per line: Do not import more than one module per line Examples: - BAD: from nova.rpc.common import RemoteError, LOG - N301 + Okay: from nova.rpc.common import RemoteError + N301: from nova.rpc.common import RemoteError, LOG """ pos = logical_line.find(',') parts = logical_line.split() if (pos > -1 and (parts[0] == "import" or parts[0] == "from" and parts[2] == "import") and not is_import_exception(parts[1])): - yield pos, "NOVA N301: one import per line" + yield pos, "N301: one import per line" _missingImport = set([]) def nova_import_module_only(logical_line): - """Check for import module only. + r"""Check for import module only. 
nova HACKING guide recommends importing only modules: Do not import objects, only modules - N302 import only modules - N303 Invalid Import - N304 Relative Import + + Okay: from os import path + N302 from os.path import mkdir as mkdir2 + N303 import bubba + N304 import blueblue """ + # N302 import only modules + # N303 Invalid Import + # N304 Relative Import + + # TODO(sdague) actually get these tests working def importModuleCheck(mod, parent=None, added=False): - """ + """Import Module helper function. + If can't find module on first try, recursively check for relative imports """ @@ -193,10 +208,10 @@ def nova_import_module_only(logical_line): if added: sys.path.pop() added = False - return logical_line.find(mod), ("NOVA N304: No " + return logical_line.find(mod), ("N304: No " "relative imports. '%s' is a relative import" % logical_line) - return logical_line.find(mod), ("NOVA N302: import only " + return logical_line.find(mod), ("N302: import only " "modules. '%s' does not import a module" % logical_line) @@ -219,7 +234,7 @@ def nova_import_module_only(logical_line): except AttributeError: # Invalid import - return logical_line.find(mod), ("NOVA N303: Invalid import, " + return logical_line.find(mod), ("N303: Invalid import, " "AttributeError raised") # convert "from x import y" to " import x.y" @@ -240,78 +255,140 @@ def nova_import_module_only(logical_line): #TODO(jogo): import template: N305 -def nova_import_alphabetical(logical_line, line_number, lines): - """Check for imports in alphabetical order. +def nova_import_alphabetical(logical_line, blank_lines, previous_logical, + indent_level, previous_indent_level): + r"""Check for imports in alphabetical order. 
nova HACKING guide recommendation for imports: imports in human alphabetical order - N306 + + Okay: import os\nimport sys\n\nimport nova\nfrom nova import test + N306: import sys\nimport os """ # handle import x # use .lower since capitalization shouldn't dictate order split_line = import_normalize(logical_line.strip()).lower().split() - split_previous = import_normalize(lines[line_number - 2] - ).strip().lower().split() - # with or without "as y" - length = [2, 4] - if (len(split_line) in length and len(split_previous) in length and - split_line[0] == "import" and split_previous[0] == "import"): - if split_line[1] < split_previous[1]: - yield (0, "NOVA N306: imports not in alphabetical order (%s, %s)" - % (split_previous[1], split_line[1])) + split_previous = import_normalize(previous_logical.strip()).lower().split() + + if blank_lines < 1 and indent_level == previous_indent_level: + length = [2, 4] + if (len(split_line) in length and len(split_previous) in length and + split_line[0] == "import" and split_previous[0] == "import"): + if split_line[1] < split_previous[1]: + yield (0, "N306: imports not in alphabetical order (%s, %s)" + % (split_previous[1], split_line[1])) def nova_import_no_db_in_virt(logical_line, filename): - if ("nova/virt" in filename and - not filename.endswith("fake.py") and - "nova import db" in logical_line): - yield (0, "NOVA N307: nova.db import not allowed in nova/virt/*") + """Check for db calls from nova/virt + + As of grizzly-2 all the database calls have been removed from + nova/virt, and we want to keep it that way. 
+ + N307 + """ + if "nova/virt" in filename and not filename.endswith("fake.py"): + if logical_line.startswith("from nova import db"): + yield (0, "N307: nova.db import not allowed in nova/virt/*") + + +def in_docstring_position(previous_logical): + return (previous_logical.startswith("def ") or + previous_logical.startswith("class ")) def nova_docstring_start_space(physical_line, previous_logical): - """Check for docstring not start with space. + r"""Check for docstring not start with space. nova HACKING guide recommendation for docstring: Docstring should not start with space - N401 + + Okay: def foo():\n '''This is good.''' + N401: def foo():\n ''' This is not.''' """ + # short circuit so that we don't fail on our own fail test + # when running under external pep8 + if physical_line.find("N401: def foo()") != -1: + return + # it's important that we determine this is actually a docstring, # and not a doc block used somewhere after the first line of a # function def - if (previous_logical.startswith("def ") or - previous_logical.startswith("class ")): - pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) - if (pos != -1 and len(physical_line) > pos + 4): - if (physical_line[pos + 3] == ' '): - return (pos, "NOVA N401: docstring should not start with" + if in_docstring_position(previous_logical): + pos = max([physical_line.find(i) for i in START_DOCSTRING_TRIPLE]) + if pos != -1 and len(physical_line) > pos + 4: + if physical_line[pos + 3] == ' ': + return (pos, "N401: docstring should not start with" " a space") def nova_docstring_one_line(physical_line): - """Check one line docstring end. + r"""Check one line docstring end. nova HACKING guide recommendation for one line docstring: - A one line docstring looks like this and ends in a period. - N402 + A one line docstring looks like this and ends in punctuation. 
+ + Okay: '''This is good.''' + N402: '''This is not''' + N402: '''Bad punctuation,''' """ - pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start - end = max([physical_line[-4:-1] == i for i in DOCSTRING_TRIPLE]) # end - if (pos != -1 and end and len(physical_line) > pos + 4): - if (physical_line[-5] not in ['.', '?', '!']): - return pos, "NOVA N402: one line docstring needs a period" + #TODO(jogo) make this apply to multi line docstrings as well + line = physical_line.lstrip() + if line.startswith('"') or line.startswith("'"): + pos = max([line.find(i) for i in START_DOCSTRING_TRIPLE]) # start + end = max([line[-4:-1] == i for i in END_DOCSTRING_TRIPLE]) # end -def nova_docstring_multiline_end(physical_line): - """Check multi line docstring end. + if pos != -1 and end and len(line) > pos + 4: + if line[-5] not in ['.', '?', '!']: + return pos, "N402: one line docstring needs punctuation." + + +def nova_docstring_multiline_end(physical_line, previous_logical): + r"""Check multi line docstring end. nova HACKING guide recommendation for docstring: Docstring should end on a new line - N403 + + Okay: '''foobar\nfoo\nbar\n''' + N403: def foo():\n'''foobar\nfoo\nbar\n d'''\n\n + """ + if in_docstring_position(previous_logical): + pos = max(physical_line.find(i) for i in END_DOCSTRING_TRIPLE) + if pos != -1 and len(physical_line) == pos + 4: + if physical_line.strip() not in START_DOCSTRING_TRIPLE: + return (pos, "N403: multi line docstring end on new line") + + +def nova_docstring_multiline_start(physical_line, previous_logical, tokens): + r"""Check multi line docstring start with summary. 
+ + nova HACKING guide recommendation for docstring: + Docstring should start with A multi line docstring has a one-line summary + + Okay: '''foobar\nfoo\nbar\n''' + N404: def foo():\n'''\nfoo\nbar\n''' \n\n """ - pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start - if (pos != -1 and len(physical_line) == pos): - if (physical_line[pos + 3] == ' '): - return (pos, "NOVA N403: multi line docstring end on new line") + if in_docstring_position(previous_logical): + pos = max([physical_line.find(i) for i in START_DOCSTRING_TRIPLE]) + # start of docstring when len(tokens)==0 + if len(tokens) == 0 and pos != -1 and len(physical_line) == pos + 4: + if physical_line.strip() in START_DOCSTRING_TRIPLE: + return (pos, "N404: multi line docstring " + "should start with a summary") + + +def nova_no_cr(physical_line): + r"""Check that we only use newlines not cariage returns. + + Okay: import os\nimport sys + # pep8 doesn't yet replace \r in strings, will work on an + # upstream fix + N901 import os\r\nimport sys + """ + pos = physical_line.find('\r') + if pos != -1 and pos == (len(physical_line) - 2): + return (pos, "N901: Windows style line endings not allowed in code") FORMAT_RE = re.compile("%(?:" @@ -339,6 +416,7 @@ def check_i18n(): token_type, text, _, _, line = yield except GeneratorExit: return + if (token_type == tokenize.NAME and text == "_" and not line.startswith('def _(msg):')): @@ -361,22 +439,22 @@ def check_i18n(): if not format_string: raise LocalizationError(start, - "NOVA N701: Empty localization string") + "N701: Empty localization string") if token_type != tokenize.OP: raise LocalizationError(start, - "NOVA N701: Invalid localization call") + "N701: Invalid localization call") if text != ")": if text == "%": raise LocalizationError(start, - "NOVA N702: Formatting operation should be outside" + "N702: Formatting operation should be outside" " of localization method call") elif text == "+": raise LocalizationError(start, - "NOVA N702: Use bare 
string concatenation instead" + "N702: Use bare string concatenation instead" " of +") else: raise LocalizationError(start, - "NOVA N702: Argument to _ must be just a string") + "N702: Argument to _ must be just a string") format_specs = FORMAT_RE.findall(format_string) positional_specs = [(key, spec) for key, spec in format_specs @@ -384,17 +462,21 @@ def check_i18n(): # not spec means %%, key means %(smth)s if len(positional_specs) > 1: raise LocalizationError(start, - "NOVA N703: Multiple positional placeholders") + "N703: Multiple positional placeholders") def nova_localization_strings(logical_line, tokens): - """Check localization in line. - - N701: bad localization call - N702: complex expression instead of string as argument to _() - N703: multiple positional placeholders + r"""Check localization in line. + + Okay: _("This is fine") + Okay: _("This is also fine %s") + N701: _('') + N702: _("Bob" + " foo") + N702: _("Bob %s" % foo) + # N703 check is not quite right, disabled by removing colon + N703 _("%s %s" % (foo, bar)) """ - + # TODO(sdague) actually get these tests working gen = check_i18n() next(gen) try: @@ -466,18 +548,36 @@ def once_git_check_commit_title(): error = True return error +imports_on_separate_lines_N301_compliant = r""" + Imports should usually be on separate lines. 
+ + Okay: import os\nimport sys + E401: import sys, os + + N301: from subprocess import Popen, PIPE + Okay: from myclas import MyClass + Okay: from foo.bar.yourclass import YourClass + Okay: import myclass + Okay: import foo.bar.yourclass + """ + if __name__ == "__main__": #include nova path sys.path.append(os.getcwd()) #Run once tests (not per line) once_error = once_git_check_commit_title() #NOVA error codes start with an N + pep8.SELFTEST_REGEX = re.compile(r'(Okay|[EWN]\d{3}):\s(.*)') pep8.ERRORCODE_REGEX = re.compile(r'[EWN]\d{3}') add_nova() pep8.current_file = current_file pep8.readlines = readlines pep8.StyleGuide.excluded = excluded pep8.StyleGuide.input_dir = input_dir + # we need to kill this doctring otherwise the self tests fail + pep8.imports_on_separate_lines.__doc__ = \ + imports_on_separate_lines_N301_compliant + try: pep8._main() sys.exit(once_error) diff --git a/tools/lintstack.sh b/tools/lintstack.sh index 42c6a60b3..d8591d03d 100755 --- a/tools/lintstack.sh +++ b/tools/lintstack.sh @@ -20,7 +20,16 @@ # commit for review. set -e TOOLS_DIR=$(cd $(dirname "$0") && pwd) -GITHEAD=`git rev-parse HEAD` +# Get the current branch name. +GITHEAD=`git rev-parse --abbrev-ref HEAD` +if [[ "$GITHEAD" == "HEAD" ]]; then + # In detached head mode, get revision number instead + GITHEAD=`git rev-parse HEAD` + echo "Currently we are at commit $GITHEAD" +else + echo "Currently we are at branch $GITHEAD" +fi + cp -f $TOOLS_DIR/lintstack.py $TOOLS_DIR/lintstack.head.py if git rev-parse HEAD^2 2>/dev/null; then @@ -47,8 +56,4 @@ git checkout $GITHEAD $TOOLS_DIR/lintstack.head.py echo "Check passed. FYI: the pylint exceptions are:" cat $TOOLS_DIR/pylint_exceptions -echo -echo "You are in detached HEAD mode. If you are a developer" -echo "and not very familiar with git, you might want to do" -echo "'git checkout branch-name' to go back to your branch." 
diff --git a/tools/pip-requires b/tools/pip-requires index 1845ba7dd..231d5cfe5 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -25,3 +25,4 @@ python-quantumclient>=2.1 python-glanceclient>=0.5.0,<2 python-keystoneclient>=0.2.0 stevedore>=0.7 +websockify diff --git a/tools/test-requires b/tools/test-requires index c1683fe27..bc279166e 100644 --- a/tools/test-requires +++ b/tools/test-requires @@ -11,5 +11,5 @@ pep8==1.3.3 pylint==0.25.2 python-subunit sphinx>=1.1.2 -testrepository>=0.0.12 -testtools>=0.9.22 +testrepository>=0.0.13 +testtools>=0.9.26 diff --git a/tools/xenserver/cleanup_sm_locks.py b/tools/xenserver/cleanup_sm_locks.py new file mode 100755 index 000000000..de455b076 --- /dev/null +++ b/tools/xenserver/cleanup_sm_locks.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python + +# Copyright 2013 OpenStack, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Script to cleanup old XenServer /var/lock/sm locks. + +XenServer 5.6 and 6.0 do not appear to always cleanup locks when using a +FileSR. ext3 has a limit of 32K inode links, so when we have 32K-2 (31998) +locks laying around, builds will begin to fail because we can't create any +additional locks. This cleanup script is something we can run periodically as +a stop-gap measure until this is fixed upstream. + +This script should be run on the dom0 of the affected machine. 
+""" +import errno +import optparse +import os +import sys +import time + +BASE = '/var/lock/sm' + + +def _get_age_days(secs): + return float(time.time() - secs) / 86400 + + +def _parse_args(): + parser = optparse.OptionParser() + parser.add_option("-d", "--dry-run", + action="store_true", dest="dry_run", default=False, + help="don't actually remove locks") + parser.add_option("-l", "--limit", + action="store", type='int', dest="limit", + default=sys.maxint, + help="max number of locks to delete (default: no limit)") + parser.add_option("-v", "--verbose", + action="store_true", dest="verbose", default=False, + help="don't print status messages to stdout") + + options, args = parser.parse_args() + + try: + days_old = int(args[0]) + except (IndexError, ValueError): + parser.print_help() + sys.exit(1) + + return options, days_old + + +def main(): + options, days_old = _parse_args() + + if not os.path.exists(BASE): + print >> sys.stderr, "error: '%s' doesn't exist. Make sure you're"\ + " running this on the dom0." 
% BASE + sys.exit(1) + + lockpaths_removed = 0 + nspaths_removed = 0 + + for nsname in os.listdir(BASE)[:options.limit]: + nspath = os.path.join(BASE, nsname) + + if not os.path.isdir(nspath): + continue + + # Remove old lockfiles + removed = 0 + locknames = os.listdir(nspath) + for lockname in locknames: + lockpath = os.path.join(nspath, lockname) + lock_age_days = _get_age_days(os.path.getmtime(lockpath)) + if lock_age_days > days_old: + lockpaths_removed += 1 + removed += 1 + + if options.verbose: + print 'Removing old lock: %03d %s' % (lock_age_days, + lockpath) + + if not options.dry_run: + os.unlink(lockpath) + + # Remove empty namespace paths + if len(locknames) == removed: + nspaths_removed += 1 + + if options.verbose: + print 'Removing empty namespace: %s' % nspath + + if not options.dry_run: + try: + os.rmdir(nspath) + except OSError, e: + if e.errno == errno.ENOTEMPTY: + print >> sys.stderr, "warning: directory '%s'"\ + " not empty" % nspath + else: + raise + + if options.dry_run: + print "** Dry Run **" + + print "Total locks removed: ", lockpaths_removed + print "Total namespaces removed: ", nspaths_removed + + +if __name__ == '__main__': + main() diff --git a/tools/xenserver/vm_vdi_cleaner.py b/tools/xenserver/vm_vdi_cleaner.py index eeaf978b8..27b89d510 100755 --- a/tools/xenserver/vm_vdi_cleaner.py +++ b/tools/xenserver/vm_vdi_cleaner.py @@ -42,6 +42,7 @@ cleaner_opts = [ ] CONF = cfg.CONF CONF.register_opts(cleaner_opts) +CONF.import_opt('verbose', 'nova.openstack.common.log') CONF.import_opt("resize_confirm_window", "nova.compute.manager") @@ -17,9 +17,10 @@ downloadcache = ~/cache/pip [testenv:pep8] deps=pep8==1.3.3 commands = - python tools/hacking.py --ignore=E12,E711,E721,E712 --repeat --show-source \ + python tools/hacking.py --doctest + python tools/hacking.py --ignore=E12,E711,E721,E712,N403,N404 --show-source \ --exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg . 
- python tools/hacking.py --ignore=E12,E711,E721,E712 --repeat --show-source \ + python tools/hacking.py --ignore=E12,E711,E721,E712,N403,N404 --show-source \ --filename=nova* bin [testenv:pylint] @@ -37,7 +38,7 @@ commands = python tools/flakes.py nova # tests conflict with coverage. commands = python setup.py testr --coverage \ - --testr-args='^(?!.*test_coverage_ext).*$' + --testr-args='^(?!.*test.*coverage).*$' [testenv:venv] commands = {posargs} |