summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTrey Morris <trey.morris@rackspace.com>2011-06-08 13:27:00 -0500
committerTrey Morris <trey.morris@rackspace.com>2011-06-08 13:27:00 -0500
commita538fceb8cf6d692db0e3585b99ed10a17197960 (patch)
tree963ec4e5000c2b07088f78e7e746cff20a20179f
parent0438855659d89133e588dd4201956a901ed85787 (diff)
parent405945ad4801a9e434bcf9292bc01db354a9f2a7 (diff)
merged trunk again
-rwxr-xr-xbin/nova-manage55
-rw-r--r--doc/source/devref/distributed_scheduler.rst168
-rw-r--r--nova/api/direct.py2
-rw-r--r--nova/api/ec2/__init__.py1
-rw-r--r--nova/api/ec2/admin.py4
-rw-r--r--nova/api/ec2/cloud.py24
-rw-r--r--nova/api/openstack/common.py46
-rw-r--r--nova/api/openstack/extensions.py4
-rw-r--r--nova/api/openstack/images.py91
-rw-r--r--nova/api/openstack/views/limits.py9
-rw-r--r--nova/db/sqlalchemy/api.py46
-rw-r--r--nova/db/sqlalchemy/models.py1
-rw-r--r--nova/image/fake.py4
-rw-r--r--nova/image/glance.py12
-rw-r--r--nova/image/local.py6
-rw-r--r--nova/network/manager.py14
-rw-r--r--nova/tests/api/openstack/fakes.py33
-rw-r--r--nova/tests/api/openstack/test_common.py181
-rw-r--r--nova/tests/api/openstack/test_images.py227
-rw-r--r--nova/tests/image/test_glance.py2
-rw-r--r--nova/tests/test_cloud.py67
-rw-r--r--nova/tests/test_libvirt.py197
-rw-r--r--nova/tests/xenapi/stubs.py18
-rw-r--r--nova/twistd.py6
-rw-r--r--nova/utils.py18
-rw-r--r--nova/virt/libvirt/connection.py6
-rw-r--r--nova/virt/libvirt/firewall.py26
-rw-r--r--nova/virt/xenapi/fake.py6
-rw-r--r--plugins/xenserver/xenapi/etc/xapi.d/plugins/glance2
-rwxr-xr-xrun_tests.sh14
-rw-r--r--tools/pip-requires1
31 files changed, 1008 insertions, 283 deletions
diff --git a/bin/nova-manage b/bin/nova-manage
index 13cf4b8c6..e7164b4d2 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -96,6 +96,7 @@ flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('vlan_start', 'nova.network.manager')
flags.DECLARE('vpn_start', 'nova.network.manager')
flags.DECLARE('fixed_range_v6', 'nova.network.manager')
+flags.DECLARE('gateway_v6', 'nova.network.manager')
flags.DECLARE('images_path', 'nova.image.local')
flags.DECLARE('libvirt_type', 'nova.virt.libvirt.connection')
flags.DEFINE_flag(flags.HelpFlag())
@@ -552,16 +553,20 @@ class FloatingIpCommands(object):
class NetworkCommands(object):
"""Class for managing networks."""
- def create(self, fixed_range=None, num_networks=None,
+ def create(self, label=None, fixed_range=None, num_networks=None,
network_size=None, vlan_start=None,
- vpn_start=None, fixed_range_v6=None, label='public',
+ vpn_start=None, fixed_range_v6=None, gateway_v6=None,
flat_network_bridge=None, bridge_interface=None):
"""Creates fixed ips for host by range
- arguments: fixed_range=FLAG, [num_networks=FLAG],
+ arguments: label, fixed_range, [num_networks=FLAG],
[network_size=FLAG], [vlan_start=FLAG],
- [vpn_start=FLAG], [fixed_range_v6=FLAG],
- [label='public'], [flat_network_bridge=FLAG,
- [bridge_interface=FLAG]"""
+ [vpn_start=FLAG], [fixed_range_v6=FLAG], [gateway_v6=FLAG],
+ [flat_network_bridge=FLAG], [bridge_interface=FLAG]
+ """
+ if not label:
+ msg = _('a label (ex: public) is required to create networks.')
+ print msg
+ raise TypeError(msg)
if not fixed_range:
msg = _('Fixed range in the form of 10.0.0.0/8 is '
'required to create networks.')
@@ -581,17 +586,20 @@ class NetworkCommands(object):
flat_network_bridge = FLAGS.flat_network_bridge
if not bridge_interface:
bridge_interface = FLAGS.flat_interface or FLAGS.vlan_interface
+ if not gateway_v6:
+ gateway_v6 = FLAGS.gateway_v6
net_manager = utils.import_object(FLAGS.network_manager)
try:
net_manager.create_networks(context.get_admin_context(),
+ label=label,
cidr=fixed_range,
num_networks=int(num_networks),
network_size=int(network_size),
vlan_start=int(vlan_start),
vpn_start=int(vpn_start),
cidr_v6=fixed_range_v6,
- label=label,
+ gateway_v6=gateway_v6,
bridge=flat_network_bridge,
bridge_interface=bridge_interface)
except ValueError, e:
@@ -1098,24 +1106,35 @@ class ImageCommands(object):
self._convert_images(machine_images)
+class ConfigCommands(object):
+ """Class for exposing the flags defined by flag_file(s)."""
+
+ def __init__(self):
+ pass
+
+ def list(self):
+ print FLAGS.FlagsIntoString()
+
+
CATEGORIES = [
- ('user', UserCommands),
('account', AccountCommands),
- ('project', ProjectCommands),
- ('role', RoleCommands),
- ('shell', ShellCommands),
- ('vpn', VpnCommands),
+ ('config', ConfigCommands),
+ ('db', DbCommands),
('fixed', FixedIpCommands),
+ ('flavor', InstanceTypeCommands),
('floating', FloatingIpCommands),
+ ('instance_type', InstanceTypeCommands),
+ ('image', ImageCommands),
('network', NetworkCommands),
- ('vm', VmCommands),
+ ('project', ProjectCommands),
+ ('role', RoleCommands),
('service', ServiceCommands),
- ('db', DbCommands),
+ ('shell', ShellCommands),
+ ('user', UserCommands),
+ ('version', VersionCommands),
+ ('vm', VmCommands),
('volume', VolumeCommands),
- ('instance_type', InstanceTypeCommands),
- ('image', ImageCommands),
- ('flavor', InstanceTypeCommands),
- ('version', VersionCommands)]
+ ('vpn', VpnCommands)]
def lazy_match(name, key_value_tuples):
diff --git a/doc/source/devref/distributed_scheduler.rst b/doc/source/devref/distributed_scheduler.rst
new file mode 100644
index 000000000..eb6a1a03e
--- /dev/null
+++ b/doc/source/devref/distributed_scheduler.rst
@@ -0,0 +1,168 @@
+..
+ Copyright 2011 OpenStack LLC
+ All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+Distributed Scheduler
+=====================
+
+The Scheduler is akin to a Dating Service. Requests for the creation of new instances come in and the most applicable Compute nodes are selected from a large pool of potential candidates. In a small deployment we may be happy with the currently available Chance Scheduler which randomly selects a Host from the available pool. Or if you need something a little more fancy you may want to use the Availability Zone Scheduler, which selects Compute hosts from a logical partitioning of available hosts (within a single Zone).
+
+But for larger deployments a more complex scheduling algorithm is required. Additionally, if you are using Zones in your Nova setup, you'll need a scheduler that understands how to pass instance requests from Zone to Zone.
+
+This is the purpose of the Distributed Scheduler (DS). The DS utilizes the Capabilities of a Zone and its component services to make informed decisions on where a new instance should be created. When making this decision it consults not only all the Compute nodes in the current Zone, but the Compute nodes in each Child Zone. This continues recursively until the ideal host is found.
+
+So, how does this all work?
+
+This document will explain the strategy employed by the `ZoneAwareScheduler` and its derivations. You should read the Zones documentation before reading this.
+
+Costs & Weights
+----------
+When deciding where to place an Instance, we compare a Weighted Cost for each Host. The Weighting, currently, is just the sum of each Cost. Costs are nothing more than integers from `0 - max_int`. Costs are computed by looking at the various Capabilities of the Host relative to the specs of the Instance being asked for. Trying to put a plain vanilla instance on a high performance host should have a very high cost. But putting a vanilla instance on a vanilla Host should have a low cost.
+
+Some Costs are more esoteric. Consider a rule that says we should prefer Hosts that don't already have an instance on it that is owned by the user requesting it (to mitigate against machine failures). Here we have to look at all the other Instances on the host to compute our cost.
+
+An example of some other costs might include selecting:
+* a GPU-based host over a standard CPU
+* a host with fast ethernet over a 10mbps line
+* a host that can run Windows instances
+* a host in the EU vs North America
+* etc
+
+This Weight is computed for each Instance requested. If the customer asked for 1000 instances, the consumed resources on each Host are "virtually" depleted so the Cost can change accordingly.
+
+nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler
+-----------
+As we explained in the Zones documentation, each Scheduler has a `ZoneManager` object that collects "Capabilities" about child Zones and each of the services running in the current Zone. The `ZoneAwareScheduler` uses this information to make its decisions.
+
+Here is how it works:
+
+1. The compute nodes are filtered and the nodes remaining are weighed.
+1a. Filtering the hosts is a simple matter of ensuring the compute node has ample resources (CPU, RAM, Disk, etc) to fulfil the request.
+1b. Weighing of the remaining compute nodes assigns a number based on their suitability for the request.
+2. The same request is sent to each child Zone and step #1 is done there too. The resulting weighted list is returned to the parent.
+3. The parent Zone sorts and aggregates all the weights and a final build plan is constructed.
+4. The build plan is executed upon. Concurrently, instance create requests are sent to each of the selected hosts, be they local or in a child zone. Child Zones may forward the requests to their child Zones as needed.
+
+`ZoneAwareScheduler` by itself is not capable of handling all the provisioning itself. Derived classes are used to select which host filtering and weighing strategy will be used.
+
+Filtering and Weighing
+------------
+The filtering (excluding compute nodes incapable of fulfilling the request) and weighing (computing the relative "fitness" of a compute node to fulfill the request) rules used are very subjective operations ... Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `ZoneAwareScheduler` are flexible and extensible.
+
+Requesting a new instance
+------------
+Prior to the `ZoneAwareScheduler`, to request a new instance, a call was made to `nova.compute.api.create()`. The type of instance created depended on the value of the `InstanceType` record being passed in. The `InstanceType` determined the amount of disk, CPU, RAM and network required for the instance. Administrators can add new `InstanceType` records to suit their needs. For more complicated instance requests we need to go beyond the default fields in the `InstanceType` table.
+
+`nova.compute.api.create()` performed the following actions:
+1. it validated all the fields passed into it.
+2. it created an entry in the `Instance` table for each instance requested
+3. it put one `run_instance` message in the scheduler queue for each instance requested
+4. the schedulers picked off the messages and decided which compute node should handle the request.
+5. the `run_instance` message was forwarded to the compute node for processing and the instance is created.
+6. it returned a list of dicts representing each of the `Instance` records (even if the instance has not been activated yet). At least the `instance_id`s are valid.
+
+Generally, the standard schedulers (like `ChanceScheduler` and `AvailabilityZoneScheduler`) only operate in the current Zone. They have no concept of child Zones.
+
+The problem with this approach is each request is scattered amongst each of the schedulers. If we are asking for 1000 instances, each scheduler gets the requests one-at-a-time. There is no possibility of optimizing the requests to take into account all 1000 instances as a group. We call this Single-Shot vs. All-at-Once.
+
+For the `ZoneAwareScheduler` we need to use the All-at-Once approach. We need to consider all the hosts across all the Zones before deciding where they should reside. In order to handle this we have a new method `nova.compute.api.create_all_at_once()`. This method does things a little differently:
+1. it validates all the fields passed into it.
+2. it creates a single `reservation_id` for all of the instances created. This is a UUID.
+3. it creates a single `run_instance` request in the scheduler queue
+4. a scheduler picks the message off the queue and works on it.
+5. the scheduler sends off an OS API `POST /zones/select` command to each child Zone. The `BODY` payload of the call contains the `request_spec`.
+6. the child Zones use the `request_spec` to compute a weighted list for each instance requested. No attempt to actually create an instance is done at this point. We're only estimating the suitability of the Zones.
+7. if the child Zone has its own child Zones, the `/zones/select` call will be sent down to them as well.
+8. Finally, when all the estimates have bubbled back to the Zone that initiated the call, all the results are merged, sorted and processed.
+9. Now the instances can be created. The initiating Zone either forwards the `run_instance` message to the local Compute node to do the work, or it issues a `POST /servers` call to the relevant child Zone. The parameters to the child Zone call are the same as what was passed in by the user.
+10. The `reservation_id` is passed back to the caller. Later we explain how the user can check on the status of the command with this `reservation_id`.
+
+The Catch
+-------------
+This all seems pretty straightforward but, like most things, there's a catch. Zones are expected to operate in complete isolation from each other. Each Zone has its own AMQP service, database and set of Nova services. But, for security reasons Zones should never leak information about the architectural layout internally. That means Zones cannot leak information about hostnames or service IP addresses outside of its world.
+
+When `POST /zones/select` is called to estimate which compute node to use, time passes until the `POST /servers` call is issued. If we only passed the weight back from the `select` we would have to re-compute the appropriate compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going be ignored. Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many many `select` calls issued to child Zones asking for estimates.
+
+Instead, we take a rather innovative approach to the problem. We encrypt all the child zone internal details and pass them back to the parent Zone. If the parent zone decides to use a child Zone for the instance it simply passes the encrypted data back to the child during the `POST /servers` call as an extra parameter. The child Zone can then decrypt the hint and go directly to the Compute node previously selected. If the estimate isn't used, it is simply discarded by the parent. It's for this reason that it is so important that each Zone defines a unique encryption key via `--build_plan_encryption_key`
+
+In the case of nested child Zones, each Zone re-encrypts the weighted list results and passes those values to the parent.
+
+Throughout the `nova.api.openstack.servers`, `nova.api.openstack.zones`, `nova.compute.api.create*` and `nova.scheduler.zone_aware_scheduler` code you'll see references to `blob` and `child_blob`. These are the encrypted hints about which Compute node to use.
+
+Reservation IDs
+---------------
+
+NOTE: The features described in this section are related to the up-coming 'merge-4' branch.
+
+The OpenStack API allows a user to list all the instances they own via the `GET /servers/` command or the details on a particular instance via `GET /servers/###`. This mechanism is usually sufficient since OS API only allows for creating one instance at a time, unlike the EC2 API which allows you to specify a quantity of instances to be created.
+
+NOTE: currently the `GET /servers` command is not Zone-aware since all operations done in child Zones are done via a single administrative account. Therefore, asking a child Zone to `GET /servers` would return all the active instances ... and that would not be what the user intended. Later, when the Keystone Auth system is integrated with Nova, this functionality will be enabled.
+
+We could use the OS API 1.1 Extensions mechanism to accept a `num_instances` parameter, but this would result in a different return code. Instead of getting back an `Instance` record, we would be getting back a `reservation_id`. So, instead, we've implemented a new command `POST /zones/boot` command which is nearly identical to `POST /servers` except that it takes a `num_instances` parameter and returns a `reservation_id`. Perhaps in OS API 2.x we can unify these approaches.
+
+Finally, we need to give the user a way to get information on each of the instances created under this `reservation_id`. Fortunately, this is still possible with the existing `GET /servers` command, so long as we add a new optional `reservation_id` parameter.
+
+`python-novaclient` will be extended to support both of these changes.
+
+Host Filter
+--------------
+
+As we mentioned earlier, filtering hosts is a very deployment-specific process. Service Providers may have a different set of criteria for filtering Compute nodes than a University. To facilitate this the `nova.scheduler.host_filter` module supports a variety of filtering strategies as well as an easy means for plugging in your own algorithms.
+
+The filter used is determined by the `--default_host_filter` flag, which points to a Python Class. By default this flag is set to `nova.scheduler.host_filter.AllHostsFilter` which simply returns all available hosts. But there are others:
+
+ * `nova.scheduler.host_filter.InstanceTypeFilter` provides host filtering based on the memory and disk size specified in the `InstanceType` record passed into `run_instance`.
+
+ * `nova.scheduler.host_filter.JSONFilter` filters hosts based on simple JSON expression grammar. Using a LISP-like JSON structure the caller can request instances based on criteria well beyond what `InstanceType` specifies. See `nova.tests.test_host_filter` for examples.
+
+To create your own `HostFilter` the user simply has to derive from `nova.scheduler.host_filter.HostFilter` and implement two methods: `instance_type_to_filter` and `filter_hosts`. Since Nova is currently dependent on the `InstanceType` structure, the `instance_type_to_filter` method should take an `InstanceType` and turn it into an internal data structure usable by your filter. This is for backward compatibility with existing OpenStack and EC2 API calls. If you decide to create your own call for creating instances not based on `Flavors` or `InstanceTypes` you can ignore this method. The real work is done in `filter_hosts` which must return a list of host tuples for each appropriate host. The set of all available hosts is in the `ZoneManager` object passed into the call as well as the filter query. The host tuple contains (`<hostname>`, `<additional data>`) where `<additional data>` is whatever you want it to be.
+
+Cost Scheduler Weighing
+--------------
+Every `ZoneAwareScheduler` derivation must also override the `weigh_hosts` method. This takes the list of filtered hosts (generated by the `filter_hosts` method) and returns a list of weight dicts. The weight dicts must contain two keys: `weight` and `hostname` where `weight` is simply an integer (lower is better) and `hostname` is the name of the host. The list does not need to be sorted, this will be done by the `ZoneAwareScheduler` base class when all the results have been assembled.
+
+Simple Zone Aware Scheduling
+--------------
+The easiest way to get started with the `ZoneAwareScheduler` is to use the `nova.scheduler.host_filter.HostFilterScheduler`. This scheduler uses the default Host Filter, and its `weigh_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things.
+
+The `--scheduler_driver` flag is how you specify the scheduler class name.
+
+Flags
+--------------
+
+All this Zone and Distributed Scheduler stuff can seem a little daunting to configure, but it's actually not too bad. Here are some of the main flags you should set in your `nova.conf` file:
+
+::
+ --allow_admin_api=true
+ --enable_zone_routing=true
+ --zone_name=zone1
+ --build_plan_encryption_key=c286696d887c9aa0611bbb3e2025a45b
+ --scheduler_driver=nova.scheduler.host_filter.HostFilterScheduler
+ --default_host_filter=nova.scheduler.host_filter.AllHostsFilter
+
+`--allow_admin_api` must be set for OS API to enable the new `/zones/*` commands.
+`--enable_zone_routing` must be set for OS API commands such as `create()`, `pause()` and `delete()` to get routed from Zone to Zone when looking for instances.
+`--zone_name` is only required in child Zones. The default Zone name is `nova`, but you may want to name your child Zones something useful. Duplicate Zone names are not an issue.
+`build_plan_encryption_key` is the SHA-256 key for encrypting/decrypting the Host information when it leaves a Zone. Be sure to change this key for each Zone you create. Do not duplicate keys.
+`scheduler_driver` is the real workhorse of the operation. For Distributed Scheduler, you need to specify a class derived from `nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler`.
+`default_host_filter` is the host filter to be used for filtering candidate Compute nodes.
+
+Some optional flags which are handy for debugging are:
+
+::
+ --connection_type=fake
+ --verbose
+
+Using the `Fake` virtualization driver is handy when you're setting this stuff up so you're not dealing with a million possible issues at once. When things seem to be working correctly, switch back to whatever hypervisor your deployment uses.
diff --git a/nova/api/direct.py b/nova/api/direct.py
index ea20042a7..ec79151b1 100644
--- a/nova/api/direct.py
+++ b/nova/api/direct.py
@@ -324,7 +324,7 @@ class Limited(object):
def __init__(self, proxy):
self._proxy = proxy
- if not self.__doc__:
+ if not self.__doc__: # pylint: disable=E0203
self.__doc__ = proxy.__doc__
if not self._allowed:
self._allowed = []
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 1915d007d..890d57fe7 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -242,6 +242,7 @@ class Authorizer(wsgi.Middleware):
'CreateKeyPair': ['all'],
'DeleteKeyPair': ['all'],
'DescribeSecurityGroups': ['all'],
+ 'ImportPublicKey': ['all'],
'AuthorizeSecurityGroupIngress': ['netadmin'],
'RevokeSecurityGroupIngress': ['netadmin'],
'CreateSecurityGroup': ['netadmin'],
diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py
index aeebd86fb..57d0a0339 100644
--- a/nova/api/ec2/admin.py
+++ b/nova/api/ec2/admin.py
@@ -324,7 +324,3 @@ class AdminController(object):
rv.append(host_dict(host, compute, instances, volume, volumes,
now))
return {'hosts': rv}
-
- def describe_host(self, _context, name, **_kwargs):
- """Returns status info for single node."""
- return host_dict(db.host_get(name))
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index bd8ca813c..3c3f259b4 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -136,6 +136,13 @@ class CloudController(object):
return services[0]['availability_zone']
return 'unknown zone'
+ def _get_image_state(self, image):
+ # NOTE(vish): fallback status if image_state isn't set
+ state = image.get('status')
+ if state == 'active':
+ state = 'available'
+ return image['properties'].get('image_state', state)
+
def get_metadata(self, address):
ctxt = context.get_admin_context()
instance_ref = self.compute_api.get_all(ctxt, fixed_ip=address)
@@ -895,6 +902,16 @@ class CloudController(object):
if kwargs.get('ramdisk_id'):
ramdisk = self._get_image(context, kwargs['ramdisk_id'])
kwargs['ramdisk_id'] = ramdisk['id']
+ image = self._get_image(context, kwargs['image_id'])
+
+ if image:
+ image_state = self._get_image_state(image)
+ else:
+ raise exception.ImageNotFound(image_id=kwargs['image_id'])
+
+ if image_state != 'available':
+ raise exception.ApiError(_('Image must be available'))
+
instances = self.compute_api.create(context,
instance_type=instance_types.get_instance_type_by_name(
kwargs.get('instance_type', None)),
@@ -1010,11 +1027,8 @@ class CloudController(object):
get('image_location'), name)
else:
i['imageLocation'] = image['properties'].get('image_location')
- # NOTE(vish): fallback status if image_state isn't set
- state = image.get('status')
- if state == 'active':
- state = 'available'
- i['imageState'] = image['properties'].get('image_state', state)
+
+ i['imageState'] = self._get_image_state(image)
i['displayName'] = name
i['description'] = image.get('description')
display_mapping = {'aki': 'kernel',
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index 571e46766..ce7e2805c 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -35,6 +35,37 @@ XML_NS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
XML_NS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
+def get_pagination_params(request):
+ """Return marker, limit tuple from request.
+
+ :param request: `wsgi.Request` possibly containing 'marker' and 'limit'
+ GET variables. 'marker' is the id of the last element
+ the client has seen, and 'limit' is the maximum number
+ of items to return. If 'limit' is not specified, 0, or
+ > max_limit, we default to max_limit. Negative values
+ for either marker or limit will cause
+ exc.HTTPBadRequest() exceptions to be raised.
+
+ """
+ try:
+ marker = int(request.GET.get('marker', 0))
+ except ValueError:
+ raise webob.exc.HTTPBadRequest(_('marker param must be an integer'))
+
+ try:
+ limit = int(request.GET.get('limit', 0))
+ except ValueError:
+ raise webob.exc.HTTPBadRequest(_('limit param must be an integer'))
+
+ if limit < 0:
+ raise webob.exc.HTTPBadRequest(_('limit param must be positive'))
+
+ if marker < 0:
+ raise webob.exc.HTTPBadRequest(_('marker param must be positive'))
+
+ return(marker, limit)
+
+
def limited(items, request, max_limit=FLAGS.osapi_max_limit):
"""
Return a slice of items according to requested offset and limit.
@@ -71,19 +102,10 @@ def limited(items, request, max_limit=FLAGS.osapi_max_limit):
def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit):
"""Return a slice of items according to the requested marker and limit."""
+ (marker, limit) = get_pagination_params(request)
- try:
- marker = int(request.GET.get('marker', 0))
- except ValueError:
- raise webob.exc.HTTPBadRequest(_('marker param must be an integer'))
-
- try:
- limit = int(request.GET.get('limit', max_limit))
- except ValueError:
- raise webob.exc.HTTPBadRequest(_('limit param must be an integer'))
-
- if limit < 0:
- raise webob.exc.HTTPBadRequest(_('limit param must be positive'))
+ if limit == 0:
+ limit = max_limit
limit = min(max_limit, limit)
start_index = 0
diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py
index 881b61733..54e17e23d 100644
--- a/nova/api/openstack/extensions.py
+++ b/nova/api/openstack/extensions.py
@@ -137,7 +137,7 @@ class ActionExtensionResource(wsgi.Resource):
def __init__(self, application):
controller = ActionExtensionController(application)
- super(ActionExtensionResource, self).__init__(controller)
+ wsgi.Resource.__init__(self, controller)
def add_action(self, action_name, handler):
self.controller.add_action(action_name, handler)
@@ -164,7 +164,7 @@ class RequestExtensionResource(wsgi.Resource):
def __init__(self, application):
controller = RequestExtensionController(application)
- super(RequestExtensionResource, self).__init__(controller)
+ wsgi.Resource.__init__(self, controller)
def add_handler(self, handler):
self.controller.add_handler(handler)
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index 59d9e3082..5ffd8e96a 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -47,30 +47,6 @@ class Controller(object):
self._image_service = image_service or \
nova.image.get_default_image_service()
- def index(self, req):
- """Return an index listing of images available to the request.
-
- :param req: `wsgi.Request` object
- """
- context = req.environ['nova.context']
- filters = self._get_filters(req)
- images = self._image_service.index(context, filters)
- images = common.limited(images, req)
- builder = self.get_builder(req).build
- return dict(images=[builder(image, detail=False) for image in images])
-
- def detail(self, req):
- """Return a detailed index listing of images available to the request.
-
- :param req: `wsgi.Request` object.
- """
- context = req.environ['nova.context']
- filters = self._get_filters(req)
- images = self._image_service.detail(context, filters)
- images = common.limited(images, req)
- builder = self.get_builder(req).build
- return dict(images=[builder(image, detail=True) for image in images])
-
def _get_filters(self, req):
"""
Return a dictionary of query param filters from the request
@@ -123,7 +99,7 @@ class Controller(object):
raise webob.exc.HTTPBadRequest()
try:
- server_id = body["image"]["serverId"]
+ server_id = self._server_id_from_req_data(body)
image_name = body["image"]["name"]
except KeyError:
raise webob.exc.HTTPBadRequest()
@@ -135,6 +111,9 @@ class Controller(object):
"""Indicates that you must use a Controller subclass."""
raise NotImplementedError
+ def _server_id_from_req_data(self, data):
+ raise NotImplementedError()
+
class ControllerV10(Controller):
"""Version 1.0 specific controller logic."""
@@ -144,6 +123,35 @@ class ControllerV10(Controller):
base_url = request.application_url
return images_view.ViewBuilderV10(base_url)
+ def index(self, req):
+ """Return an index listing of images available to the request.
+
+ :param req: `wsgi.Request` object
+
+ """
+ context = req.environ['nova.context']
+ filters = self._get_filters(req)
+ images = self._image_service.index(context, filters)
+ images = common.limited(images, req)
+ builder = self.get_builder(req).build
+ return dict(images=[builder(image, detail=False) for image in images])
+
+ def detail(self, req):
+ """Return a detailed index listing of images available to the request.
+
+ :param req: `wsgi.Request` object.
+
+ """
+ context = req.environ['nova.context']
+ filters = self._get_filters(req)
+ images = self._image_service.detail(context, filters)
+ images = common.limited(images, req)
+ builder = self.get_builder(req).build
+ return dict(images=[builder(image, detail=True) for image in images])
+
+ def _server_id_from_req_data(self, data):
+ return data['image']['serverId']
+
class ControllerV11(Controller):
"""Version 1.1 specific controller logic."""
@@ -153,6 +161,37 @@ class ControllerV11(Controller):
base_url = request.application_url
return images_view.ViewBuilderV11(base_url)
+ def index(self, req):
+ """Return an index listing of images available to the request.
+
+ :param req: `wsgi.Request` object
+
+ """
+ context = req.environ['nova.context']
+ filters = self._get_filters(req)
+ (marker, limit) = common.get_pagination_params(req)
+ images = self._image_service.index(
+ context, filters=filters, marker=marker, limit=limit)
+ builder = self.get_builder(req).build
+ return dict(images=[builder(image, detail=False) for image in images])
+
+ def detail(self, req):
+ """Return a detailed index listing of images available to the request.
+
+ :param req: `wsgi.Request` object.
+
+ """
+ context = req.environ['nova.context']
+ filters = self._get_filters(req)
+ (marker, limit) = common.get_pagination_params(req)
+ images = self._image_service.detail(
+ context, filters=filters, marker=marker, limit=limit)
+ builder = self.get_builder(req).build
+ return dict(images=[builder(image, detail=True) for image in images])
+
+ def _server_id_from_req_data(self, data):
+ return data['image']['serverRef']
+
def create_resource(version='1.0'):
controller = {
@@ -168,7 +207,7 @@ def create_resource(version='1.0'):
metadata = {
"attributes": {
"image": ["id", "name", "updated", "created", "status",
- "serverId", "progress"],
+ "serverId", "progress", "serverRef"],
"link": ["rel", "type", "href"],
},
}
diff --git a/nova/api/openstack/views/limits.py b/nova/api/openstack/views/limits.py
index e21c9f2fd..934b4921a 100644
--- a/nova/api/openstack/views/limits.py
+++ b/nova/api/openstack/views/limits.py
@@ -29,9 +29,6 @@ class ViewBuilder(object):
def _build_rate_limit(self, rate_limit):
raise NotImplementedError()
- def _build_absolute_limits(self, absolute_limit):
- raise NotImplementedError()
-
def build(self, rate_limits, absolute_limits):
rate_limits = self._build_rate_limits(rate_limits)
absolute_limits = self._build_absolute_limits(absolute_limits)
@@ -67,12 +64,6 @@ class ViewBuilder(object):
limits[name] = value
return limits
- def _build_rate_limits(self, rate_limits):
- raise NotImplementedError()
-
- def _build_rate_limit(self, rate_limit):
- raise NotImplementedError()
-
class ViewBuilderV10(ViewBuilder):
"""Openstack API v1.0 limits view builder."""
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 3b42dbed3..93c3f8897 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -60,9 +60,7 @@ def is_user_context(context):
def authorize_project_context(context, project_id):
- """Ensures that the request context has permission to access the
- given project.
- """
+ """Ensures a request has permission to access the given project."""
if is_user_context(context):
if not context.project:
raise exception.NotAuthorized()
@@ -71,9 +69,7 @@ def authorize_project_context(context, project_id):
def authorize_user_context(context, user_id):
- """Ensures that the request context has permission to access the
- given user.
- """
+ """Ensures a request has permission to access the given user."""
if is_user_context(context):
if not context.user:
raise exception.NotAuthorized()
@@ -89,9 +85,12 @@ def can_read_deleted(context):
def require_admin_context(f):
- """Decorator used to indicate that the method requires an
- administrator context.
+ """Decorator to require admin request context.
+
+ The first argument to the wrapped function must be the context.
+
"""
+
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]):
raise exception.AdminRequired()
@@ -100,12 +99,19 @@ def require_admin_context(f):
def require_context(f):
- """Decorator used to indicate that the method requires either
- an administrator or normal user context.
+ """Decorator to require *any* user or admin context.
+
+ This does no authorization for user or project access matching, see
+ :py:func:`authorize_project_context` and
+ :py:func:`authorize_user_context`.
+
+ The first argument to the wrapped function must be the context.
+
"""
+
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]) and not is_user_context(args[0]):
- raise exception.AdminRequired()
+ raise exception.NotAuthorized()
return f(*args, **kwargs)
return wrapper
@@ -1316,7 +1322,7 @@ def key_pair_destroy_all_by_user(context, user_id):
with session.begin():
session.query(models.KeyPair).\
filter_by(user_id=user_id).\
- update({'deleted': 1,
+ update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@@ -1862,7 +1868,7 @@ def volume_destroy(context, volume_id):
with session.begin():
session.query(models.Volume).\
filter_by(id=volume_id).\
- update({'deleted': 1,
+ update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.ExportDevice).\
@@ -2020,7 +2026,7 @@ def snapshot_destroy(context, snapshot_id):
with session.begin():
session.query(models.Snapshot).\
filter_by(id=snapshot_id).\
- update({'deleted': 1,
+ update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@@ -2175,17 +2181,17 @@ def security_group_destroy(context, security_group_id):
with session.begin():
session.query(models.SecurityGroup).\
filter_by(id=security_group_id).\
- update({'deleted': 1,
+ update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.SecurityGroupInstanceAssociation).\
filter_by(security_group_id=security_group_id).\
- update({'deleted': 1,
+ update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.SecurityGroupIngressRule).\
filter_by(group_id=security_group_id).\
- update({'deleted': 1,
+ update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@@ -2196,11 +2202,11 @@ def security_group_destroy_all(context, session=None):
session = get_session()
with session.begin():
session.query(models.SecurityGroup).\
- update({'deleted': 1,
+ update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.SecurityGroupIngressRule).\
- update({'deleted': 1,
+ update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@@ -2893,7 +2899,7 @@ def instance_metadata_update_or_create(context, instance_id, metadata):
meta_ref = models.InstanceMetadata()
meta_ref.update({"key": key, "value": value,
"instance_id": instance_id,
- "deleted": 0})
+ "deleted": False})
meta_ref.save(session=session)
return metadata
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index cc9ce64a0..3dbf483fa 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -46,6 +46,7 @@ class NovaBase(object):
updated_at = Column(DateTime, onupdate=utils.utcnow)
deleted_at = Column(DateTime)
deleted = Column(Boolean, default=False)
+ metadata = None
def save(self, session=None):
"""Save this object."""
diff --git a/nova/image/fake.py b/nova/image/fake.py
index 63966244b..70a5f0e22 100644
--- a/nova/image/fake.py
+++ b/nova/image/fake.py
@@ -99,11 +99,11 @@ class _FakeImageService(service.BaseImageService):
self.create(None, image5)
super(_FakeImageService, self).__init__()
- def index(self, context, filters=None):
+ def index(self, context, filters=None, marker=None, limit=None):
"""Returns list of images."""
return copy.deepcopy(self.images.values())
- def detail(self, context, filters=None):
+ def detail(self, context, filters=None, marker=None, limit=None):
"""Return list of detailed image information."""
return copy.deepcopy(self.images.values())
diff --git a/nova/image/glance.py b/nova/image/glance.py
index dec797619..61308431d 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -58,23 +58,27 @@ class GlanceImageService(service.BaseImageService):
else:
self.client = client
- def index(self, context, filters=None):
+ def index(self, context, filters=None, marker=None, limit=None):
"""Calls out to Glance for a list of images available."""
# NOTE(sirp): We need to use `get_images_detailed` and not
# `get_images` here because we need `is_public` and `properties`
# included so we can filter by user
filtered = []
- image_metas = self.client.get_images_detailed(filters=filters)
+ image_metas = self.client.get_images_detailed(filters=filters,
+ marker=marker,
+ limit=limit)
for image_meta in image_metas:
if self._is_image_available(context, image_meta):
meta_subset = utils.subset_dict(image_meta, ('id', 'name'))
filtered.append(meta_subset)
return filtered
- def detail(self, context, filters=None):
+ def detail(self, context, filters=None, marker=None, limit=None):
"""Calls out to Glance for a list of detailed image information."""
filtered = []
- image_metas = self.client.get_images_detailed(filters=filters)
+ image_metas = self.client.get_images_detailed(filters=filters,
+ marker=marker,
+ limit=limit)
for image_meta in image_metas:
if self._is_image_available(context, image_meta):
base_image_meta = self._translate_to_base(image_meta)
diff --git a/nova/image/local.py b/nova/image/local.py
index 677d5302b..c7dee4573 100644
--- a/nova/image/local.py
+++ b/nova/image/local.py
@@ -63,7 +63,8 @@ class LocalImageService(service.BaseImageService):
images.append(unhexed_image_id)
return images
- def index(self, context, *args, **kwargs):
+ def index(self, context, filters=None, marker=None, limit=None):
+ # TODO(blamar): Make use of filters, marker, and limit
filtered = []
image_metas = self.detail(context)
for image_meta in image_metas:
@@ -71,7 +72,8 @@ class LocalImageService(service.BaseImageService):
filtered.append(meta)
return filtered
- def detail(self, context, *args, **kwargs):
+ def detail(self, context, filters=None, marker=None, limit=None):
+ # TODO(blamar): Make use of filters, marker, and limit
images = []
for image_id in self._ids():
try:
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 27859ec6f..4483422fd 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -91,6 +91,7 @@ flags.DEFINE_string('floating_range', '4.4.4.0/24',
'Floating IP address block')
flags.DEFINE_string('fixed_range', '10.0.0.0/8', 'Fixed IP address block')
flags.DEFINE_string('fixed_range_v6', 'fd00::/48', 'Fixed IPv6 address block')
+flags.DEFINE_string('gateway_v6', None, 'Default IPv6 gateway')
flags.DEFINE_integer('cnt_vpn_clients', 0,
'Number of addresses reserved for vpn clients')
flags.DEFINE_string('network_driver', 'nova.network.linux_net',
@@ -535,8 +536,9 @@ class NetworkManager(manager.SchedulerDependentManager):
network = self.db.fixed_ip_get_network(context, address)
self.driver.update_dhcp(context, network['id'])
- def create_networks(self, context, cidr, num_networks, network_size,
- cidr_v6, label, bridge, bridge_interface, **kwargs):
+ def create_networks(self, context, label, cidr, num_networks,
+ network_size, cidr_v6, gateway_v6, bridge,
+ bridge_interface, *args, **kwargs):
"""Create networks based on parameters."""
fixed_net = IPy.IP(cidr)
fixed_net_v6 = IPy.IP(cidr_v6)
@@ -567,7 +569,13 @@ class NetworkManager(manager.SchedulerDependentManager):
significant_bits_v6)
net['cidr_v6'] = cidr_v6
project_net_v6 = IPy.IP(cidr_v6)
- net['gateway_v6'] = str(project_net_v6[1])
+
+ if gateway_v6:
+ # use a pre-defined gateway if one is provided
+ net['gateway_v6'] = str(gateway_v6)
+ else:
+ net['gateway_v6'] = str(project_net_v6[1])
+
net['netmask_v6'] = str(project_net_v6.prefixlen())
if kwargs.get('vpn', False):
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 62e44ba96..7d632aaeb 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -142,7 +142,8 @@ def stub_out_networking(stubs):
def stub_out_compute_api_snapshot(stubs):
def snapshot(self, context, instance_id, name):
- return 123
+ return dict(id='123', status='ACTIVE',
+ properties=dict(instance_id='123'))
stubs.Set(nova.compute.API, 'snapshot', snapshot)
@@ -168,12 +169,34 @@ def stub_out_glance(stubs, initial_fixtures=None):
def __init__(self, initial_fixtures):
self.fixtures = initial_fixtures or []
- def fake_get_images(self, filters=None):
+ def _filter_images(self, filters=None, marker=None, limit=None):
+ found = True
+ if marker:
+ found = False
+ if limit == 0:
+ limit = None
+
+ fixtures = []
+ count = 0
+ for f in self.fixtures:
+ if limit and count >= limit:
+ break
+ if found:
+ fixtures.append(f)
+ count = count + 1
+ if f['id'] == marker:
+ found = True
+
+ return fixtures
+
+ def fake_get_images(self, filters=None, marker=None, limit=None):
+ fixtures = self._filter_images(filters, marker, limit)
return [dict(id=f['id'], name=f['name'])
- for f in self.fixtures]
+ for f in fixtures]
- def fake_get_images_detailed(self, filters=None):
- return copy.deepcopy(self.fixtures)
+ def fake_get_images_detailed(self, filters=None,
+ marker=None, limit=None):
+ return self._filter_images(filters, marker, limit)
def fake_get_image_meta(self, image_id):
image = self._find_image(image_id)
diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py
index 8f57c5b67..9a9d9125c 100644
--- a/nova/tests/api/openstack/test_common.py
+++ b/nova/tests/api/openstack/test_common.py
@@ -24,7 +24,7 @@ import webob.exc
from webob import Request
from nova import test
-from nova.api.openstack.common import limited
+from nova.api.openstack import common
class LimiterTest(test.TestCase):
@@ -35,9 +35,7 @@ class LimiterTest(test.TestCase):
"""
def setUp(self):
- """
- Run before each test.
- """
+ """ Run before each test. """
super(LimiterTest, self).setUp()
self.tiny = range(1)
self.small = range(10)
@@ -45,127 +43,144 @@ class LimiterTest(test.TestCase):
self.large = range(10000)
def test_limiter_offset_zero(self):
- """
- Test offset key works with 0.
- """
+ """ Test offset key works with 0. """
req = Request.blank('/?offset=0')
- self.assertEqual(limited(self.tiny, req), self.tiny)
- self.assertEqual(limited(self.small, req), self.small)
- self.assertEqual(limited(self.medium, req), self.medium)
- self.assertEqual(limited(self.large, req), self.large[:1000])
+ self.assertEqual(common.limited(self.tiny, req), self.tiny)
+ self.assertEqual(common.limited(self.small, req), self.small)
+ self.assertEqual(common.limited(self.medium, req), self.medium)
+ self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_offset_medium(self):
- """
- Test offset key works with a medium sized number.
- """
+ """ Test offset key works with a medium sized number. """
req = Request.blank('/?offset=10')
- self.assertEqual(limited(self.tiny, req), [])
- self.assertEqual(limited(self.small, req), self.small[10:])
- self.assertEqual(limited(self.medium, req), self.medium[10:])
- self.assertEqual(limited(self.large, req), self.large[10:1010])
+ self.assertEqual(common.limited(self.tiny, req), [])
+ self.assertEqual(common.limited(self.small, req), self.small[10:])
+ self.assertEqual(common.limited(self.medium, req), self.medium[10:])
+ self.assertEqual(common.limited(self.large, req), self.large[10:1010])
def test_limiter_offset_over_max(self):
- """
- Test offset key works with a number over 1000 (max_limit).
- """
+ """ Test offset key works with a number over 1000 (max_limit). """
req = Request.blank('/?offset=1001')
- self.assertEqual(limited(self.tiny, req), [])
- self.assertEqual(limited(self.small, req), [])
- self.assertEqual(limited(self.medium, req), [])
- self.assertEqual(limited(self.large, req), self.large[1001:2001])
+ self.assertEqual(common.limited(self.tiny, req), [])
+ self.assertEqual(common.limited(self.small, req), [])
+ self.assertEqual(common.limited(self.medium, req), [])
+ self.assertEqual(
+ common.limited(self.large, req), self.large[1001:2001])
def test_limiter_offset_blank(self):
- """
- Test offset key works with a blank offset.
- """
+ """ Test offset key works with a blank offset. """
req = Request.blank('/?offset=')
- self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_offset_bad(self):
- """
- Test offset key works with a BAD offset.
- """
+ """ Test offset key works with a BAD offset. """
req = Request.blank(u'/?offset=\u0020aa')
- self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_nothing(self):
- """
- Test request with no offset or limit
- """
+ """ Test request with no offset or limit """
req = Request.blank('/')
- self.assertEqual(limited(self.tiny, req), self.tiny)
- self.assertEqual(limited(self.small, req), self.small)
- self.assertEqual(limited(self.medium, req), self.medium)
- self.assertEqual(limited(self.large, req), self.large[:1000])
+ self.assertEqual(common.limited(self.tiny, req), self.tiny)
+ self.assertEqual(common.limited(self.small, req), self.small)
+ self.assertEqual(common.limited(self.medium, req), self.medium)
+ self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_zero(self):
- """
- Test limit of zero.
- """
+ """ Test limit of zero. """
req = Request.blank('/?limit=0')
- self.assertEqual(limited(self.tiny, req), self.tiny)
- self.assertEqual(limited(self.small, req), self.small)
- self.assertEqual(limited(self.medium, req), self.medium)
- self.assertEqual(limited(self.large, req), self.large[:1000])
+ self.assertEqual(common.limited(self.tiny, req), self.tiny)
+ self.assertEqual(common.limited(self.small, req), self.small)
+ self.assertEqual(common.limited(self.medium, req), self.medium)
+ self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_medium(self):
- """
- Test limit of 10.
- """
+ """ Test limit of 10. """
req = Request.blank('/?limit=10')
- self.assertEqual(limited(self.tiny, req), self.tiny)
- self.assertEqual(limited(self.small, req), self.small)
- self.assertEqual(limited(self.medium, req), self.medium[:10])
- self.assertEqual(limited(self.large, req), self.large[:10])
+ self.assertEqual(common.limited(self.tiny, req), self.tiny)
+ self.assertEqual(common.limited(self.small, req), self.small)
+ self.assertEqual(common.limited(self.medium, req), self.medium[:10])
+ self.assertEqual(common.limited(self.large, req), self.large[:10])
def test_limiter_limit_over_max(self):
- """
- Test limit of 3000.
- """
+ """ Test limit of 3000. """
req = Request.blank('/?limit=3000')
- self.assertEqual(limited(self.tiny, req), self.tiny)
- self.assertEqual(limited(self.small, req), self.small)
- self.assertEqual(limited(self.medium, req), self.medium)
- self.assertEqual(limited(self.large, req), self.large[:1000])
+ self.assertEqual(common.limited(self.tiny, req), self.tiny)
+ self.assertEqual(common.limited(self.small, req), self.small)
+ self.assertEqual(common.limited(self.medium, req), self.medium)
+ self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_and_offset(self):
- """
- Test request with both limit and offset.
- """
+ """ Test request with both limit and offset. """
items = range(2000)
req = Request.blank('/?offset=1&limit=3')
- self.assertEqual(limited(items, req), items[1:4])
+ self.assertEqual(common.limited(items, req), items[1:4])
req = Request.blank('/?offset=3&limit=0')
- self.assertEqual(limited(items, req), items[3:1003])
+ self.assertEqual(common.limited(items, req), items[3:1003])
req = Request.blank('/?offset=3&limit=1500')
- self.assertEqual(limited(items, req), items[3:1003])
+ self.assertEqual(common.limited(items, req), items[3:1003])
req = Request.blank('/?offset=3000&limit=10')
- self.assertEqual(limited(items, req), [])
+ self.assertEqual(common.limited(items, req), [])
def test_limiter_custom_max_limit(self):
- """
- Test a max_limit other than 1000.
- """
+ """ Test a max_limit other than 1000. """
items = range(2000)
req = Request.blank('/?offset=1&limit=3')
- self.assertEqual(limited(items, req, max_limit=2000), items[1:4])
+ self.assertEqual(
+ common.limited(items, req, max_limit=2000), items[1:4])
req = Request.blank('/?offset=3&limit=0')
- self.assertEqual(limited(items, req, max_limit=2000), items[3:])
+ self.assertEqual(
+ common.limited(items, req, max_limit=2000), items[3:])
req = Request.blank('/?offset=3&limit=2500')
- self.assertEqual(limited(items, req, max_limit=2000), items[3:])
+ self.assertEqual(
+ common.limited(items, req, max_limit=2000), items[3:])
req = Request.blank('/?offset=3000&limit=10')
- self.assertEqual(limited(items, req, max_limit=2000), [])
+ self.assertEqual(common.limited(items, req, max_limit=2000), [])
def test_limiter_negative_limit(self):
- """
- Test a negative limit.
- """
+ """ Test a negative limit. """
req = Request.blank('/?limit=-3000')
- self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_negative_offset(self):
- """
- Test a negative offset.
- """
+ """ Test a negative offset. """
req = Request.blank('/?offset=-30')
- self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
+
+
+class PaginationParamsTest(test.TestCase):
+ """
+ Unit tests for the `nova.api.openstack.common.get_pagination_params`
+ method which takes in a request object and returns 'marker' and 'limit'
+ GET params.
+ """
+
+ def test_no_params(self):
+ """ Test no params. """
+ req = Request.blank('/')
+ self.assertEqual(common.get_pagination_params(req), (0, 0))
+
+ def test_valid_marker(self):
+ """ Test valid marker param. """
+ req = Request.blank('/?marker=1')
+ self.assertEqual(common.get_pagination_params(req), (1, 0))
+
+ def test_invalid_marker(self):
+ """ Test invalid marker param. """
+ req = Request.blank('/?marker=-2')
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, common.get_pagination_params, req)
+
+ def test_valid_limit(self):
+ """ Test valid limit param. """
+ req = Request.blank('/?limit=10')
+ self.assertEqual(common.get_pagination_params(req), (0, 10))
+
+ def test_invalid_limit(self):
+ """ Test invalid limit param. """
+ req = Request.blank('/?limit=-2')
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, common.get_pagination_params, req)
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index 93b402081..be777df9b 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -127,7 +127,7 @@ class _BaseImageServiceTests(test.TestCase):
@staticmethod
def _make_fixture(name):
- fixture = {'name': 'test image',
+ fixture = {'name': name,
'updated': None,
'created': None,
'status': None,
@@ -226,6 +226,127 @@ class GlanceImageServiceTest(_BaseImageServiceTests):
expected = {'name': 'test image', 'properties': {}}
self.assertDictMatch(self.sent_to_glance['metadata'], expected)
+ def test_index_default_limit(self):
+ fixtures = []
+ ids = []
+ for i in range(10):
+ fixture = self._make_fixture('TestImage %d' % (i))
+ fixtures.append(fixture)
+ ids.append(self.service.create(self.context, fixture)['id'])
+
+ image_metas = self.service.index(self.context)
+ i = 0
+ for meta in image_metas:
+ expected = {'id': 'DONTCARE',
+ 'name': 'TestImage %d' % (i)}
+ self.assertDictMatch(meta, expected)
+ i = i + 1
+
+ def test_index_marker(self):
+ fixtures = []
+ ids = []
+ for i in range(10):
+ fixture = self._make_fixture('TestImage %d' % (i))
+ fixtures.append(fixture)
+ ids.append(self.service.create(self.context, fixture)['id'])
+
+ image_metas = self.service.index(self.context, marker=ids[1])
+ self.assertEquals(len(image_metas), 8)
+ i = 2
+ for meta in image_metas:
+ expected = {'id': 'DONTCARE',
+ 'name': 'TestImage %d' % (i)}
+ self.assertDictMatch(meta, expected)
+ i = i + 1
+
+ def test_index_limit(self):
+ fixtures = []
+ ids = []
+ for i in range(10):
+ fixture = self._make_fixture('TestImage %d' % (i))
+ fixtures.append(fixture)
+ ids.append(self.service.create(self.context, fixture)['id'])
+
+ image_metas = self.service.index(self.context, limit=3)
+ self.assertEquals(len(image_metas), 3)
+
+ def test_index_marker_and_limit(self):
+ fixtures = []
+ ids = []
+ for i in range(10):
+ fixture = self._make_fixture('TestImage %d' % (i))
+ fixtures.append(fixture)
+ ids.append(self.service.create(self.context, fixture)['id'])
+
+ image_metas = self.service.index(self.context, marker=ids[3], limit=1)
+ self.assertEquals(len(image_metas), 1)
+ i = 4
+ for meta in image_metas:
+ expected = {'id': 'DONTCARE',
+ 'name': 'TestImage %d' % (i)}
+ self.assertDictMatch(meta, expected)
+ i = i + 1
+
+ def test_detail_marker(self):
+ fixtures = []
+ ids = []
+ for i in range(10):
+ fixture = self._make_fixture('TestImage %d' % (i))
+ fixtures.append(fixture)
+ ids.append(self.service.create(self.context, fixture)['id'])
+
+ image_metas = self.service.detail(self.context, marker=ids[1])
+ self.assertEquals(len(image_metas), 8)
+ i = 2
+ for meta in image_metas:
+ expected = {
+ 'id': 'DONTCARE',
+ 'status': None,
+ 'is_public': True,
+ 'name': 'TestImage %d' % (i),
+ 'properties': {
+ 'updated': None,
+ 'created': None,
+ },
+ }
+
+ self.assertDictMatch(meta, expected)
+ i = i + 1
+
+ def test_detail_limit(self):
+ fixtures = []
+ ids = []
+ for i in range(10):
+ fixture = self._make_fixture('TestImage %d' % (i))
+ fixtures.append(fixture)
+ ids.append(self.service.create(self.context, fixture)['id'])
+
+ image_metas = self.service.detail(self.context, limit=3)
+ self.assertEquals(len(image_metas), 3)
+
+ def test_detail_marker_and_limit(self):
+ fixtures = []
+ ids = []
+ for i in range(10):
+ fixture = self._make_fixture('TestImage %d' % (i))
+ fixtures.append(fixture)
+ ids.append(self.service.create(self.context, fixture)['id'])
+
+ image_metas = self.service.detail(self.context, marker=ids[3], limit=3)
+ self.assertEquals(len(image_metas), 3)
+ i = 4
+ for meta in image_metas:
+ expected = {
+ 'id': 'DONTCARE',
+ 'status': None,
+ 'is_public': True,
+ 'name': 'TestImage %d' % (i),
+ 'properties': {
+ 'updated': None, 'created': None},
+ }
+ self.assertDictMatch(meta, expected)
+ i = i + 1
+
class ImageControllerWithGlanceServiceTest(test.TestCase):
"""
@@ -248,6 +369,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
fakes.stub_out_key_pair_funcs(self.stubs)
self.fixtures = self._make_image_fixtures()
fakes.stub_out_glance(self.stubs, initial_fixtures=self.fixtures)
+ fakes.stub_out_compute_api_snapshot(self.stubs)
def tearDown(self):
"""Run after each test."""
@@ -713,7 +835,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_service = mocker.CreateMockAnything()
context = object()
filters = {'name': 'testname'}
- image_service.index(context, filters).AndReturn([])
+ image_service.index(
+ context, filters=filters, marker=0, limit=0).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images?name=testname')
@@ -727,7 +850,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_service = mocker.CreateMockAnything()
context = object()
filters = {'status': 'ACTIVE'}
- image_service.index(context, filters).AndReturn([])
+ image_service.index(
+ context, filters=filters, marker=0, limit=0).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images?status=ACTIVE')
@@ -741,7 +865,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_service = mocker.CreateMockAnything()
context = object()
filters = {'property-test': '3'}
- image_service.index(context, filters).AndReturn([])
+ image_service.index(
+ context, filters=filters, marker=0, limit=0).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images?property-test=3')
@@ -755,7 +880,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_service = mocker.CreateMockAnything()
context = object()
filters = {'status': 'ACTIVE'}
- image_service.index(context, filters).AndReturn([])
+ image_service.index(
+ context, filters=filters, marker=0, limit=0).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images?status=ACTIVE&UNSUPPORTEDFILTER=testname')
@@ -769,7 +895,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_service = mocker.CreateMockAnything()
context = object()
filters = {}
- image_service.index(context, filters).AndReturn([])
+ image_service.index(
+ context, filters=filters, marker=0, limit=0).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images')
@@ -783,7 +910,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_service = mocker.CreateMockAnything()
context = object()
filters = {'name': 'testname'}
- image_service.detail(context, filters).AndReturn([])
+ image_service.detail(
+ context, filters=filters, marker=0, limit=0).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images/detail?name=testname')
@@ -797,7 +925,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_service = mocker.CreateMockAnything()
context = object()
filters = {'status': 'ACTIVE'}
- image_service.detail(context, filters).AndReturn([])
+ image_service.detail(
+ context, filters=filters, marker=0, limit=0).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images/detail?status=ACTIVE')
@@ -811,7 +940,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_service = mocker.CreateMockAnything()
context = object()
filters = {'property-test': '3'}
- image_service.detail(context, filters).AndReturn([])
+ image_service.detail(
+ context, filters=filters, marker=0, limit=0).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images/detail?property-test=3')
@@ -825,7 +955,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_service = mocker.CreateMockAnything()
context = object()
filters = {'status': 'ACTIVE'}
- image_service.detail(context, filters).AndReturn([])
+ image_service.detail(
+ context, filters=filters, marker=0, limit=0).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images/detail?status=ACTIVE&UNSUPPORTEDFILTER=testname')
@@ -839,7 +970,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_service = mocker.CreateMockAnything()
context = object()
filters = {}
- image_service.detail(context, filters).AndReturn([])
+ image_service.detail(
+ context, filters=filters, marker=0, limit=0).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images/detail')
@@ -870,6 +1002,79 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 404)
+ def test_create_image(self):
+
+ body = dict(image=dict(serverId='123', name='Backup 1'))
+ req = webob.Request.blank('/v1.0/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, response.status_int)
+
+ def test_create_image_no_server_id(self):
+
+ body = dict(image=dict(name='Backup 1'))
+ req = webob.Request.blank('/v1.0/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
+ def test_create_image_v1_1(self):
+
+ body = dict(image=dict(serverRef='123', name='Backup 1'))
+ req = webob.Request.blank('/v1.1/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, response.status_int)
+
+ def test_create_image_v1_1_xml_serialization(self):
+
+ body = dict(image=dict(serverRef='123', name='Backup 1'))
+ req = webob.Request.blank('/v1.1/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ req.headers["accept"] = "application/xml"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, response.status_int)
+ resp_xml = minidom.parseString(response.body.replace(" ", ""))
+ expected_href = "http://localhost/v1.1/images/123"
+ expected_image = minidom.parseString("""
+ <image
+ created="None"
+ id="123"
+ name="None"
+ serverRef="http://localhost/v1.1/servers/123"
+ status="ACTIVE"
+ updated="None"
+ xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <links>
+ <link href="%(expected_href)s" rel="self"/>
+ <link href="%(expected_href)s" rel="bookmark"
+ type="application/json" />
+ <link href="%(expected_href)s" rel="bookmark"
+ type="application/xml" />
+ </links>
+ </image>
+ """.replace(" ", "") % (locals()))
+
+ self.assertEqual(expected_image.toxml(), resp_xml.toxml())
+
+ def test_create_image_v1_1_no_server_ref(self):
+
+ body = dict(image=dict(name='Backup 1'))
+ req = webob.Request.blank('/v1.1/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
@classmethod
def _make_image_fixtures(cls):
image_id = 123
diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py
index 6d108d494..041da1e13 100644
--- a/nova/tests/image/test_glance.py
+++ b/nova/tests/image/test_glance.py
@@ -34,7 +34,7 @@ class StubGlanceClient(object):
def get_image_meta(self, image_id):
return self.images[image_id]
- def get_images_detailed(self, filters=None):
+ def get_images_detailed(self, filters=None, marker=None, limit=None):
return self.images.itervalues()
def get_image(self, image_id):
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index 1bf1271c4..ba133c860 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -26,17 +26,16 @@ from eventlet import greenthread
from nova import context
from nova import crypto
from nova import db
+from nova import exception
from nova import flags
from nova import log as logging
from nova import rpc
from nova import test
from nova import utils
-from nova import exception
from nova.auth import manager
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.image import local
-from nova.exception import NotFound
FLAGS = flags.FLAGS
@@ -68,7 +67,7 @@ class CloudTestCase(test.TestCase):
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
- 'type': 'machine'}}
+ 'type': 'machine', 'image_state': 'available'}}
self.stubs.Set(local.LocalImageService, 'show', fake_show)
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
@@ -290,7 +289,7 @@ class CloudTestCase(test.TestCase):
'type': 'machine'}}]
def fake_show_none(meh, context, id):
- raise NotFound
+ raise exception.ImageNotFound(image_id='bad_image_id')
self.stubs.Set(local.LocalImageService, 'detail', fake_detail)
# list all
@@ -308,7 +307,7 @@ class CloudTestCase(test.TestCase):
self.stubs.UnsetAll()
self.stubs.Set(local.LocalImageService, 'show', fake_show_none)
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show_none)
- self.assertRaises(NotFound, describe_images,
+ self.assertRaises(exception.ImageNotFound, describe_images,
self.context, ['ami-fake'])
def test_describe_image_attribute(self):
@@ -445,6 +444,64 @@ class CloudTestCase(test.TestCase):
self._create_key('test')
self.cloud.delete_key_pair(self.context, 'test')
+ def test_run_instances(self):
+ kwargs = {'image_id': FLAGS.default_image,
+ 'instance_type': FLAGS.default_instance_type,
+ 'max_count': 1}
+ run_instances = self.cloud.run_instances
+ result = run_instances(self.context, **kwargs)
+ instance = result['instancesSet'][0]
+ self.assertEqual(instance['imageId'], 'ami-00000001')
+ self.assertEqual(instance['displayName'], 'Server 1')
+ self.assertEqual(instance['instanceId'], 'i-00000001')
+ self.assertEqual(instance['instanceState']['name'], 'networking')
+ self.assertEqual(instance['instanceType'], 'm1.small')
+
+ def test_run_instances_image_state_none(self):
+ kwargs = {'image_id': FLAGS.default_image,
+ 'instance_type': FLAGS.default_instance_type,
+ 'max_count': 1}
+ run_instances = self.cloud.run_instances
+
+ def fake_show_no_state(self, context, id):
+ return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
+ 'type': 'machine'}}
+
+ self.stubs.UnsetAll()
+ self.stubs.Set(local.LocalImageService, 'show', fake_show_no_state)
+ self.assertRaises(exception.ApiError, run_instances,
+ self.context, **kwargs)
+
+ def test_run_instances_image_state_invalid(self):
+ kwargs = {'image_id': FLAGS.default_image,
+ 'instance_type': FLAGS.default_instance_type,
+ 'max_count': 1}
+ run_instances = self.cloud.run_instances
+
+ def fake_show_decrypt(self, context, id):
+ return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
+ 'type': 'machine', 'image_state': 'decrypting'}}
+
+ self.stubs.UnsetAll()
+ self.stubs.Set(local.LocalImageService, 'show', fake_show_decrypt)
+ self.assertRaises(exception.ApiError, run_instances,
+ self.context, **kwargs)
+
+ def test_run_instances_image_status_active(self):
+ kwargs = {'image_id': FLAGS.default_image,
+ 'instance_type': FLAGS.default_instance_type,
+ 'max_count': 1}
+ run_instances = self.cloud.run_instances
+
+ def fake_show_stat_active(self, context, id):
+ return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
+ 'type': 'machine'}, 'status': 'active'}
+
+ self.stubs.Set(local.LocalImageService, 'show', fake_show_stat_active)
+
+ result = run_instances(self.context, **kwargs)
+ self.assertEqual(len(result['instancesSet']), 1)
+
def test_terminate_instances(self):
inst1 = db.instance_create(self.context, {'reservation_id': 'a',
'image_ref': 1,
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index 687964094..8b4183164 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -14,6 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
import eventlet
import mox
import os
@@ -125,6 +126,7 @@ class CacheConcurrencyTestCase(test.TestCase):
class LibvirtConnTestCase(test.TestCase):
+
def setUp(self):
super(LibvirtConnTestCase, self).setUp()
connection._late_load_cheetah()
@@ -207,6 +209,29 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
connection.LibvirtConnection._conn = fake
+ def fake_lookup(self, instance_name):
+
+ class FakeVirtDomain(object):
+
+ def snapshotCreateXML(self, *args):
+ return None
+
+ def XMLDesc(self, *args):
+ return """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ </disk>
+ </devices>
+ </domain>
+ """
+
+ return FakeVirtDomain()
+
+ def fake_execute(self, *args):
+ open(args[-1], "a").close()
+
def create_service(self, **kwargs):
service_ref = {'host': kwargs.get('host', 'dummy'),
'binary': 'nova-compute',
@@ -283,43 +308,56 @@ class LibvirtConnTestCase(test.TestCase):
self._check_xml_and_container(instance_data)
def test_snapshot(self):
+ if not self.lazy_load_library_exists():
+ return
+
FLAGS.image_service = 'nova.image.fake.FakeImageService'
- # Only file-based instance storages are supported at the moment
- test_xml = """
- <domain type='kvm'>
- <devices>
- <disk type='file'>
- <source file='filename'/>
- </disk>
- </devices>
- </domain>
- """
+ # Start test
+ image_service = utils.import_object(FLAGS.image_service)
- class FakeVirtDomain(object):
+ # Assuming that base image already exists in image_service
+ instance_ref = db.instance_create(self.context, self.test_instance)
+ properties = {'instance_id': instance_ref['id'],
+ 'user_id': str(self.context.user_id)}
+ snapshot_name = 'test-snap'
+ sent_meta = {'name': snapshot_name, 'is_public': False,
+ 'status': 'creating', 'properties': properties}
+ # Create new image. It will be updated in snapshot method
+ # To work with it from snapshot, the single image_service is needed
+ recv_meta = image_service.create(context, sent_meta)
- def __init__(self):
- pass
+ self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
+ connection.LibvirtConnection._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(connection.utils, 'execute')
+ connection.utils.execute = self.fake_execute
- def snapshotCreateXML(self, *args):
- return None
+ self.mox.ReplayAll()
- def XMLDesc(self, *args):
- return test_xml
+ conn = connection.LibvirtConnection(False)
+ conn.snapshot(instance_ref, recv_meta['id'])
- def fake_lookup(instance_name):
- if instance_name == instance_ref.name:
- return FakeVirtDomain()
+ snapshot = image_service.show(context, recv_meta['id'])
+ self.assertEquals(snapshot['properties']['image_state'], 'available')
+ self.assertEquals(snapshot['status'], 'active')
+ self.assertEquals(snapshot['name'], snapshot_name)
+
+ def test_snapshot_no_image_architecture(self):
+ if not self.lazy_load_library_exists():
+ return
- def fake_execute(*args):
- # Touch filename to pass 'with open(out_path)'
- open(args[-1], "a").close()
+ FLAGS.image_service = 'nova.image.fake.FakeImageService'
# Start test
image_service = utils.import_object(FLAGS.image_service)
+ # Assign image_ref = 2 from nova/images/fakes for testing different
+ # base image
+ test_instance = copy.deepcopy(self.test_instance)
+ test_instance["image_ref"] = "2"
+
# Assuming that base image already exists in image_service
- instance_ref = db.instance_create(self.context, self.test_instance)
+ instance_ref = db.instance_create(self.context, test_instance)
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
snapshot_name = 'test-snap'
@@ -330,9 +368,9 @@ class LibvirtConnTestCase(test.TestCase):
recv_meta = image_service.create(context, sent_meta)
self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
- connection.LibvirtConnection._conn.lookupByName = fake_lookup
+ connection.LibvirtConnection._conn.lookupByName = self.fake_lookup
self.mox.StubOutWithMock(connection.utils, 'execute')
- connection.utils.execute = fake_execute
+ connection.utils.execute = self.fake_execute
self.mox.ReplayAll()
@@ -724,6 +762,31 @@ class LibvirtConnTestCase(test.TestCase):
super(LibvirtConnTestCase, self).tearDown()
+class NWFilterFakes:
+ def __init__(self):
+ self.filters = {}
+
+ def nwfilterLookupByName(self, name):
+ if name in self.filters:
+ return self.filters[name]
+ raise libvirt.libvirtError('Filter Not Found')
+
+ def filterDefineXMLMock(self, xml):
+ class FakeNWFilterInternal:
+ def __init__(self, parent, name):
+ self.name = name
+ self.parent = parent
+
+ def undefine(self):
+ del self.parent.filters[self.name]
+ pass
+ tree = xml_to_tree(xml)
+ name = tree.get('name')
+ if name not in self.filters:
+ self.filters[name] = FakeNWFilterInternal(self, name)
+ return True
+
+
class IptablesFirewallTestCase(test.TestCase):
def setUp(self):
super(IptablesFirewallTestCase, self).setUp()
@@ -741,6 +804,20 @@ class IptablesFirewallTestCase(test.TestCase):
self.fw = firewall.IptablesFirewallDriver(
get_connection=lambda: self.fake_libvirt_connection)
+ def lazy_load_library_exists(self):
+        """Check whether libvirt is available."""
+        # Try to import libvirt; if the import fails, skip the test.
+ try:
+ import libvirt
+ import libxml2
+ except ImportError:
+ return False
+ global libvirt
+ libvirt = __import__('libvirt')
+ connection.libvirt = __import__('libvirt')
+ connection.libxml2 = __import__('libxml2')
+ return True
+
def tearDown(self):
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
@@ -946,6 +1023,40 @@ class IptablesFirewallTestCase(test.TestCase):
self.mox.ReplayAll()
self.fw.do_refresh_security_group_rules("fake")
+ def test_unfilter_instance_undefines_nwfilter(self):
+ # Skip if non-libvirt environment
+ if not self.lazy_load_library_exists():
+ return
+
+ admin_ctxt = context.get_admin_context()
+
+ fakefilter = NWFilterFakes()
+ self.fw.nwfilter._conn.nwfilterDefineXML =\
+ fakefilter.filterDefineXMLMock
+ self.fw.nwfilter._conn.nwfilterLookupByName =\
+ fakefilter.nwfilterLookupByName
+
+ instance_ref = self._create_instance_ref()
+ inst_id = instance_ref['id']
+ instance = db.instance_get(self.context, inst_id)
+
+ ip = '10.11.12.13'
+ network_ref = db.project_get_network(self.context, 'fake')
+ fixed_ip = {'address': ip, 'network_id': network_ref['id']}
+ db.fixed_ip_create(admin_ctxt, fixed_ip)
+ db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
+ 'instance_id': inst_id})
+ self.fw.setup_basic_filtering(instance)
+ self.fw.prepare_instance_filter(instance)
+ self.fw.apply_instance_filter(instance)
+ original_filter_count = len(fakefilter.filters)
+ self.fw.unfilter_instance(instance)
+
+ # should undefine just the instance filter
+ self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
+
+ db.instance_destroy(admin_ctxt, instance_ref['id'])
+
class NWFilterTestCase(test.TestCase):
def setUp(self):
@@ -1122,3 +1233,37 @@ class NWFilterTestCase(test.TestCase):
network_info,
"fake")
self.assertEquals(len(result), 3)
+
+ def test_unfilter_instance_undefines_nwfilters(self):
+ admin_ctxt = context.get_admin_context()
+
+ fakefilter = NWFilterFakes()
+ self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
+ self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName
+
+ instance_ref = self._create_instance()
+ inst_id = instance_ref['id']
+
+ self.security_group = self.setup_and_return_security_group()
+
+ db.instance_add_security_group(self.context, inst_id,
+ self.security_group.id)
+
+ instance = db.instance_get(self.context, inst_id)
+
+ ip = '10.11.12.13'
+ network_ref = db.project_get_network(self.context, 'fake')
+ fixed_ip = {'address': ip, 'network_id': network_ref['id']}
+ db.fixed_ip_create(admin_ctxt, fixed_ip)
+ db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
+ 'instance_id': inst_id})
+ self.fw.setup_basic_filtering(instance)
+ self.fw.prepare_instance_filter(instance)
+ self.fw.apply_instance_filter(instance)
+ original_filter_count = len(fakefilter.filters)
+ self.fw.unfilter_instance(instance)
+
+ # should undefine 2 filters: instance and instance-secgroup
+ self.assertEqual(original_filter_count - len(fakefilter.filters), 2)
+
+ db.instance_destroy(admin_ctxt, instance_ref['id'])
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 35308d95f..151a3e909 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -42,20 +42,6 @@ def stubout_instance_snapshot(stubs):
stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)
- def fake_wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
- original_parent_uuid):
- from nova.virt.xenapi.fake import create_vdi
- name_label = "instance-%s" % instance_id
- #TODO: create fake SR record
- sr_ref = "fakesr"
- vdi_ref = create_vdi(name_label=name_label, read_only=False,
- sr_ref=sr_ref, sharable=False)
- vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
- vdi_uuid = vdi_rec['uuid']
- return vdi_uuid
-
- stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)
-
def fake_parse_xmlrpc_value(val):
return val
@@ -251,10 +237,10 @@ class FakeSessionForMigrationTests(fake.SessionBase):
def __init__(self, uri):
super(FakeSessionForMigrationTests, self).__init__(uri)
- def VDI_get_by_uuid(*args):
+ def VDI_get_by_uuid(self, *args):
return 'hurr'
- def VDI_resize_online(*args):
+ def VDI_resize_online(self, *args):
pass
def VM_start(self, _1, ref, _2, _3):
diff --git a/nova/twistd.py b/nova/twistd.py
index c07ed991f..15cf67825 100644
--- a/nova/twistd.py
+++ b/nova/twistd.py
@@ -78,7 +78,7 @@ def WrapTwistedOptions(wrapped):
self._absorbParameters()
self._absorbHandlers()
- super(TwistedOptionsToFlags, self).__init__()
+ wrapped.__init__(self)
def _absorbFlags(self):
twistd_flags = []
@@ -163,12 +163,12 @@ def WrapTwistedOptions(wrapped):
def parseArgs(self, *args):
# TODO(termie): figure out a decent way of dealing with args
#return
- super(TwistedOptionsToFlags, self).parseArgs(*args)
+ wrapped.parseArgs(self, *args)
def postOptions(self):
self._doHandlers()
- super(TwistedOptionsToFlags, self).postOptions()
+ wrapped.postOptions(self)
def __getitem__(self, key):
key = key.replace('-', '_')
diff --git a/nova/utils.py b/nova/utils.py
index b5aea54d0..be6fcd19f 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -142,24 +142,26 @@ def execute(*cmd, **kwargs):
env = os.environ.copy()
if addl_env:
env.update(addl_env)
+ _PIPE = subprocess.PIPE # pylint: disable=E1101
obj = subprocess.Popen(cmd,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
+ stdin=_PIPE,
+ stdout=_PIPE,
+ stderr=_PIPE,
env=env)
result = None
if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
- obj.stdin.close()
- if obj.returncode:
- LOG.debug(_('Result was %s') % obj.returncode)
+ obj.stdin.close() # pylint: disable=E1101
+ _returncode = obj.returncode # pylint: disable=E1101
+ if _returncode:
+ LOG.debug(_('Result was %s') % _returncode)
if type(check_exit_code) == types.IntType \
- and obj.returncode != check_exit_code:
+ and _returncode != check_exit_code:
(stdout, stderr) = result
raise exception.ProcessExecutionError(
- exit_code=obj.returncode,
+ exit_code=_returncode,
stdout=stdout,
stderr=stderr,
cmd=' '.join(cmd))
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index c491418ae..98cdff311 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -403,8 +403,7 @@ class LibvirtConnection(driver.ComputeDriver):
'is_public': False,
'status': 'active',
'name': snapshot['name'],
- 'properties': {'architecture':
- base['properties']['architecture'],
+ 'properties': {
'kernel_id': instance['kernel_id'],
'image_location': 'snapshot',
'image_state': 'available',
@@ -412,6 +411,9 @@ class LibvirtConnection(driver.ComputeDriver):
'ramdisk_id': instance['ramdisk_id'],
}
}
+ if 'architecture' in base['properties']:
+ arch = base['properties']['architecture']
+ metadata['properties']['architecture'] = arch
# Make the snapshot
snapshot_name = uuid.uuid4().hex
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
index 12727f2b1..84153fa1e 100644
--- a/nova/virt/libvirt/firewall.py
+++ b/nova/virt/libvirt/firewall.py
@@ -285,8 +285,29 @@ class NWFilterFirewall(FirewallDriver):
tpool.execute(self._conn.nwfilterDefineXML, xml)
def unfilter_instance(self, instance):
- # Nothing to do
- pass
+ """Clear out the nwfilter rules."""
+ network_info = netutils.get_network_info(instance)
+ instance_name = instance.name
+ for (network, mapping) in network_info:
+ nic_id = mapping['mac'].replace(':', '')
+ instance_filter_name = self._instance_filter_name(instance, nic_id)
+
+ try:
+ self._conn.nwfilterLookupByName(instance_filter_name).\
+ undefine()
+ except libvirt.libvirtError:
+ LOG.debug(_('The nwfilter(%(instance_filter_name)s) '
+ 'for %(instance_name)s is not found.') % locals())
+
+ instance_secgroup_filter_name =\
+ '%s-secgroup' % (self._instance_filter_name(instance))
+
+ try:
+ self._conn.nwfilterLookupByName(instance_secgroup_filter_name)\
+ .undefine()
+ except libvirt.libvirtError:
+ LOG.debug(_('The nwfilter(%(instance_secgroup_filter_name)s) '
+ 'for %(instance_name)s is not found.') % locals())
def prepare_instance_filter(self, instance, network_info=None):
"""
@@ -452,6 +473,7 @@ class IptablesFirewallDriver(FirewallDriver):
if self.instances.pop(instance['id'], None):
self.remove_filters_for_instance(instance)
self.iptables.apply()
+ self.nwfilter.unfilter_instance(instance)
else:
LOG.info(_('Attempted to unfilter instance %s which is not '
'filtered'), instance['id'])
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 165888cb2..113198689 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -340,10 +340,6 @@ class SessionBase(object):
return
db_ref['xenstore_data'][key] = None
- def network_get_all_records_where(self, _1, _2):
- # TODO (salvatore-orlando): filter table on _2
- return _db_content['network']
-
def VM_add_to_xenstore_data(self, _1, vm_ref, key, value):
db_ref = _db_content['VM'][vm_ref]
if not 'xenstore_data' in db_ref:
@@ -354,7 +350,7 @@ class SessionBase(object):
#Always return 12GB available
return 12 * 1024 * 1024 * 1024
- def host_call_plugin(*args):
+ def host_call_plugin(self, *args):
return 'herp'
def network_get_all_records_where(self, _1, filter):
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
index 0c00d168b..46031ebe8 100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
@@ -244,7 +244,7 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port, os_type):
conn = httplib.HTTPConnection(glance_host, glance_port)
# NOTE(sirp): httplib under python2.4 won't accept a file-like object
# to request
- conn.putrequest('PUT', '/images/%s' % image_id)
+ conn.putrequest('PUT', '/v1/images/%s' % image_id)
# NOTE(sirp): There is some confusion around OVF. Here's a summary of
# where we currently stand:
diff --git a/run_tests.sh b/run_tests.sh
index 9aa555484..c7bcd5d67 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -67,14 +67,11 @@ function run_pep8 {
srcfiles=`find bin -type f ! -name "nova.conf*"`
srcfiles+=" `find tools/*`"
srcfiles+=" nova setup.py plugins/xenserver/xenapi/etc/xapi.d/plugins/glance"
- pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py ${srcfiles}
+ # Just run PEP8 in current environment
+ ${wrapper} pep8 --repeat --show-pep8 --show-source \
+ --exclude=vcsversion.py ${srcfiles}
}
-if [ $just_pep8 -eq 1 ]; then
- run_pep8
- exit
-fi
-
NOSETESTS="python run_tests.py $noseargs"
if [ $never_venv -eq 0 ]
@@ -103,6 +100,11 @@ then
fi
fi
+if [ $just_pep8 -eq 1 ]; then
+ run_pep8
+ exit
+fi
+
run_tests || exit
# Also run pep8 if no options were provided.
diff --git a/tools/pip-requires b/tools/pip-requires
index 035e4347d..e81ef944a 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -34,4 +34,3 @@ coverage
nosexcover
GitPython
paramiko
-nova_adminclient