-rwxr-xr-x  bin/nova-manage  18
-rwxr-xr-x  doc/generate_autodoc_index.sh  1
-rw-r--r--  doc/source/_static/jquery.tweet.js  6
-rw-r--r--  doc/source/_static/tweaks.css  4
-rw-r--r--  doc/source/api_ext/index.rst  4
-rw-r--r--  doc/source/community.rst  12
-rw-r--r--  doc/source/conf.py  4
-rw-r--r--  doc/source/conf_back.py  2
-rw-r--r--  doc/source/devref/filter_scheduler.rst  18
-rw-r--r--  doc/source/devref/index.rst  2
-rw-r--r--  doc/source/devref/network.rst  2
-rw-r--r--  doc/source/devref/threading.rst  2
-rw-r--r--  doc/source/index.rst  8
-rw-r--r--  doc/source/man/novamanage.rst  5
-rw-r--r--  doc/source/nova.concepts.rst  10
-rw-r--r--  doc/source/runnova/managing.images.rst  2
-rw-r--r--  doc/source/runnova/managing.instance.types.rst  16
-rw-r--r--  doc/source/runnova/monitoring.rst  2
-rw-r--r--  doc/source/runnova/network.flat.rst  4
-rw-r--r--  doc/source/runnova/network.vlan.rst  24
-rw-r--r--  doc/source/runnova/nova.manage.rst  5
-rw-r--r--  doc/source/runnova/vncconsole.rst  2
-rw-r--r--  doc/source/service.architecture.rst  2
-rw-r--r--  doc/source/vmwareapi_readme.rst  452
-rw-r--r--  etc/nova/nova.conf.sample  1088
-rw-r--r--  nova/api/metadata/handler.py  21
-rw-r--r--  nova/api/openstack/common.py  27
-rw-r--r--  nova/api/openstack/compute/contrib/flavorextradata.py  7
-rw-r--r--  nova/api/openstack/compute/contrib/hosts.py  4
-rw-r--r--  nova/api/openstack/compute/limits.py  4
-rw-r--r--  nova/api/validator.py  10
-rw-r--r--  nova/auth/fakeldap.py  6
-rw-r--r--  nova/auth/manager.py  174
-rw-r--r--  nova/cert/manager.py  5
-rw-r--r--  nova/common/policy.py  91
-rw-r--r--  nova/compat/flagfile.py  2
-rw-r--r--  nova/compute/manager.py  56
-rw-r--r--  nova/db/sqlalchemy/models.py  2
-rw-r--r--  nova/flags.py  3
-rw-r--r--  nova/network/api.py  20
-rw-r--r--  nova/network/manager.py  156
-rw-r--r--  nova/network/model.py  34
-rw-r--r--  nova/network/quantum/manager.py  4
-rw-r--r--  nova/notifier/api.py  36
-rw-r--r--  nova/objectstore/s3server.py  2
-rw-r--r--  nova/openstack/common/cfg.py  43
-rw-r--r--  nova/policy.py  10
-rw-r--r--  nova/scheduler/driver.py  1
-rw-r--r--  nova/scheduler/least_cost.py  15
-rw-r--r--  nova/scheduler/manager.py  11
-rw-r--r--  nova/tests/api/ec2/test_cloud.py  3
-rw-r--r--  nova/tests/api/openstack/test_faults.py  2
-rw-r--r--  nova/tests/test_compute.py  35
-rw-r--r--  nova/tests/test_misc.py  2
-rw-r--r--  nova/tests/test_network.py  2
-rw-r--r--  nova/tests/test_xenapi.py  4
-rw-r--r--  nova/utils.py  97
-rw-r--r--  nova/virt/driver.py  32
-rw-r--r--  nova/virt/fake.py  2
-rw-r--r--  nova/virt/firewall.py  4
-rw-r--r--  nova/virt/libvirt/connection.py  19
-rw-r--r--  nova/virt/vmwareapi/vmops.py  20
-rw-r--r--  nova/virt/vmwareapi_conn.py  2
-rw-r--r--  nova/virt/xenapi/fake.py  2
-rw-r--r--  nova/virt/xenapi/pool.py  2
-rw-r--r--  nova/virt/xenapi/vm_utils.py  18
-rw-r--r--  nova/virt/xenapi/vmops.py  23
-rw-r--r--  nova/virt/xenapi_conn.py  4
-rw-r--r--  nova/volume/driver.py  19
-rw-r--r--  nova/volume/manager.py  6
-rw-r--r--  nova/vsa/api.py  8
-rwxr-xr-x  run_tests.sh  2
-rw-r--r--  tools/conf/create_conf.py  36
-rwxr-xr-x  tools/conf/generate_sample.sh (renamed from tools/conf/run.sh)  8
-rw-r--r--  tools/esx/guest_tool.py  808
75 files changed, 2474 insertions, 1125 deletions
diff --git a/bin/nova-manage b/bin/nova-manage
index 6f4cde35d..234f9d45e 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -78,6 +78,7 @@ if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
+from nova.compat import flagfile
from nova import context
from nova import crypto
from nova import db
@@ -2141,6 +2142,23 @@ class ConfigCommands(object):
if value is not None:
print '%s = %s' % (key, value)
+ @args('--infile', dest='file_path', metavar='<path>',
+ help='old-style flagfile to convert to config')
+ @args('--outfile', dest='file_path', metavar='<path>',
+ help='path for output file. Writes config'
+ 'to stdout if not specified.')
+ def convert(self, infile, outfile=None):
+ """Converts a flagfile and prints results to stdout."""
+ arg = '--flagfile=%s' % infile
+ with flagfile.handle_flagfiles_managed([arg]) as newargs:
+ with open(newargs[0].split('=')[1]) as configfile:
+ config = configfile.read()
+ if outfile:
+ with open(outfile, 'w') as configfile:
+ configfile.write(config)
+ else:
+ print config,
+
class GetLogCommands(object):
"""Get logging information"""
diff --git a/doc/generate_autodoc_index.sh b/doc/generate_autodoc_index.sh
index cdefb6c46..bdfa73a49 100755
--- a/doc/generate_autodoc_index.sh
+++ b/doc/generate_autodoc_index.sh
@@ -20,6 +20,7 @@ do
( cat <<EOF
${heading}
${underline}
+
.. automodule:: ${x}
:members:
:undoc-members:
diff --git a/doc/source/_static/jquery.tweet.js b/doc/source/_static/jquery.tweet.js
index c93fea876..79bf0bdb4 100644
--- a/doc/source/_static/jquery.tweet.js
+++ b/doc/source/_static/jquery.tweet.js
@@ -1,5 +1,5 @@
(function($) {
-
+
$.fn.tweet = function(o){
var s = {
username: ["seaofclouds"], // [string] required, unless you want to display our tweets. :) it can be an array, just do ["username1","username2","etc"]
@@ -17,9 +17,9 @@
loading_text: null, // [string] optional loading text, displayed while tweets load
query: null // [string] optional search query
};
-
+
if(o) $.extend(s, o);
-
+
$.fn.extend({
linkUrl: function() {
var returning = [];
diff --git a/doc/source/_static/tweaks.css b/doc/source/_static/tweaks.css
index 7c57c8f35..046ead840 100644
--- a/doc/source/_static/tweaks.css
+++ b/doc/source/_static/tweaks.css
@@ -81,7 +81,7 @@ http://twitter.com/necolas
Created: 02 March 2010
Version: 1.1 (21 October 2010)
-Dual licensed under MIT and GNU GPLv2 © Nicolas Gallagher
+Dual licensed under MIT and GNU GPLv2 © Nicolas Gallagher
------------------------------------------ */
/* THE SPEECH BUBBLE
------------------------------------------------------------------------------------------------------------------------------- */
@@ -96,7 +96,7 @@ Dual licensed under MIT and GNU GPLv2 © Nicolas Gallagher
border:5px solid #BC1518;
color:#333;
background:#fff;
-
+
/* css3 */
-moz-border-radius:10px;
-webkit-border-radius:10px;
diff --git a/doc/source/api_ext/index.rst b/doc/source/api_ext/index.rst
index 69858008b..f53a8051b 100644
--- a/doc/source/api_ext/index.rst
+++ b/doc/source/api_ext/index.rst
@@ -20,13 +20,13 @@ Compute API Extensions
In this section you will find extension reference information. If you need to write an extension's reference page, you can find an RST template in doc/source/api_ext/rst_extension_template.rst.
-The Compute API specification is published to http://docs.openstack.org/api and the source is found in https://github.com/openstack/compute-api. These extensions extend the core API.
+The Compute API specification is published to http://docs.openstack.org/api and the source is found in https://github.com/openstack/compute-api. These extensions extend the core API.
Extensions
----------
.. toctree::
:maxdepth: 3
-
+
ext_config_drive.rst
ext_floating_ip_dns.rst
ext_floating_ips.rst
diff --git a/doc/source/community.rst b/doc/source/community.rst
index ad829d2d7..1b36e7dcc 100644
--- a/doc/source/community.rst
+++ b/doc/source/community.rst
@@ -1,6 +1,6 @@
..
Copyright 2010-2011 United States Government as represented by the
- Administrator of the National Aeronautics and Space Administration.
+ Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -27,8 +27,8 @@ How to Join the OpenStack Community
Our community welcomes all people interested in open source cloud computing, and there are no formal
membership requirements. The best way to join the community is to talk with others online or at a meetup
-and offer contributions through Launchpad, the wiki, or blogs. We welcome all types of contributions,
-from blueprint designs to documentation to testing to deployment scripts.
+and offer contributions through Launchpad, the wiki, or blogs. We welcome all types of contributions,
+from blueprint designs to documentation to testing to deployment scripts.
Contributing Code
-----------------
@@ -89,14 +89,14 @@ aggregation with your blog posts, there are instructions for `adding your blog <
Twitter
-------
-Because all the cool kids do it: `@openstack <http://twitter.com/openstack>`_. Also follow the
+Because all the cool kids do it: `@openstack <http://twitter.com/openstack>`_. Also follow the
`#openstack <http://search.twitter.com/search?q=%23openstack>`_ tag for relevant tweets.
OpenStack Docs Site
-------------------
-The `nova.openstack.org <http://nova.openstack.org>`_ site is geared towards developer documentation,
-and the `docs.openstack.org <http://docs.openstack.org>`_ site is intended for cloud administrators
+The `nova.openstack.org <http://nova.openstack.org>`_ site is geared towards developer documentation,
+and the `docs.openstack.org <http://docs.openstack.org>`_ site is intended for cloud administrators
who are standing up and running OpenStack Compute in production. You can contribute to the Docs Site
by using git and Gerrit and contributing to the openstack-manuals project at http://github.com/openstack/openstack-manuals.
diff --git a/doc/source/conf.py b/doc/source/conf.py
index ec86a035f..78011f3a2 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -96,7 +96,7 @@ exclude_trees = []
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
-#add_module_names = True
+add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
@@ -110,7 +110,7 @@ modindex_common_prefix = ['nova.']
# -- Options for man page output -----------------------------------------------
-# Grouping the document tree for man pages.
+# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
man_pages = [
diff --git a/doc/source/conf_back.py b/doc/source/conf_back.py
index 744f2f699..0ab67c3a3 100644
--- a/doc/source/conf_back.py
+++ b/doc/source/conf_back.py
@@ -104,7 +104,7 @@ modindex_common_prefix = ['nova.']
# -- Options for man page output -----------------------------------------------
-# Grouping the document tree for man pages.
+# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
man_pages = [
diff --git a/doc/source/devref/filter_scheduler.rst b/doc/source/devref/filter_scheduler.rst
index 64efb77b6..01359000f 100644
--- a/doc/source/devref/filter_scheduler.rst
+++ b/doc/source/devref/filter_scheduler.rst
@@ -1,5 +1,5 @@
..
- Copyright 2011 OpenStack LLC
+ Copyright 2011 OpenStack LLC
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -17,13 +17,13 @@
Source for illustrations in doc/source/image_src/zone_distsched_illustrations.odp
(OpenOffice Impress format) Illustrations are "exported" to png and then scaled
to 400x300 or 640x480 as needed and placed in the doc/source/images directory.
-
+
Filter Scheduler
=====================
The Scheduler is akin to a Dating Service. Requests for the creation of new instances come in and the most applicable Compute nodes are selected from a large pool of potential candidates. In a small deployment we may be happy with the currently available Chance Scheduler which randomly selects a Host from the available pool. Or if you need something a little more fancy you may want to use the Filter Scheduler, which selects Compute hosts from a logical partitioning of available hosts.
- .. image:: /images/dating_service.png
+ .. image:: /images/dating_service.png
The Filter Scheduler supports filtering and weighing to make informed decisions on where a new instance should be created.
@@ -31,9 +31,9 @@ So, how does this all work?
Costs & Weights
---------------
-When deciding where to place an Instance, we compare a Weighted Cost for each Host. The Weighting, currently, is just the sum of each Cost. Costs are nothing more than integers from `0 - max_int`. Costs are computed by looking at the various Capabilities of the Host relative to the specs of the Instance being asked for. Trying to put a plain vanilla instance on a high performance host should have a very high cost. But putting a vanilla instance on a vanilla Host should have a low cost.
+When deciding where to place an Instance, we compare a Weighted Cost for each Host. The Weighting, currently, is just the sum of each Cost. Costs are nothing more than integers from `0 - max_int`. Costs are computed by looking at the various Capabilities of the Host relative to the specs of the Instance being asked for. Trying to put a plain vanilla instance on a high performance host should have a very high cost. But putting a vanilla instance on a vanilla Host should have a low cost.
-Some Costs are more esoteric. Consider a rule that says we should prefer Hosts that don't already have an instance on it that is owned by the user requesting it (to mitigate against machine failures). Here we have to look at all the other Instances on the host to compute our cost.
+Some Costs are more esoteric. Consider a rule that says we should prefer Hosts that don't already have an instance on it that is owned by the user requesting it (to mitigate against machine failures). Here we have to look at all the other Instances on the host to compute our cost.
An example of some other costs might include selecting:
* a GPU-based host over a standard CPU
@@ -42,15 +42,15 @@ An example of some other costs might include selecting:
* a host in the EU vs North America
* etc
-This Weight is computed for each Instance requested. If the customer asked for 1000 instances, the consumed resources on each Host are "virtually" depleted so the Cost can change accordingly.
+This Weight is computed for each Instance requested. If the customer asked for 1000 instances, the consumed resources on each Host are "virtually" depleted so the Cost can change accordingly.
+
+ .. image:: /images/costs_weights.png
- .. image:: /images/costs_weights.png
-
Filtering and Weighing
----------------------
The filtering (excluding compute nodes incapable of fulfilling the request) and weighing (computing the relative "fitness" of a compute node to fulfill the request) rules used are very subjective operations ... Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `FilterScheduler` are flexible and extensible.
- .. image:: /images/filtering.png
+ .. image:: /images/filtering.png
Host Filter
-----------
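
To make the cost/weight description in the hunks above concrete, here is a tiny self-contained sketch of the filter-then-weigh idea. The host dictionaries and the single RAM-based cost function are hypothetical stand-ins, not Nova's actual filter or cost classes::

    def passes_filters(host, request):
        # Filtering: drop hosts that cannot fulfill the request at all.
        return host['free_ram_mb'] >= request['ram_mb']

    def ram_cost(host, request):
        # A cost is just an integer; lower is better.
        return host['free_ram_mb'] - request['ram_mb']

    COST_FUNCTIONS = [ram_cost]

    def weigh(host, request):
        # The weight is currently just the sum of the individual costs.
        return sum(fn(host, request) for fn in COST_FUNCTIONS)

    def pick_host(hosts, request):
        candidates = [h for h in hosts if passes_filters(h, request)]
        return min(candidates, key=lambda h: weigh(h, request))

    hosts = [{'name': 'hostA', 'free_ram_mb': 4096},
             {'name': 'hostB', 'free_ram_mb': 1024}]
    print pick_host(hosts, {'ram_mb': 512})['name']   # prints: hostB

With this sample data hostB wins: its leftover RAM after the request is smallest, so its summed cost is lowest.
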
diff --git a/doc/source/devref/index.rst b/doc/source/devref/index.rst
index 034e67fd4..13baa67fb 100644
--- a/doc/source/devref/index.rst
+++ b/doc/source/devref/index.rst
@@ -25,7 +25,7 @@ Programming HowTos and Tutorials
--------------------------------
.. toctree::
:maxdepth: 3
-
+
development.environment
unit_tests
addmethod.openstackapi
diff --git a/doc/source/devref/network.rst b/doc/source/devref/network.rst
index eaf13e9ba..45a4bd184 100644
--- a/doc/source/devref/network.rst
+++ b/doc/source/devref/network.rst
@@ -18,7 +18,7 @@
Networking
==========
-.. todo::
+.. todo::
* document hardware specific commands (maybe in admin guide?) (todd)
* document a map between flags and managers/backends (todd)
diff --git a/doc/source/devref/threading.rst b/doc/source/devref/threading.rst
index 1c8eb5b95..356ca2f6f 100644
--- a/doc/source/devref/threading.rst
+++ b/doc/source/devref/threading.rst
@@ -8,7 +8,7 @@ through using the Python `eventlet <http://eventlet.net/>`_ and
Green threads use a cooperative model of threading: thread context
switches can only occur when specific eventlet or greenlet library calls are
made (e.g., sleep, certain I/O calls). From the operating system's point of
-view, each OpenStack service runs in a single thread.
+view, each OpenStack service runs in a single thread.
The use of green threads reduces the likelihood of race conditions, but does
not completely eliminate them. In some cases, you may need to use the
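
The cooperative model described above is easy to see with a few lines of eventlet (the library the document already refers to). In this sketch the explicit ``eventlet.sleep(0)`` is the only point where control can pass between the two green threads; remove it and each loop runs to completion before the other starts::

    import eventlet

    def worker(name):
        for i in range(3):
            print '%s: step %d' % (name, i)
            eventlet.sleep(0)   # explicit yield point; the other green thread runs

    t1 = eventlet.spawn(worker, 'green-1')
    t2 = eventlet.spawn(worker, 'green-2')
    t1.wait()
    t2.wait()
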
diff --git a/doc/source/index.rst b/doc/source/index.rst
index ac481dee9..faaf1338e 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -1,6 +1,6 @@
..
Copyright 2010-2011 United States Government as represented by the
- Administrator of the National Aeronautics and Space Administration.
+ Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -18,8 +18,8 @@
Welcome to Nova's documentation!
================================
-Nova is a cloud computing fabric controller, the main part of an IaaS system.
-Individuals and organizations can use Nova to host and manage their own cloud
+Nova is a cloud computing fabric controller, the main part of an IaaS system.
+Individuals and organizations can use Nova to host and manage their own cloud
computing systems. Nova originated as a project out of NASA Ames Research Laboratory.
Nova is written with the following design guidelines in mind:
@@ -33,7 +33,7 @@ Nova is written with the following design guidelines in mind:
This documentation is generated by the Sphinx toolkit and lives in the source
tree. Additional draft and project documentation on Nova and other components of OpenStack can
-be found on the `OpenStack wiki`_. Cloud administrators, refer to `docs.openstack.org`_.
+be found on the `OpenStack wiki`_. Cloud administrators, refer to `docs.openstack.org`_.
Also see the :doc:`community` page for other ways to interact with the community.
diff --git a/doc/source/man/novamanage.rst b/doc/source/man/novamanage.rst
index 397cc8e80..aaff13b88 100644
--- a/doc/source/man/novamanage.rst
+++ b/doc/source/man/novamanage.rst
@@ -170,9 +170,10 @@ Nova VPN
Nova Floating IPs
~~~~~~~~~~~~~~~~~
-``nova-manage floating create <host> <ip_range>``
+``nova-manage floating create <ip_range> [--pool <pool>] [--interface <interface>]``
- Creates floating IP addresses for the named host by the given range.
+ Creates floating IP addresses for the given range, optionally specifying
+ a floating pool and a network interface.
``nova-manage floating delete <ip_range>``
diff --git a/doc/source/nova.concepts.rst b/doc/source/nova.concepts.rst
index 0fcc83ed5..90bf3b64c 100644
--- a/doc/source/nova.concepts.rst
+++ b/doc/source/nova.concepts.rst
@@ -77,13 +77,13 @@ Concept: System Architecture
Nova consists of seven main components, with the Cloud Controller component representing the global state and interacting with all other components. API Server acts as the Web services front end for the cloud controller. Compute Controller provides compute server resources, and the Object Store component provides storage services. Auth Manager provides authentication and authorization services. Volume Controller provides fast and permanent block-level storage for the comput servers. Network Controller provides virtual networks to enable compute servers to interact with each other and with the public network. Scheduler selects the most suitable compute controller to host an instance.
- .. image:: images/Novadiagram.png
+ .. image:: images/Novadiagram.png
Nova is built on a shared-nothing, messaging-based architecture. All of the major components, that is Compute Controller, Volume Controller, Network Controller, and Object Store can be run on multiple servers. Cloud Controller communicates with Object Store via HTTP (Hyper Text Transfer Protocol), but it communicates with Scheduler, Network Controller, and Volume Controller via AMQP (Advanced Message Queue Protocol). To avoid blocking each component while waiting for a response, Nova uses asynchronous calls, with a call-back that gets triggered when a response is received.
-To achieve the shared-nothing property with multiple copies of the same component, Nova keeps all the cloud system state in a distributed data store. Updates to system state are written into this store, using atomic transactions when required. Requests for system state are read out of this store. In limited cases, the read results are cached within controllers for short periods of time (for example, the current list of system users.)
+To achieve the shared-nothing property with multiple copies of the same component, Nova keeps all the cloud system state in a distributed data store. Updates to system state are written into this store, using atomic transactions when required. Requests for system state are read out of this store. In limited cases, the read results are cached within controllers for short periods of time (for example, the current list of system users.)
- .. note:: The database schema is available on the `OpenStack Wiki <http://wiki.openstack.org/NovaDatabaseSchema>`_.
+ .. note:: The database schema is available on the `OpenStack Wiki <http://wiki.openstack.org/NovaDatabaseSchema>`_.
Concept: Storage
----------------
@@ -171,7 +171,7 @@ details.
Concept: Flags
--------------
-Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within a flag file. When you install Nova packages for the Austin release, each nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth. In releases beyond Austin which was released in October 2010, all flags are set in nova.conf.
+Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within a flag file. When you install Nova packages for the Austin release, each nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth. In releases beyond Austin which was released in October 2010, all flags are set in nova.conf.
Concept: Plugins
----------------
@@ -213,7 +213,7 @@ When launching VM instances, the project manager specifies which security groups
A security group can be thought of as a security profile or a security role - it promotes the good practice of managing firewalls by role, not by machine. For example, a user could stipulate that servers with the "webapp" role must be able to connect to servers with the "mysql" role on port 3306. Going further with the security profile analogy, an instance can be launched with membership of multiple security groups - similar to a server with multiple roles. Because all rules in security groups are ACCEPT rules, it's trivial to combine them.
-Each rule in a security group must specify the source of packets to be allowed, which can either be a subnet anywhere on the Internet (in CIDR notation, with 0.0.0./0 representing the entire Internet) or another security group. In the latter case, the source security group can be any user's group. This makes it easy to grant selective access to one user's instances from instances run by the user's friends, partners, and vendors.
+Each rule in a security group must specify the source of packets to be allowed, which can either be a subnet anywhere on the Internet (in CIDR notation, with 0.0.0./0 representing the entire Internet) or another security group. In the latter case, the source security group can be any user's group. This makes it easy to grant selective access to one user's instances from instances run by the user's friends, partners, and vendors.
The creation of rules with other security groups specified as sources helps users deal with dynamic IP addressing. Without this feature, the user would have had to adjust the security groups each time a new instance is launched. This practice would become cumbersome if an application running in Nova is very dynamic and elastic, for example scales up or down frequently.
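
As a purely illustrative sketch of the ACCEPT-only model described above (ignoring protocols and port ranges): a packet source is allowed if any rule in any of the instance's security groups names either a matching CIDR or a group the source belongs to. Every name and address below is made up::

    import socket
    import struct

    def in_cidr(ip, cidr):
        """True if dotted-quad ip falls inside cidr, e.g. '10.0.0.0/8'."""
        net, bits = cidr.split('/')
        mask = 0xffffffff ^ ((1 << (32 - int(bits))) - 1)
        ip_int = struct.unpack('!I', socket.inet_aton(ip))[0]
        net_int = struct.unpack('!I', socket.inet_aton(net))[0]
        return (ip_int & mask) == (net_int & mask)

    def allowed(src_ip, groups, group_members):
        """ACCEPT if any rule in any of the instance's groups matches src_ip."""
        for group in groups:
            for source in group['rule_sources']:
                if '/' in source:                               # CIDR source
                    if in_cidr(src_ip, source):
                        return True
                elif src_ip in group_members.get(source, ()):   # other-group source
                    return True
        return False                                            # no rule matched: drop

    members = {'mysql-clients': set(['10.0.0.5'])}
    mysql = {'rule_sources': ['mysql-clients']}
    webapp = {'rule_sources': ['0.0.0.0/0']}
    print allowed('10.0.0.5', [mysql], members)   # True: source is in the allowed group
    print allowed('1.2.3.4', [mysql], members)    # False: nothing matches
    print allowed('1.2.3.4', [webapp], members)   # True: 0.0.0.0/0 matches any source
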
diff --git a/doc/source/runnova/managing.images.rst b/doc/source/runnova/managing.images.rst
index a2e618602..be128045c 100644
--- a/doc/source/runnova/managing.images.rst
+++ b/doc/source/runnova/managing.images.rst
@@ -23,4 +23,4 @@ With Nova, you can manage images either using the built-in object store or using
* Ability to store and retrieve virtual machine images
* Ability to store and retrieve metadata about these virtual machine images
-Refer to http://glance.openstack.org for additional details.
\ No newline at end of file
+Refer to http://glance.openstack.org for additional details.
\ No newline at end of file
diff --git a/doc/source/runnova/managing.instance.types.rst b/doc/source/runnova/managing.instance.types.rst
index a575e16b7..c4a405a8d 100644
--- a/doc/source/runnova/managing.instance.types.rst
+++ b/doc/source/runnova/managing.instance.types.rst
@@ -16,12 +16,12 @@
Managing Instance Types and Flavors
===================================
-You can manage instance types and instance flavors using the nova-manage command-line interface coupled with the instance_type subcommand for nova-manage.
+You can manage instance types and instance flavors using the nova-manage command-line interface coupled with the instance_type subcommand for nova-manage.
What are Instance Types or Flavors ?
------------------------------------
-Instance types describe the compute, memory and storage capacity of nova computing instances. In layman terms, this is the size (in terms of vCPUs, RAM, etc.) of the virtual server that you will be launching. In the EC2 API, these are called by names such as "m1.large" or "m1.tiny", while the OpenStack API terms these "flavors" with names like "512 MB Server".
+Instance types describe the compute, memory and storage capacity of nova computing instances. In layman terms, this is the size (in terms of vCPUs, RAM, etc.) of the virtual server that you will be launching. In the EC2 API, these are called by names such as "m1.large" or "m1.tiny", while the OpenStack API terms these "flavors" with names like "512 MB Server".
In Nova, "flavor" and "instance type" are equivalent terms. When you create an EC2 instance type, you are also creating a OpenStack API flavor. To reduce repetition, for the rest of this document I will refer to these as instance types.
@@ -34,8 +34,8 @@ In the current (Cactus) version of nova, instance types can only be created by t
Basic Management
----------------
-Instance types / flavor are managed through the nova-manage binary with
-the "instance_type" command and an appropriate subcommand. Note that you can also use
+Instance types / flavor are managed through the nova-manage binary with
+the "instance_type" command and an appropriate subcommand. Note that you can also use
the "flavor" command as a synonym for "instance_types".
To see all currently active instance types, use the list subcommand::
@@ -58,7 +58,7 @@ By default, the list subcommand only shows active instance types. To see all ins
m1.deleted: Memory: 2048MB, VCPUS: 1, Storage: 20GB, FlavorID: 2, Swap: 0GB, RXTX Quota: 0GB, RXTX Cap: 0MB, inactive
To create an instance type, use the "create" subcommand with the following positional arguments:
- * memory (expressed in megabytes)
+ * memory (expressed in megabytes)
* vcpu(s) (integer)
* local storage (expressed in gigabytes)
* flavorid (unique integer)
@@ -76,10 +76,10 @@ To delete an instance type, use the "delete" subcommand and specify the name::
# nova-manage instance_type delete m1.xxlarge
m1.xxlarge deleted
-Please note that the "delete" command only marks the instance type as
+Please note that the "delete" command only marks the instance type as
inactive in the database; it does not actually remove the instance type. This is done
-to preserve the instance type definition for long running instances (which may not
-terminate for months or years). If you are sure that you want to delete this instance
+to preserve the instance type definition for long running instances (which may not
+terminate for months or years). If you are sure that you want to delete this instance
type from the database, pass the "--purge" flag after the name::
# nova-manage instance_type delete m1.xxlarge --purge
diff --git a/doc/source/runnova/monitoring.rst b/doc/source/runnova/monitoring.rst
index 2c93c71b5..41997ee4e 100644
--- a/doc/source/runnova/monitoring.rst
+++ b/doc/source/runnova/monitoring.rst
@@ -1,6 +1,6 @@
..
Copyright 2010-2011 United States Government as represented by the
- Administrator of the National Aeronautics and Space Administration.
+ Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/doc/source/runnova/network.flat.rst b/doc/source/runnova/network.flat.rst
index 3d8680c6f..d7f37d004 100644
--- a/doc/source/runnova/network.flat.rst
+++ b/doc/source/runnova/network.flat.rst
@@ -32,8 +32,8 @@ IP addresses for VM instances are grabbed from a subnet specified by the network
* Each compute host creates a single bridge for all instances to use to attach to the external network.
* The networking configuration is injected into the instance before it is booted or it is obtained by a guest agent installed in the instance.
-
-Note that the configuration injection currently only works on linux-style systems that keep networking
+
+Note that the configuration injection currently only works on linux-style systems that keep networking
configuration in /etc/network/interfaces.
diff --git a/doc/source/runnova/network.vlan.rst b/doc/source/runnova/network.vlan.rst
index df19c7a80..551bdf3bf 100644
--- a/doc/source/runnova/network.vlan.rst
+++ b/doc/source/runnova/network.vlan.rst
@@ -22,21 +22,21 @@ VLAN Network Mode is the default mode for Nova. It provides a private network
segment for each project's instances that can be accessed via a dedicated
VPN connection from the Internet.
-In this mode, each project gets its own VLAN, Linux networking bridge, and subnet. The subnets are specified by the network administrator, and are assigned dynamically to a project when required. A DHCP Server is started for each VLAN to pass out IP addresses to VM instances from the subnet assigned to the project. All instances belonging to one project are bridged into the same VLAN for that project. The Linux networking bridges and VLANs are created by Nova when required, described in more detail in Nova VLAN Network Management Implementation.
+In this mode, each project gets its own VLAN, Linux networking bridge, and subnet. The subnets are specified by the network administrator, and are assigned dynamically to a project when required. A DHCP Server is started for each VLAN to pass out IP addresses to VM instances from the subnet assigned to the project. All instances belonging to one project are bridged into the same VLAN for that project. The Linux networking bridges and VLANs are created by Nova when required, described in more detail in Nova VLAN Network Management Implementation.
-..
+..
(this text revised above)
- Because the flat network and flat DhCP network are simple to understand and yet do not scale well enough for real-world cloud systems, this section focuses on the VLAN network implementation by the VLAN Network Manager.
+ Because the flat network and flat DhCP network are simple to understand and yet do not scale well enough for real-world cloud systems, this section focuses on the VLAN network implementation by the VLAN Network Manager.
- In the VLAN network mode, all the VM instances of a project are connected together in a VLAN with the specified private subnet. Each running VM instance is assigned an IP address within the given private subnet.
+ In the VLAN network mode, all the VM instances of a project are connected together in a VLAN with the specified private subnet. Each running VM instance is assigned an IP address within the given private subnet.
.. image:: /images/Novadiagram.png
:width: 790
-
-While network traffic between VM instances belonging to the same VLAN is always open, Nova can enforce isolation of network traffic between different projects by enforcing one VLAN per project.
-In addition, the network administrator can specify a pool of public IP addresses that users may allocate and then assign to VMs, either at boot or dynamically at run-time. This capability is similar to Amazon's 'elastic IPs'. A public IP address may be associated with a running instances, allowing the VM instance to be accessed from the public network. The public IP addresses are accessible from the network host and NATed to the private IP address of the project. A public IP address could be associated with a project using the euca-allocate-address commands.
+While network traffic between VM instances belonging to the same VLAN is always open, Nova can enforce isolation of network traffic between different projects by enforcing one VLAN per project.
+
+In addition, the network administrator can specify a pool of public IP addresses that users may allocate and then assign to VMs, either at boot or dynamically at run-time. This capability is similar to Amazon's 'elastic IPs'. A public IP address may be associated with a running instances, allowing the VM instance to be accessed from the public network. The public IP addresses are accessible from the network host and NATed to the private IP address of the project. A public IP address could be associated with a project using the euca-allocate-address commands.
This is the default networking mode and supports the most features. For multiple machine installation, it requires a switch that supports host-managed vlan tagging. In this mode, nova will create a vlan and bridge for each project. The project gets a range of private ips that are only accessible from inside the vlan. In order for a user to access the instances in their project, a special vpn instance (code named :ref:`cloudpipe <cloudpipe>`) needs to be created. Nova generates a certificate and key for the user to access the vpn and starts the vpn automatically. More information on cloudpipe can be found :ref:`here <cloudpipe>`.
@@ -65,22 +65,22 @@ We also keep as a goal a common DMZ segment for support services, meaning these
Limitations
-----------
-We kept in mind some of these limitations:
+We kept in mind some of these limitations:
* Projects / cluster limited to available VLANs in switching infrastructure
* Requires VPN for access to project segment
Implementation
--------------
-Currently Nova segregates project VLANs using 802.1q VLAN tagging in the
-switching layer. Compute hosts create VLAN-specific interfaces and bridges
+Currently Nova segregates project VLANs using 802.1q VLAN tagging in the
+switching layer. Compute hosts create VLAN-specific interfaces and bridges
as required.
-The network nodes act as default gateway for project networks and contain
+The network nodes act as default gateway for project networks and contain
all of the routing and firewall rules implementing security groups. The
network node also handles DHCP to provide instance IPs for each project.
-VPN access is provided by running a small instance called CloudPipe
+VPN access is provided by running a small instance called CloudPipe
on the IP immediately following the gateway IP for each project. The
network node maps a dedicated public IP/port to the CloudPipe instance.
diff --git a/doc/source/runnova/nova.manage.rst b/doc/source/runnova/nova.manage.rst
index 4a948c29a..973ce5ae7 100644
--- a/doc/source/runnova/nova.manage.rst
+++ b/doc/source/runnova/nova.manage.rst
@@ -170,9 +170,10 @@ Nova VPN
Nova Floating IPs
~~~~~~~~~~~~~~~~~
-``nova-manage floating create <host> <ip_range>``
+``nova-manage floating create <ip_range> [--pool <pool>] [--interface <interface>]``
- Creates floating IP addresses for the named host by the given range.
+ Creates floating IP addresses for the given range, optionally specifying
+ a floating pool and a network interface.
``nova-manage floating delete <ip_range>``
diff --git a/doc/source/runnova/vncconsole.rst b/doc/source/runnova/vncconsole.rst
index d6bc16322..b85475c1e 100644
--- a/doc/source/runnova/vncconsole.rst
+++ b/doc/source/runnova/vncconsole.rst
@@ -152,7 +152,7 @@ Important Options
management ip on the same network as the proxies.
-.. todo::
+.. todo::
Reformat command line app instructions for commands using
``:command:``, ``:option:``, and ``.. program::``. (bug-947261)
diff --git a/doc/source/service.architecture.rst b/doc/source/service.architecture.rst
index 8fa1e3306..84751f808 100644
--- a/doc/source/service.architecture.rst
+++ b/doc/source/service.architecture.rst
@@ -32,7 +32,7 @@ Nova’s Cloud Fabric is composed of the following major components:
.. image:: /images/fabric.png
:width: 790
-API Server
+API Server
--------------------------------------------------
At the heart of the cloud framework is an API Server. This API Server makes command and control of the hypervisor, storage, and networking programmatically available to users in realization of the definition of cloud computing.
diff --git a/doc/source/vmwareapi_readme.rst b/doc/source/vmwareapi_readme.rst
index 6fdb0c9ed..9fead669c 100644
--- a/doc/source/vmwareapi_readme.rst
+++ b/doc/source/vmwareapi_readme.rst
@@ -1,226 +1,226 @@
-..
- Copyright (c) 2010 Citrix Systems, Inc.
- Copyright 2010 OpenStack LLC.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-VMware ESX/ESXi Server Support for OpenStack Compute
-====================================================
-
-Introduction
-------------
-A module named 'vmwareapi' is added to 'nova.virt' to add support of VMware ESX/ESXi hypervisor to OpenStack compute (Nova). Nova may now use VMware vSphere as a compute provider.
-
-The basic requirement is to support VMware vSphere 4.1 as a compute provider within Nova. As the deployment architecture, support both ESX and ESXi. VM storage is restricted to VMFS volumes on local drives. vCenter is not required by the current design, and is not currently supported. Instead, Nova Compute talks directly to ESX/ESXi.
-
-The 'vmwareapi' module is integrated with Glance, so that VM images can be streamed from there for boot on ESXi using Glance server for image storage & retrieval.
-
-Currently supports Nova's flat networking model (Flat Manager) & VLAN networking model.
-
-.. image:: images/vmwareapi_blockdiagram.jpg
-
-
-System Requirements
--------------------
-Following software components are required for building the cloud using OpenStack on top of ESX/ESXi Server(s):
-
-* OpenStack
-* Glance Image service
-* VMware ESX v4.1 or VMware ESXi(licensed) v4.1
-
-VMware ESX Requirements
------------------------
-* ESX credentials with administration/root privileges
-* Single local hard disk at the ESX host
-* An ESX Virtual Machine Port Group (For Flat Networking)
-* An ESX physical network adapter (For VLAN networking)
-* Need to enable "vSphere Web Access" in "vSphere client" UI at Configuration->Security Profile->Firewall
-
-Python dependencies
--------------------
-* suds-0.4
-
-* Installation procedure on Ubuntu/Debian
-
-::
-
- easy_install suds==0.4
-
-
-Configuration flags required for nova-compute
----------------------------------------------
-::
-
- --connection_type=vmwareapi
- --vmwareapi_host_ip=<VMware ESX Host IP>
- --vmwareapi_host_username=<VMware ESX Username>
- --vmwareapi_host_password=<VMware ESX Password>
- --vmwareapi_vlan_interface=<Physical ethernet adapter name in VMware ESX host for vlan networking E.g vmnic0> [Optional, only for VLAN Networking]
-
-
-Configuration flags required for nova-network
----------------------------------------------
-::
-
- --network_manager=nova.network.manager.FlatManager [or nova.network.manager.VlanManager]
- --flat_network_bridge=<ESX Virtual Machine Port Group> [Optional, only for Flat Networking]
-
-
-Configuration flags required for nova-console
----------------------------------------------
-::
-
- --console_manager=nova.console.vmrc_manager.ConsoleVMRCManager
- --console_driver=nova.console.vmrc.VMRCSessionConsole [Optional, only for OTP (One time Passwords) as against host credentials]
-
-
-Other flags
------------
-::
-
- --image_service=nova.image.glance.GlanceImageService
- --glance_host=<Glance Host>
- --vmwareapi_wsdl_loc=<http://<WEB SERVER>/vimService.wsdl>
-
-Note:- Due to a faulty wsdl being shipped with ESX vSphere 4.1 we need a working wsdl which can to be mounted on any webserver. Follow the below steps to download the SDK,
-
-* Go to http://www.vmware.com/support/developer/vc-sdk/
-* Go to section VMware vSphere Web Services SDK 4.0
-* Click "Downloads"
-* Enter VMware credentials when prompted for download
-* Unzip the downloaded file vi-sdk-4.0.0-xxx.zip
-* Go to SDK->WSDL->vim25 & host the files "vimService.wsdl" and "vim.wsdl" in a WEB SERVER
-* Set the flag "--vmwareapi_wsdl_loc" with url, "http://<WEB SERVER>/vimService.wsdl"
-
-
-Debug flag
-----------
-
-.. note::
-
- suds logging is very verbose and turned off by default. If you need to
- debug the VMware API calls, change the default_log_levels flag appropriately.
-
-
-VLAN Network Manager
---------------------
-VLAN network support is added through a custom network driver in the nova-compute node i.e "nova.network.vmwareapi_net" and it uses a Physical ethernet adapter on the VMware ESX/ESXi host for VLAN Networking (the name of the ethernet adapter is specified as vlan_interface flag in the nova-compute configuration flag) in the nova-compute node.
-
-Using the physical adapter name the associated Virtual Switch will be determined. In VMware ESX there can be only one Virtual Switch associated with a Physical adapter.
-
-When VM Spawn request is issued with a VLAN ID the work flow looks like,
-
-1. Check that a Physical adapter with the given name exists. If no, throw an error.If yes, goto next step.
-
-2. Check if a Virtual Switch is associated with the Physical ethernet adapter with vlan interface name. If no, throw an error. If yes, goto next step.
-
-3. Check if a port group with the network bridge name exists. If no, create a port group in the Virtual switch with the give name and VLAN id and goto step 6. If yes, goto next step.
-
-4. Check if the port group is associated with the Virtual Switch. If no, throw an error. If yes, goto next step.
-
-5. Check if the port group is associated with the given VLAN Id. If no, throw an error. If yes, goto next step.
-
-6. Spawn the VM using this Port Group as the Network Name for the VM.
-
-
-Guest console Support
----------------------
-| VMware VMRC console is a built-in console method providing graphical control of the VM remotely.
-|
-| VMRC Console types supported:
-| # Host based credentials
-| Not secure (Sends ESX admin credentials in clear text)
-|
-| # OTP (One time passwords)
-| Secure but creates multiple session entries in DB for each OpenStack console create request.
-| Console sessions created is can be used only once.
-|
-| Install browser based VMware ESX plugins/activex on the client machine to connect
-|
-| Windows:-
-| Internet Explorer:
-| https://<VMware ESX Host>/ui/plugin/vmware-vmrc-win32-x86.exe
-|
-| Mozilla Firefox:
-| https://<VMware ESX Host>/ui/plugin/vmware-vmrc-win32-x86.xpi
-|
-| Linux:-
-| Mozilla Firefox
-| 32-Bit Linux:
-| https://<VMware ESX Host>/ui/plugin/vmware-vmrc-linux-x86.xpi
-|
-| 64-Bit Linux:
-| https://<VMware ESX Host>/ui/plugin/vmware-vmrc-linux-x64.xpi
-|
-| OpenStack Console Details:
-| console_type = vmrc+credentials | vmrc+session
-| host = <VMware ESX Host>
-| port = <VMware ESX Port>
-| password = {'vm_id': <VMware VM ID>,'username':<VMware ESX Username>, 'password':<VMware ESX Password>} //base64 + json encoded
-|
-| Instantiate the plugin/activex object
-| # In Internet Explorer
-| <object id='vmrc' classid='CLSID:B94C2238-346E-4C5E-9B36-8CC627F35574'>
-| </object>
-|
-| # Mozilla Firefox and other browsers
-| <object id='vmrc' type='application/x-vmware-vmrc;version=2.5.0.0'>
-| </object>
-|
-| Open vmrc connection
-| # Host based credentials [type=vmrc+credentials]
-| <script type="text/javascript">
-| var MODE_WINDOW = 2;
-| var vmrc = document.getElementById('vmrc');
-| vmrc.connect(<VMware ESX Host> + ':' + <VMware ESX Port>, <VMware ESX Username>, <VMware ESX Password>, '', <VMware VM ID>, MODE_WINDOW);
-| </script>
-|
-| # OTP (One time passwords) [type=vmrc+session]
-| <script type="text/javascript">
-| var MODE_WINDOW = 2;
-| var vmrc = document.getElementById('vmrc');
-| vmrc.connectWithSession(<VMware ESX Host> + ':' + <VMware ESX Port>, <VMware VM ID>, <VMware ESX Password>, MODE_WINDOW);
-| </script>
-
-
-Assumptions
------------
-1. The VMware images uploaded to the image repositories have VMware Tools installed.
-
-
-FAQ
----
-
-1. What type of disk images are supported?
-
-* Only VMware VMDK's are currently supported and of that support is available only for thick disks, thin provisioned disks are not supported.
-
-
-2. How is IP address information injected into the guest?
-
-* IP address information is injected through 'machine.id' vmx parameter (equivalent to XenStore in XenServer). This information can be retrived inside the guest using VMware tools.
-
-
-3. What is the guest tool?
-
-* The guest tool is a small python script that should be run either as a service or added to system startup. This script configures networking on the guest. The guest tool is available at tools/esx/guest_tool.py
-
-
-4. What type of consoles are supported?
-
-* VMware VMRC based consoles are supported. There are 2 options for credentials one is OTP (Secure but creates multiple session entries in DB for each OpenStack console create request.) & other is host based credentials (It may not be secure as ESX credentials are transmitted as clear text).
-
-5. What does 'Vim' refer to as far as vmwareapi module is concerned?
-
-* Vim refers to VMware Virtual Infrastructure Methodology. This is not to be confused with "VIM" editor.
-
+..
+ Copyright (c) 2010 Citrix Systems, Inc.
+ Copyright 2010 OpenStack LLC.
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+VMware ESX/ESXi Server Support for OpenStack Compute
+====================================================
+
+Introduction
+------------
+A module named 'vmwareapi' is added to 'nova.virt' to add support of VMware ESX/ESXi hypervisor to OpenStack compute (Nova). Nova may now use VMware vSphere as a compute provider.
+
+The basic requirement is to support VMware vSphere 4.1 as a compute provider within Nova. As the deployment architecture, support both ESX and ESXi. VM storage is restricted to VMFS volumes on local drives. vCenter is not required by the current design, and is not currently supported. Instead, Nova Compute talks directly to ESX/ESXi.
+
+The 'vmwareapi' module is integrated with Glance, so that VM images can be streamed from there for boot on ESXi using Glance server for image storage & retrieval.
+
+Currently supports Nova's flat networking model (Flat Manager) & VLAN networking model.
+
+.. image:: images/vmwareapi_blockdiagram.jpg
+
+
+System Requirements
+-------------------
+Following software components are required for building the cloud using OpenStack on top of ESX/ESXi Server(s):
+
+* OpenStack
+* Glance Image service
+* VMware ESX v4.1 or VMware ESXi(licensed) v4.1
+
+VMware ESX Requirements
+-----------------------
+* ESX credentials with administration/root privileges
+* Single local hard disk at the ESX host
+* An ESX Virtual Machine Port Group (For Flat Networking)
+* An ESX physical network adapter (For VLAN networking)
+* Need to enable "vSphere Web Access" in "vSphere client" UI at Configuration->Security Profile->Firewall
+
+Python dependencies
+-------------------
+* suds-0.4
+
+* Installation procedure on Ubuntu/Debian
+
+::
+
+ easy_install suds==0.4
+
+
+Configuration flags required for nova-compute
+---------------------------------------------
+::
+
+ --connection_type=vmwareapi
+ --vmwareapi_host_ip=<VMware ESX Host IP>
+ --vmwareapi_host_username=<VMware ESX Username>
+ --vmwareapi_host_password=<VMware ESX Password>
+ --vmwareapi_vlan_interface=<Physical ethernet adapter name in VMware ESX host for vlan networking E.g vmnic0> [Optional, only for VLAN Networking]
+
+
+Configuration flags required for nova-network
+---------------------------------------------
+::
+
+ --network_manager=nova.network.manager.FlatManager [or nova.network.manager.VlanManager]
+ --flat_network_bridge=<ESX Virtual Machine Port Group> [Optional, only for Flat Networking]
+
+
+Configuration flags required for nova-console
+---------------------------------------------
+::
+
+ --console_manager=nova.console.vmrc_manager.ConsoleVMRCManager
+ --console_driver=nova.console.vmrc.VMRCSessionConsole [Optional, only for OTP (One time Passwords) as against host credentials]
+
+
+Other flags
+-----------
+::
+
+ --image_service=nova.image.glance.GlanceImageService
+ --glance_host=<Glance Host>
+ --vmwareapi_wsdl_loc=<http://<WEB SERVER>/vimService.wsdl>
+
+Note:- Due to a faulty wsdl being shipped with ESX vSphere 4.1 we need a working wsdl which can to be mounted on any webserver. Follow the below steps to download the SDK,
+
+* Go to http://www.vmware.com/support/developer/vc-sdk/
+* Go to section VMware vSphere Web Services SDK 4.0
+* Click "Downloads"
+* Enter VMware credentials when prompted for download
+* Unzip the downloaded file vi-sdk-4.0.0-xxx.zip
+* Go to SDK->WSDL->vim25 & host the files "vimService.wsdl" and "vim.wsdl" in a WEB SERVER
+* Set the flag "--vmwareapi_wsdl_loc" with url, "http://<WEB SERVER>/vimService.wsdl"
+
+
+Debug flag
+----------
+
+.. note::
+
+ suds logging is very verbose and turned off by default. If you need to
+ debug the VMware API calls, change the default_log_levels flag appropriately.
+
+
+VLAN Network Manager
+--------------------
+VLAN network support is added through a custom network driver in the nova-compute node i.e "nova.network.vmwareapi_net" and it uses a Physical ethernet adapter on the VMware ESX/ESXi host for VLAN Networking (the name of the ethernet adapter is specified as vlan_interface flag in the nova-compute configuration flag) in the nova-compute node.
+
+Using the physical adapter name the associated Virtual Switch will be determined. In VMware ESX there can be only one Virtual Switch associated with a Physical adapter.
+
+When VM Spawn request is issued with a VLAN ID the work flow looks like,
+
+1. Check that a Physical adapter with the given name exists. If no, throw an error.If yes, goto next step.
+
+2. Check if a Virtual Switch is associated with the Physical ethernet adapter with vlan interface name. If no, throw an error. If yes, goto next step.
+
+3. Check if a port group with the network bridge name exists. If no, create a port group in the Virtual switch with the give name and VLAN id and goto step 6. If yes, goto next step.
+
+4. Check if the port group is associated with the Virtual Switch. If no, throw an error. If yes, goto next step.
+
+5. Check if the port group is associated with the given VLAN Id. If no, throw an error. If yes, goto next step.
+
+6. Spawn the VM using this Port Group as the Network Name for the VM.
+
+
+Guest console Support
+---------------------
+| VMware VMRC console is a built-in console method providing graphical control of the VM remotely.
+|
+| VMRC Console types supported:
+| # Host based credentials
+| Not secure (Sends ESX admin credentials in clear text)
+|
+| # OTP (One time passwords)
+| Secure but creates multiple session entries in DB for each OpenStack console create request.
+| Console sessions created is can be used only once.
+|
+| Install browser based VMware ESX plugins/activex on the client machine to connect
+|
+| Windows:-
+| Internet Explorer:
+| https://<VMware ESX Host>/ui/plugin/vmware-vmrc-win32-x86.exe
+|
+| Mozilla Firefox:
+| https://<VMware ESX Host>/ui/plugin/vmware-vmrc-win32-x86.xpi
+|
+| Linux:-
+| Mozilla Firefox
+| 32-Bit Linux:
+| https://<VMware ESX Host>/ui/plugin/vmware-vmrc-linux-x86.xpi
+|
+| 64-Bit Linux:
+| https://<VMware ESX Host>/ui/plugin/vmware-vmrc-linux-x64.xpi
+|
+| OpenStack Console Details:
+| console_type = vmrc+credentials | vmrc+session
+| host = <VMware ESX Host>
+| port = <VMware ESX Port>
+| password = {'vm_id': <VMware VM ID>,'username':<VMware ESX Username>, 'password':<VMware ESX Password>} //base64 + json encoded
+|
+| Instantiate the plugin/activex object
+| # In Internet Explorer
+| <object id='vmrc' classid='CLSID:B94C2238-346E-4C5E-9B36-8CC627F35574'>
+| </object>
+|
+| # Mozilla Firefox and other browsers
+| <object id='vmrc' type='application/x-vmware-vmrc;version=2.5.0.0'>
+| </object>
+|
+| Open vmrc connection
+| # Host based credentials [type=vmrc+credentials]
+| <script type="text/javascript">
+| var MODE_WINDOW = 2;
+| var vmrc = document.getElementById('vmrc');
+| vmrc.connect(<VMware ESX Host> + ':' + <VMware ESX Port>, <VMware ESX Username>, <VMware ESX Password>, '', <VMware VM ID>, MODE_WINDOW);
+| </script>
+|
+| # OTP (One time passwords) [type=vmrc+session]
+| <script type="text/javascript">
+| var MODE_WINDOW = 2;
+| var vmrc = document.getElementById('vmrc');
+| vmrc.connectWithSession(<VMware ESX Host> + ':' + <VMware ESX Port>, <VMware VM ID>, <VMware ESX Password>, MODE_WINDOW);
+| </script>
+
+
+Assumptions
+-----------
+1. The VMware images uploaded to the image repositories have VMware Tools installed.
+
+
+FAQ
+---
+
+1. What type of disk images are supported?
+
+* Only VMware VMDK's are currently supported and of that support is available only for thick disks, thin provisioned disks are not supported.
+
+
+2. How is IP address information injected into the guest?
+
+* IP address information is injected through 'machine.id' vmx parameter (equivalent to XenStore in XenServer). This information can be retrived inside the guest using VMware tools.
+
+
+3. What is the guest tool?
+
+* The guest tool is a small python script that should be run either as a service or added to system startup. This script configures networking on the guest. The guest tool is available at tools/esx/guest_tool.py
+
+
+4. What type of consoles are supported?
+
+* VMware VMRC based consoles are supported. There are 2 options for credentials one is OTP (Secure but creates multiple session entries in DB for each OpenStack console create request.) & other is host based credentials (It may not be secure as ESX credentials are transmitted as clear text).
+
+5. What does 'Vim' refer to as far as vmwareapi module is concerned?
+
+* Vim refers to VMware Virtual Infrastructure Methodology. This is not to be confused with "VIM" editor.
+
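
The console details in the readme above say the ``password`` field is a "base64 + json encoded" blob carrying the VM id and ESX credentials. A small sketch of building such a blob, assuming the JSON document is encoded first and then base64-wrapped (that ordering is an assumption, and all values are placeholders)::

    import base64
    import json

    def vmrc_password_blob(vm_id, username, password):
        # JSON-encode the credential document, then base64 it for transport.
        payload = {'vm_id': vm_id, 'username': username, 'password': password}
        return base64.b64encode(json.dumps(payload))

    print vmrc_password_blob('vm-123', 'esx-admin', 'secret')
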
diff --git a/etc/nova/nova.conf.sample b/etc/nova/nova.conf.sample
new file mode 100644
index 000000000..ba99c4287
--- /dev/null
+++ b/etc/nova/nova.conf.sample
@@ -0,0 +1,1088 @@
+####################
+# nova.conf sample #
+####################
+
+[DEFAULT]
+
+######### defined in nova.flags #########
+
+###### (BoolOpt) Allow destination machine to match source for resize. Useful when testing in single-host environments.
+# allow_resize_to_same_host=false
+###### (StrOpt) File name for the paste.deploy config for nova-api
+# api_paste_config="api-paste.ini"
+###### (IntOpt) Seconds for auth tokens to linger
+# auth_token_ttl=3600
+###### (StrOpt) AWS Access ID
+# aws_access_key_id="admin"
+###### (StrOpt) AWS Access Key
+# aws_secret_access_key="admin"
+###### (IntOpt) interval to pull bandwidth usage info
+# bandwith_poll_interval=600
+###### (BoolOpt) Cache glance images locally
+# cache_images=true
+###### (StrOpt) Manager for cert
+# cert_manager="nova.cert.manager.CertManager"
+###### (StrOpt) the topic cert nodes listen on
+# cert_topic="cert"
+###### (StrOpt) The compute API class to use
+# compute_api_class="nova.compute.api.API"
+###### (StrOpt) Manager for compute
+# compute_manager="nova.compute.manager.ComputeManager"
+###### (StrOpt) the topic compute nodes listen on
+# compute_topic="compute"
+###### (MultiStrOpt) Path to a config file to use. Multiple config files can be specified, with values in later files taking precedence. The default files used are: []
+###### (StrOpt) libvirt, xenapi or fake
+# connection_type=<None>
+###### (StrOpt) Manager for console proxy
+# console_manager="nova.console.manager.ConsoleProxyManager"
+###### (StrOpt) the topic console proxy nodes listen on
+# console_topic="console"
+###### (StrOpt) the main exchange to connect to
+# control_exchange="nova"
+###### (BoolOpt) Print debugging output
+# debug=false
+###### (StrOpt) Name of network to use to set access ips for instances
+# default_access_ip_network_name=<None>
+###### (StrOpt) The default format an ephemeral_volume will be formatted with on creation.
+# default_ephemeral_format=<None>
+###### (StrOpt) default image to use, testing only
+# default_image="ami-11111"
+###### (StrOpt) default instance type to use, testing only
+# default_instance_type="m1.small"
+###### (StrOpt) default project for openstack
+# default_project="openstack"
+###### (StrOpt) zone to use when user doesn't specify one
+# default_schedule_zone=<None>
+###### (StrOpt) default instance type for VSA instances
+# default_vsa_instance_type="m1.small"
+###### (StrOpt) internal ip of api server
+# ec2_dmz_host="$my_ip"
+###### (StrOpt) ip of api server
+# ec2_host="$my_ip"
+###### (StrOpt) suffix for ec2
+# ec2_path="/services/Cloud"
+###### (IntOpt) cloud controller port
+# ec2_port=8773
+###### (StrOpt) prefix for ec2
+# ec2_scheme="http"
+###### (BoolOpt) Allows use of instance password during server creation
+# enable_instance_password=true
+###### (ListOpt) list of APIs to enable by default
+# enabled_apis="ec2,osapi_compute,osapi_volume,metadata"
+###### (BoolOpt) should we use fake network devices and addresses
+# fake_network=false
+###### (BoolOpt) use a fake rabbit
+# fake_rabbit=false
+###### (StrOpt) Firewall driver (defaults to iptables)
+# firewall_driver="nova.virt.firewall.IptablesFirewallDriver"
+###### (StrOpt) DNS Manager for floating IPs
+# floating_ip_dns_manager="nova.network.dns_driver.DNSDriver"
+###### (ListOpt) glance api servers available to nova (host:port)
+# glance_api_servers="$glance_host:$glance_port"
+###### (StrOpt) default glance host
+# glance_host="$my_ip"
+###### (IntOpt) Number of retries when downloading an image from glance
+# glance_num_retries=0
+###### (IntOpt) default glance port
+# glance_port=9292
+###### (StrOpt) Name of this node. This can be an opaque identifier. It is not necessarily a hostname, FQDN, or IP address.
+# host="firefly-2.local"
+###### (StrOpt) The service to use for retrieving and searching images.
+# image_service="nova.image.glance.GlanceImageService"
+###### (StrOpt) DNS Zone for instance IPs
+# instance_dns_domain=""
+###### (StrOpt) DNS Manager for instance IPs
+# instance_dns_manager="nova.network.dns_driver.DNSDriver"
+###### (StrOpt) time period to generate instance usages for.
+# instance_usage_audit_period="month"
+###### (ListOpt) Host reserved for specific images
+# isolated_hosts=""
+###### (ListOpt) Images to run on isolated host
+# isolated_images=""
+###### (StrOpt) Directory for lock files
+# lock_path="/Users/vishvananda/cache/stack/nova/nova/../"
+###### (StrOpt) If this option is specified, the logging configuration file specified is used and overrides any other logging options specified. Please see the Python logging module documentation for details on logging configuration files.
+# log-config=<None>
+###### (StrOpt) Format string for %(asctime)s in log records. Default: %default
+# log-date-format="%Y-%m-%d %H:%M:%S"
+###### (StrOpt) (Optional) The directory to keep log files in (will be prepended to --logfile)
+# log-dir=<None>
+###### (StrOpt) (Optional) Name of log file to output to. If not set, logging will go to stdout.
+# log-file=<None>
+###### (StrOpt) A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes. Default: %default
+# log-format="%(asctime)s %(levelname)8s [%(name)s] %(message)s"
+###### (StrOpt) output to a per-service log file in named directory
+# logdir=<None>
+###### (StrOpt) output to named file
+# logfile=<None>
+###### (StrOpt) Default file mode of the logs.
+# logfile_mode="0644"
+###### (IntOpt) maximum VCs in a VSA
+# max_vcs_in_vsa=32
+###### (ListOpt) Memcached servers or None for in process cache.
+# memcached_servers=<None>
+###### (StrOpt) ip of metadata server
+# metadata_host="$my_ip"
+###### (IntOpt) Metadata API port
+# metadata_port=8775
+###### (BoolOpt) Whether to log monkey patching
+# monkey_patch=false
+###### (ListOpt) List of modules/decorators to monkey patch
+# monkey_patch_modules="nova.api.ec2.cloud:nova.notifier.api.notify_decorator,nova.compute.api:nova.notifier.api.notify_decorator"
+###### (StrOpt) host ip address
+# my_ip="192.168.1.22"
+###### (StrOpt) The network API class to use
+# network_api_class="nova.network.api.API"
+###### (StrOpt) Driver to use for network creation
+# network_driver="nova.network.linux_net"
+###### (StrOpt) Manager for network
+# network_manager="nova.network.manager.VlanManager"
+###### (StrOpt) the topic network nodes listen on
+# network_topic="network"
+###### (StrOpt) availability zone of this node
+# node_availability_zone="nova"
+###### (StrOpt) Default driver for sending notifications
+# notification_driver="nova.notifier.no_op_notifier"
+###### (StrOpt) kernel image that indicates not to use a kernel, but to use a raw disk image instead
+# null_kernel="nokernel"
+###### (ListOpt) Specify list of extensions to load when using osapi_compute_extension option with nova.api.openstack.compute.contrib.select_extensions
+# osapi_compute_ext_list=""
+###### (MultiStrOpt) osapi compute extension to load
+# osapi_compute_extension="nova.api.openstack.compute.contrib.standard_extensions"
+###### (StrOpt) Base URL that will be presented to users in links to the Openstack Compute API
+# osapi_compute_link_prefix=<None>
+###### (StrOpt) Base URL that will be presented to users in links to glance resources
+# osapi_glance_link_prefix=<None>
+###### (IntOpt) max number of items returned in a collection response
+# osapi_max_limit=1000
+###### (StrOpt) suffix for openstack
+# osapi_path="/v1.1/"
+###### (StrOpt) prefix for openstack
+# osapi_scheme="http"
+###### (ListOpt) Specify list of extensions to load when using osapi_volume_extension option with nova.api.openstack.volume.contrib.select_extensions
+# osapi_volume_ext_list=""
+###### (MultiStrOpt) osapi volume extension to load
+# osapi_volume_extension="nova.api.openstack.volume.contrib.standard_extensions"
+###### (IntOpt) Length of generated instance admin passwords
+# password_length=12
+###### (BoolOpt) use durable queues
+# rabbit_durable_queues=false
+###### (StrOpt) rabbit host
+# rabbit_host="localhost"
+###### (IntOpt) maximum rabbit connection attempts (0=try forever)
+# rabbit_max_retries=0
+###### (StrOpt) rabbit password
+# rabbit_password="guest"
+###### (IntOpt) rabbit port
+# rabbit_port=5672
+###### (IntOpt) rabbit connection retry backoff in seconds
+# rabbit_retry_backoff=2
+###### (IntOpt) rabbit connection retry interval to start
+# rabbit_retry_interval=1
+###### (BoolOpt) connect over SSL
+# rabbit_use_ssl=false
+###### (StrOpt) rabbit userid
+# rabbit_userid="guest"
+###### (StrOpt) rabbit virtual host
+# rabbit_virtual_host="/"
+###### (IntOpt) Interval in seconds for reclaiming deleted instances
+# reclaim_instance_interval=0
+###### (ListOpt) list of region=fqdn pairs separated by commas
+# region_list=""
+###### (BoolOpt) Whether to start guests that were running before the host rebooted
+# resume_guests_state_on_host_boot=false
+###### (StrOpt) Command prefix to use for running commands as root
+# root_helper="sudo"
+###### (StrOpt) s3 dmz ip (for instances)
+# s3_dmz="$my_ip"
+###### (StrOpt) s3 host (for infrastructure)
+# s3_host="$my_ip"
+###### (IntOpt) s3 port
+# s3_port=3333
+###### (StrOpt) Manager for scheduler
+# scheduler_manager="nova.scheduler.manager.SchedulerManager"
+###### (StrOpt) the topic scheduler nodes listen on
+# scheduler_topic="scheduler"
+###### (StrOpt) security group handler class
+# security_group_handler="nova.network.quantum.sg.NullSecurityGroupHandler"
+###### (IntOpt) maximum time since last check-in for up service
+# service_down_time=60
+###### (StrOpt) connection string for sql database
+# sql_connection="sqlite:///$state_path/$sqlite_db"
+###### (IntOpt) timeout for idle sql database connections
+# sql_idle_timeout=3600
+###### (IntOpt) sql connection attempts
+# sql_max_retries=12
+###### (IntOpt) sql connection retry interval
+# sql_retry_interval=10
+###### (StrOpt) file name for sqlite
+# sqlite_db="nova.sqlite"
+###### (BoolOpt) Synchronous mode for sqlite
+# sqlite_synchronous=true
+###### (BoolOpt) Whether to restart guests when the host reboots
+# start_guests_on_host_boot=false
+###### (StrOpt) Top-level directory for maintaining nova's state
+# state_path="/Users/vishvananda/cache/stack/nova/nova/../"
+###### (StrOpt) Stub network related code
+# stub_network="False"
+###### (StrOpt) syslog facility to receive log lines
+# syslog-log-facility="LOG_USER"
+###### (BoolOpt) Whether to use cow images
+# use_cow_images=true
+###### (BoolOpt) use ipv6
+# use_ipv6=false
+###### (BoolOpt) log to standard error
+# use_stderr=true
+###### (BoolOpt) Use syslog for logging.
+# use-syslog=false
+###### (StrOpt) the VC image ID (for a VC image that exists in Glance)
+# vc_image_name="vc_image"
+###### (BoolOpt) Print more verbose output
+# verbose=false
+###### (StrOpt) The volume API class to use
+# volume_api_class="nova.volume.api.API"
+###### (StrOpt) Manager for volume
+# volume_manager="nova.volume.manager.VolumeManager"
+###### (StrOpt) the topic volume nodes listen on
+# volume_topic="volume"
+###### (StrOpt) image id for cloudpipe vpn server
+# vpn_image_id="0"
+###### (StrOpt) Suffix to add to project name for vpn key and secgroups
+# vpn_key_suffix="-vpn"
+###### (StrOpt) Manager for vsa
+# vsa_manager="nova.vsa.manager.VsaManager"
+###### (IntOpt) default partition size for shared capacity
+# vsa_part_size_gb=100
+###### (StrOpt) the topic that nova-vsa service listens on
+# vsa_topic="vsa"
+###### (IntOpt) Number of seconds after which zombie instances are cleaned up.
+# zombie_instance_updated_at_window=172800
+
+######### defined in nova.log #########
+
+###### (ListOpt) list of logger=LEVEL pairs
+# default_log_levels="amqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,eventlet.wsgi.server=WARN"
+###### (StrOpt) If an instance is passed with the log message, format it like this
+# instance_format="[instance: %(uuid)s] "
+###### (StrOpt) format string to use for log messages with context
+# logging_context_format_string="%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s] %(instance)s%(message)s"
+###### (StrOpt) data to append to log format when level is DEBUG
+# logging_debug_format_suffix="from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d"
+###### (StrOpt) format string to use for log messages without context
+# logging_default_format_string="%(asctime)s %(levelname)s %(name)s [-] %(instance)s%(message)s"
+###### (StrOpt) prefix each line of exception output with this format
+# logging_exception_prefix="(%(name)s): TRACE: "
+###### (BoolOpt) publish error events
+# publish_errors=false
+
+######### defined in nova.utils #########
+
+###### (BoolOpt) Whether to disable inter-process locks
+# disable_process_locking=false
+
+######### defined in nova.service #########
+
+###### (StrOpt) The backend to use for db
+# db_backend="sqlalchemy"
+###### (StrOpt) IP address for EC2 API to listen
+# ec2_listen="0.0.0.0"
+###### (IntOpt) port for ec2 api to listen
+# ec2_listen_port=8773
+###### (BoolOpt) Services to be added to the available pool on create
+# enable_new_services=true
+###### (StrOpt) Template string to be used to generate instance names
+# instance_name_template="instance-%08x"
+###### (StrOpt) IP address for metadata api to listen
+# metadata_listen="0.0.0.0"
+###### (IntOpt) port for metadata api to listen
+# metadata_listen_port=8775
+###### (StrOpt) OpenStack metadata service manager
+# metadata_manager="nova.api.manager.MetadataManager"
+###### (StrOpt) IP address for OpenStack API to listen
+# osapi_compute_listen="0.0.0.0"
+###### (IntOpt) listen port for osapi compute
+# osapi_compute_listen_port=8774
+###### (StrOpt) IP address for OpenStack Volume API to listen
+# osapi_volume_listen="0.0.0.0"
+###### (IntOpt) port for os volume api to listen
+# osapi_volume_listen_port=8776
+###### (IntOpt) seconds between running periodic tasks
+# periodic_interval=60
+###### (IntOpt) seconds between nodes reporting state to datastore
+# report_interval=10
+###### (StrOpt) The messaging module to use, defaults to kombu.
+# rpc_backend="nova.rpc.impl_kombu"
+###### (IntOpt) Size of RPC connection pool
+# rpc_conn_pool_size=30
+###### (IntOpt) Seconds to wait for a response from call or multicall
+# rpc_response_timeout=3600
+###### (IntOpt) Size of RPC thread pool
+# rpc_thread_pool_size=1024
+###### (StrOpt) Template string to be used to generate snapshot names
+# snapshot_name_template="snapshot-%08x"
+###### (StrOpt) Template string to be used to generate instance names
+# volume_name_template="volume-%08x"
+###### (StrOpt) Template string to be used to generate VSA names
+# vsa_name_template="vsa-%08x"
+
+######### defined in nova.crypto #########
+
+###### (StrOpt) Filename of root CA
+# ca_file="cacert.pem"
+###### (StrOpt) Where we keep our root CA
+# ca_path="$state_path/CA"
+###### (StrOpt) Filename of root Certificate Revocation List
+# crl_file="crl.pem"
+###### (StrOpt) Filename of private key
+# key_file="private/cakey.pem"
+###### (StrOpt) Where we keep our keys
+# keys_path="$state_path/keys"
+###### (StrOpt) Subject for certificate for projects, %s for project, timestamp
+# project_cert_subject="/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s"
+###### (BoolOpt) Should we use a CA for each project?
+# use_project_ca=false
+###### (StrOpt) Subject for certificate for users, %s for project, user, timestamp
+# user_cert_subject="/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s"
+
+######### defined in nova.policy #########
+
+###### (StrOpt) Rule checked when requested rule is not found
+# policy_default_rule="default"
+###### (StrOpt) JSON file representing policy
+# policy_file="policy.json"
+
+######### defined in nova.quota #########
+
+###### (IntOpt) number of instance cores allowed per project
+# quota_cores=20
+###### (IntOpt) number of floating ips allowed per project
+# quota_floating_ips=10
+###### (IntOpt) number of volume gigabytes allowed per project
+# quota_gigabytes=1000
+###### (IntOpt) number of instances allowed per project
+# quota_instances=10
+###### (IntOpt) number of bytes allowed per injected file
+# quota_max_injected_file_content_bytes=10240
+###### (IntOpt) number of bytes allowed per injected file path
+# quota_max_injected_file_path_bytes=255
+###### (IntOpt) number of injected files allowed
+# quota_max_injected_files=5
+###### (IntOpt) number of metadata items allowed per instance
+# quota_metadata_items=128
+###### (IntOpt) megabytes of instance ram allowed per project
+# quota_ram=51200
+###### (IntOpt) number of volumes allowed per project
+# quota_volumes=10
+
+######### defined in nova.test #########
+
+###### (ListOpt) Allowed roles for project
+# allowed_roles="cloudadmin,itsec,sysadmin,netadmin,developer"
+###### (StrOpt) Driver that auth manager uses
+# auth_driver="nova.auth.dbdriver.DbDriver"
+###### (StrOpt) the topic console auth proxy nodes listen on
+# consoleauth_topic="consoleauth"
+###### (StrOpt) Filename of certificate in credentials zip
+# credential_cert_file="cert.pem"
+###### (StrOpt) Filename of private key in credentials zip
+# credential_key_file="pk.pem"
+###### (StrOpt) Filename of rc in credentials zip; %s will be replaced by the name of the region (nova by default)
+# credential_rc_file="%src"
+###### (StrOpt) Filename of certificate in credentials zip
+# credential_vpn_file="nova-vpn.conf"
+###### (StrOpt) Template for creating users rc file
+# credentials_template="/Users/vishvananda/cache/stack/nova/nova/auth/novarc.template"
+###### (StrOpt) driver to use for database access
+# db_driver="nova.db"
+###### (BoolOpt) should we use everything for testing
+# fake_tests=true
+###### (StrOpt) Timeout after NN seconds when looking for a host.
+# find_host_timeout="30"
+###### (ListOpt) Roles that apply to all projects
+# global_roles="cloudadmin,itsec"
+###### (StrOpt) File name of clean sqlite db
+# sqlite_clean_db="clean.sqlite"
+###### (StrOpt) availability zone of this service
+# storage_availability_zone="nova"
+###### (ListOpt) Roles that ignore authorization checking completely
+# superuser_roles="cloudadmin"
+###### (BoolOpt) This flag must be set to use old style auth
+# use_deprecated_auth=false
+###### (BoolOpt) if True, will not discover local volumes
+# use_local_volumes=true
+###### (StrOpt) Driver to use for volume creation
+# volume_driver="nova.volume.driver.ISCSIDriver"
+###### (BoolOpt) if True will force update capabilities on each check
+# volume_force_update_capabilities=false
+###### (StrOpt) Template for creating users vpn file
+# vpn_client_template="/Users/vishvananda/cache/stack/nova/nova/cloudpipe/client.ovpn.template"
+
+######### defined in nova.auth.ldapdriver #########
+
+###### (StrOpt) cn for Cloud Admins
+# ldap_cloudadmin="cn=cloudadmins,ou=Groups,dc=example,dc=com"
+###### (StrOpt) cn for Developers
+# ldap_developer="cn=developers,ou=Groups,dc=example,dc=com"
+###### (StrOpt) cn for ItSec
+# ldap_itsec="cn=itsec,ou=Groups,dc=example,dc=com"
+###### (StrOpt) cn for NetAdmins
+# ldap_netadmin="cn=netadmins,ou=Groups,dc=example,dc=com"
+###### (StrOpt) LDAP password
+# ldap_password="changeme"
+###### (StrOpt) OU for Projects
+# ldap_project_subtree="ou=Groups,dc=example,dc=com"
+###### (IntOpt) Current version of the LDAP schema
+# ldap_schema_version=2
+###### (StrOpt) cn for Sysadmins
+# ldap_sysadmin="cn=sysadmins,ou=Groups,dc=example,dc=com"
+###### (StrOpt) Point this at your ldap server
+# ldap_url="ldap://localhost"
+###### (StrOpt) DN of admin user
+# ldap_user_dn="cn=Manager,dc=example,dc=com"
+###### (StrOpt) Attribute to use as id
+# ldap_user_id_attribute="uid"
+###### (BoolOpt) Modify user attributes instead of creating/deleting
+# ldap_user_modify_only=false
+###### (StrOpt) Attribute to use as name
+# ldap_user_name_attribute="cn"
+###### (StrOpt) OU for Users
+# ldap_user_subtree="ou=Users,dc=example,dc=com"
+###### (StrOpt) OID for Users
+# ldap_user_unit="Users"
+###### (StrOpt) OU for Roles
+# role_project_subtree="ou=Groups,dc=example,dc=com"
+
+######### defined in nova.api.auth #########
+
+###### (BoolOpt) Treat X-Forwarded-For as the canonical remote address. Only enable this if you have a sanitizing proxy.
+# use_forwarded_for=false
+
+######### defined in nova.api.ec2 #########
+
+###### (BoolOpt) Return the IP address as private dns hostname in describe instances
+# ec2_private_dns_show_ip=false
+###### (StrOpt) URL to get token from ec2 request.
+# keystone_ec2_url="http://localhost:5000/v2.0/ec2tokens"
+###### (IntOpt) Number of failed auths before lockout.
+# lockout_attempts=5
+###### (IntOpt) Number of minutes to lockout if triggered.
+# lockout_minutes=15
+###### (IntOpt) Number of minutes for lockout window.
+# lockout_window=15
+
+######### defined in nova.api.openstack.compute #########
+
+###### (BoolOpt) Permit instance snapshot operations.
+# allow_instance_snapshots=true
+
+######### defined in nova.vnc #########
+
+###### (StrOpt) location of vnc console proxy, in the form "http://127.0.0.1:6080/vnc_auto.html"
+# novncproxy_base_url="http://127.0.0.1:6080/vnc_auto.html"
+###### (BoolOpt) enable vnc related features
+# vnc_enabled=true
+###### (StrOpt) keymap for vnc
+# vnc_keymap="en-us"
+###### (StrOpt) IP address on which instance vncservers should listen
+# vncserver_listen="127.0.0.1"
+###### (StrOpt) the address to which proxy clients (like nova-xvpvncproxy) should connect
+# vncserver_proxyclient_address="127.0.0.1"
+###### (StrOpt) location of nova xvp vnc console proxy, in the form "http://127.0.0.1:6081/console"
+# xvpvncproxy_base_url="http://127.0.0.1:6081/console"
+
+######### defined in nova.vnc.xvp_proxy #########
+
+###### (StrOpt) Address that the XCP VNC proxy should bind to
+# xvpvncproxy_host="0.0.0.0"
+###### (IntOpt) Port that the XCP VNC proxy should bind to
+# xvpvncproxy_port=6081
+
+######### defined in nova.ipv6.api #########
+
+###### (StrOpt) Backend to use for IPv6 generation
+# ipv6_backend="rfc2462"
+
+######### defined in nova.network.linux_net #########
+
+###### (IntOpt) Lifetime of a DHCP lease in seconds
+# dhcp_lease_time=120
+###### (StrOpt) location of nova-dhcpbridge
+# dhcpbridge="/Users/vishvananda/cache/stack/nova/bin/nova-dhcpbridge"
+###### (StrOpt) location of flagfile for dhcpbridge
+# dhcpbridge_flagfile="/etc/nova/nova-dhcpbridge.conf"
+###### (StrOpt) dmz range that should be accepted
+# dmz_cidr="10.128.0.0/24"
+###### (StrOpt) if set, uses specific dns server for dnsmasq
+# dns_server=<None>
+###### (StrOpt) Override the default dnsmasq settings with this file
+# dnsmasq_config_file=""
+###### (StrOpt) Driver used to create ethernet devices.
+# linuxnet_interface_driver="nova.network.linux_net.LinuxBridgeInterfaceDriver"
+###### (StrOpt) Name of Open vSwitch bridge used with linuxnet
+# linuxnet_ovs_integration_bridge="br-int"
+###### (StrOpt) MTU setting for vlan
+# network_device_mtu=<None>
+###### (StrOpt) Location to keep network config files
+# networks_path="$state_path/networks"
+###### (StrOpt) Interface for public IP addresses
+# public_interface="eth0"
+###### (StrOpt) Public IP of network host
+# routing_source_ip="$my_ip"
+###### (BoolOpt) send gratuitous ARPs for HA setup
+# send_arp_for_ha=false
+###### (BoolOpt) Use single default gateway. Only the first NIC of the VM will get the default gateway from the DHCP server
+# use_single_default_gateway=false
+
+######### defined in nova.network.manager #########
+
+###### (BoolOpt) Automatically assign a floating ip to each VM
+# auto_assign_floating_ip=false
+###### (IntOpt) Number of addresses reserved for vpn clients
+# cnt_vpn_clients=0
+###### (IntOpt) Number of attempts to create unique mac address
+# create_unique_mac_address_attempts=5
+###### (StrOpt) Default pool for floating ips
+# default_floating_pool="nova"
+###### (StrOpt) domain to use for building the hostnames
+# dhcp_domain="novalocal"
+###### (BoolOpt) If True, skip using the queue and make local calls
+# fake_call=false
+###### (IntOpt) Seconds after which a deallocated ip is disassociated
+# fixed_ip_disassociate_timeout=600
+###### (StrOpt) Fixed IP address block
+# fixed_range="10.0.0.0/8"
+###### (StrOpt) Fixed IPv6 address block
+# fixed_range_v6="fd00::/48"
+###### (BoolOpt) Whether to attempt to inject network setup into guest
+# flat_injected=false
+###### (StrOpt) FlatDhcp will bridge into this interface if set
+# flat_interface=<None>
+###### (StrOpt) Bridge for simple network instances
+# flat_network_bridge=<None>
+###### (StrOpt) Dns for simple network
+# flat_network_dns="8.8.4.4"
+###### (StrOpt) Floating IP address block
+# floating_range="4.4.4.0/24"
+###### (BoolOpt) If True, send a dhcp release on instance termination
+# force_dhcp_release=false
+###### (StrOpt) Default IPv4 gateway
+# gateway=<None>
+###### (StrOpt) Default IPv6 gateway
+# gateway_v6=<None>
+###### (StrOpt) Indicates underlying L3 management library
+# l3_lib="nova.network.l3.LinuxNetL3"
+###### (BoolOpt) Default value for multi_host in networks
+# multi_host=false
+###### (StrOpt) Network host to use for ip allocation in flat modes
+# network_host="firefly-2.local"
+###### (IntOpt) Number of addresses in each private subnet
+# network_size=256
+###### (IntOpt) Number of networks to support
+# num_networks=1
+###### (BoolOpt) Whether to update dhcp when fixed_ip is disassociated
+# update_dhcp_on_disassociate=false
+###### (StrOpt) vlans will bridge into this interface if set
+# vlan_interface=<None>
+###### (IntOpt) First VLAN for private networks
+# vlan_start=100
+###### (StrOpt) Public IP for the cloudpipe VPN servers
+# vpn_ip="$my_ip"
+###### (IntOpt) First Vpn port for private networks
+# vpn_start=1000
+
+######### defined in nova.network.quantum.manager #########
+
+###### (StrOpt) HOST for connecting to melange
+# melange_host="127.0.0.1"
+###### (IntOpt) Number of retries when contacting melange
+# melange_num_retries=0
+###### (IntOpt) PORT for connecting to melange
+# melange_port=9898
+###### (StrOpt) HOST for connecting to quantum
+# quantum_connection_host="127.0.0.1"
+###### (StrOpt) PORT for connecting to quantum
+# quantum_connection_port="9696"
+###### (StrOpt) Default tenant id when creating quantum networks
+# quantum_default_tenant_id="default"
+###### (StrOpt) Indicates underlying IP address management library
+# quantum_ipam_lib="nova.network.quantum.nova_ipam_lib"
+###### (BoolOpt) Add the link local address to the port security list
+# quantum_port_security_include_link_local=false
+###### (BoolOpt) Whether or not to enable DHCP for networks
+# quantum_use_dhcp=false
+###### (BoolOpt) Whether or not to enable port security
+# quantum_use_port_security=false
+###### (BoolOpt) Use Melange for assignment of MAC addresses
+# use_melange_mac_generation=false
+
+######### defined in nova.compute.manager #########
+
+###### (StrOpt) Driver to use for controlling virtualization
+# compute_driver="nova.virt.connection.get_connection"
+###### (StrOpt) Console proxy host to use to connect to instances on this host.
+# console_host="firefly-2.local"
+###### (StrOpt) Default notification level for outgoing notifications
+# default_notification_level="INFO"
+###### (StrOpt) Default publisher_id for outgoing notifications
+# default_publisher_id="$host"
+###### (IntOpt) Number of seconds between instance info_cache self healing updates
+# heal_instance_info_cache_interval=60
+###### (IntOpt) Interval in seconds for querying the host status
+# host_state_interval=120
+###### (IntOpt) Number of periodic scheduler ticks to wait between runs of the image cache manager.
+# image_cache_manager_interval=3600
+###### (StrOpt) where instances are stored on disk
+# instances_path="$state_path/instances"
+###### (IntOpt) Number of 1 second retries needed in live_migration
+# live_migration_retry_count=30
+###### (IntOpt) Automatically hard reboot an instance if it has been stuck in a rebooting state longer than N seconds. Set to 0 to disable.
+# reboot_timeout=0
+###### (IntOpt) Automatically unrescue an instance after N seconds. Set to 0 to disable.
+# rescue_timeout=0
+###### (IntOpt) Automatically confirm resizes after N seconds. Set to 0 to disable.
+# resize_confirm_window=0
+###### (StrOpt) Action to take if a running deleted instance is detected. Valid options are 'noop', 'log' and 'reap'. Set to 'noop' to disable.
+# running_deleted_instance_action="noop"
+###### (IntOpt) Number of periodic scheduler ticks to wait between runs of the cleanup task.
+# running_deleted_instance_poll_interval=30
+###### (IntOpt) Number of seconds after being deleted when a running instance should be considered eligible for cleanup.
+# running_deleted_instance_timeout=0
+
+######### defined in nova.virt.baremetal.nodes #########
+
+###### (StrOpt) Bare-metal driver runs on
+# baremetal_driver="tilera"
+###### (StrOpt) Tilera command line program for Bare-metal driver
+# tile_monitor="/usr/local/TileraMDE/bin/tile-monitor"
+
+######### defined in nova.virt.baremetal.proxy #########
+
+###### (BoolOpt) Whether to allow in-project network traffic
+# baremetal_allow_project_net_traffic=true
+###### (StrOpt) Template file for injected network
+# baremetal_injected_network_template="/Users/vishvananda/cache/stack/nova/nova/virt/interfaces.template"
+###### (StrOpt) baremetal domain type
+# baremetal_type="baremetal"
+###### (StrOpt) Override the default baremetal URI
+# baremetal_uri=""
+###### (BoolOpt) Force backing images to raw format
+# force_raw_images=true
+###### (ListOpt) Order of methods used to mount disk images
+# img_handlers="loop,nbd,guestfs"
+###### (StrOpt) Template file for injected network
+# injected_network_template="/Users/vishvananda/cache/stack/nova/nova/virt/interfaces.template"
+###### (IntOpt) maximum number of possible nbd devices
+# max_nbd_devices=16
+###### (StrOpt) binary to use for qemu-img commands
+# qemu_img="qemu-img"
+###### (IntOpt) time to wait for a NBD device coming up
+# timeout_nbd=10
+###### (MultiStrOpt) mkfs commands for ephemeral device. The format is <os_type>=<mkfs command>
+# virt_mkfs="default=mkfs.ext3 -L %(fs_label)s -F %(target)s"
+# virt_mkfs="linux=mkfs.ext3 -L %(fs_label)s -F %(target)s"
+# virt_mkfs="windows=mkfs.ntfs --force --fast --label %(fs_label)s %(target)s"
+
+######### defined in nova.virt.firewall #########
+
+###### (BoolOpt) Whether to allow network traffic from same network
+# allow_same_net_traffic=true
+
+######### defined in nova.virt.libvirt.connection #########
+
+###### (StrOpt) Define block migration behavior.
+# block_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_NON_SHARED_INC"
+###### (BoolOpt) Write a checksum for files in _base to disk
+# checksum_base_images=false
+###### (StrOpt) CpuInfo XML Template (used only for live migration now)
+# cpuinfo_xml_template="/Users/vishvananda/cache/stack/nova/nova/virt/cpuinfo.xml.template"
+###### (StrOpt) Override the default disk prefix for the devices attached to a server, which is dependent on libvirt_type. (valid options are: sd, xvd, uvd, vd)
+# libvirt_disk_prefix=<None>
+###### (BoolOpt) Inject the admin password at boot time, without an agent.
+# libvirt_inject_password=false
+###### (StrOpt) Libvirt domain type (valid options are: kvm, lxc, qemu, uml, xen)
+# libvirt_type="kvm"
+###### (StrOpt) Override the default libvirt URI (which is dependent on libvirt_type)
+# libvirt_uri=""
+###### (BoolOpt) Use virtio for bridge interfaces
+# libvirt_use_virtio_for_bridges=false
+###### (StrOpt) The libvirt VIF driver to configure the VIFs.
+# libvirt_vif_driver="nova.virt.libvirt.vif.LibvirtBridgeDriver"
+###### (StrOpt) Type of VIF to create.
+# libvirt_vif_type="bridge"
+###### (ListOpt) Libvirt handlers for remote volumes.
+# libvirt_volume_drivers="iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver,local=nova.virt.libvirt.volume.LibvirtVolumeDriver,fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver,rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver"
+###### (IntOpt) Number of seconds to wait for instance to shut down after soft reboot request is made. We fall back to hard reboot if instance does not shutdown within this window.
+# libvirt_wait_soft_reboot_seconds=120
+###### (StrOpt) Libvirt XML Template
+# libvirt_xml_template="/Users/vishvananda/cache/stack/nova/nova/virt/libvirt.xml.template"
+###### (IntOpt) Define live migration behavior
+# live_migration_bandwidth=0
+###### (StrOpt) Define live migration behavior.
+# live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER"
+###### (StrOpt) Define protocol used by live_migration feature
+# live_migration_uri="qemu+tcp://%s/system"
+###### (BoolOpt) Should unused base images be removed?
+# remove_unused_base_images=false
+###### (IntOpt) Unused unresized base images younger than this will not be removed
+# remove_unused_original_minimum_age_seconds=86400
+###### (IntOpt) Unused resized base images younger than this will not be removed
+# remove_unused_resized_minimum_age_seconds=3600
+###### (StrOpt) Rescue ami image
+# rescue_image_id=<None>
+###### (StrOpt) Rescue aki image
+# rescue_kernel_id=<None>
+###### (StrOpt) Rescue ari image
+# rescue_ramdisk_id=<None>
+###### (StrOpt) Snapshot image format (valid options are: raw, qcow2, vmdk, vdi). Defaults to same as source image
+# snapshot_image_format=<None>
+###### (BoolOpt) Sync virtual and real mouse cursors in Windows VMs
+# use_usb_tablet=true
+
+######### defined in nova.virt.libvirt.vif #########
+
+###### (StrOpt) Name of Integration Bridge used by Open vSwitch
+# libvirt_ovs_bridge="br-int"
+
+######### defined in nova.virt.vmwareapi.vim #########
+
+###### (StrOpt) VIM Service WSDL Location, e.g. http://<server>/vimService.wsdl. Optional: needed only to work around a bug in the default WSDL shipped with vSphere ESX 4.1. Refer to the vmwareapi readme for setup.
+# vmwareapi_wsdl_loc=<None>
+
+######### defined in nova.virt.vmwareapi.vmops #########
+
+###### (StrOpt) The VMWare VIF driver to configure the VIFs.
+# vmware_vif_driver="nova.virt.vmwareapi.vif.VMWareVlanBridgeDriver"
+
+######### defined in nova.virt.vmwareapi_conn #########
+
+###### (FloatOpt) The number of times we retry on failures, e.g., socket error, etc. Used only if connection_type is vmwareapi
+# vmwareapi_api_retry_count=10
+###### (StrOpt) URL for connection to VMWare ESX host. Required if connection_type is vmwareapi.
+# vmwareapi_host_ip=<None>
+###### (StrOpt) Password for connection to VMWare ESX host. Used only if connection_type is vmwareapi.
+# vmwareapi_host_password=<None>
+###### (StrOpt) Username for connection to VMWare ESX host. Used only if connection_type is vmwareapi.
+# vmwareapi_host_username=<None>
+###### (FloatOpt) The interval used for polling of remote tasks. Used only if connection_type is vmwareapi
+# vmwareapi_task_poll_interval=5.0
+###### (StrOpt) Physical ethernet adapter name for vlan networking
+# vmwareapi_vlan_interface="vmnic0"
+
+######### defined in nova.virt.xenapi.pool #########
+
+###### (IntOpt) time to wait for a block device to be created
+# block_device_creation_timeout=10
+###### (StrOpt) Default OS type
+# default_os_type="linux"
+###### (IntOpt) maximum size in bytes of kernel or ramdisk images
+# max_kernel_ramdisk_size=16777216
+###### (StrOpt) Filter for finding the SR to be used to install guest instances on. The default value is the Local Storage in default XenServer/XCP installations. To select an SR with a different matching criteria, you could set it to other-config:my_favorite_sr=true. On the other hand, to fall back on the Default SR, as displayed by XenCenter, set this flag to: default-sr:true
+# sr_matching_filter="other-config:i18n-key=local-storage"
+###### (BoolOpt) To use for hosts with different CPUs
+# use_join_force=true
+###### (BoolOpt) Whether to use sparse_copy for copying data on a resize down (False will use standard dd). This speeds up resizes down considerably since large runs of zeros won't have to be rsynced
+# xenapi_sparse_copy=true
+
+######### defined in nova.virt.xenapi.vif #########
+
+###### (StrOpt) Name of Integration Bridge used by Open vSwitch
+# xenapi_ovs_integration_bridge="xapi1"
+
+######### defined in nova.virt.xenapi.vmops #########
+
+###### (IntOpt) number of seconds to wait for agent to be fully operational
+# agent_version_timeout=300
+###### (BoolOpt) Whether to generate swap (False means fetching it from OVA)
+# xenapi_generate_swap=false
+###### (IntOpt) number of seconds to wait for instance to go to running state
+# xenapi_running_timeout=60
+###### (StrOpt) The XenAPI VIF driver using XenServer Network APIs.
+# xenapi_vif_driver="nova.virt.xenapi.vif.XenAPIBridgeDriver"
+
+######### defined in nova.virt.xenapi_conn #########
+
+###### (StrOpt) IQN Prefix
+# iqn_prefix="iqn.2010-10.org.openstack"
+###### (StrOpt) iSCSI Target Host
+# target_host=<None>
+###### (StrOpt) iSCSI Target Port, 3260 Default
+# target_port="3260"
+###### (StrOpt) Specifies the path in which the xenapi guest agent should be located. If the agent is present, network configuration is not injected into the image. Used if connection_type=xenapi and flat_injected=True
+# xenapi_agent_path="usr/sbin/xe-update-networking"
+###### (IntOpt) Maximum number of concurrent XenAPI connections. Used only if connection_type=xenapi.
+# xenapi_connection_concurrent=5
+###### (StrOpt) Password for connection to XenServer/Xen Cloud Platform. Used only if connection_type=xenapi.
+# xenapi_connection_password=<None>
+###### (StrOpt) URL for connection to XenServer/Xen Cloud Platform. Required if connection_type=xenapi.
+# xenapi_connection_url=<None>
+###### (StrOpt) Username for connection to XenServer/Xen Cloud Platform. Used only if connection_type=xenapi.
+# xenapi_connection_username="root"
+###### (BoolOpt) Log all instance calls to XenAPI in the database.
+# xenapi_log_instance_actions=false
+###### (IntOpt) Timeout in seconds for XenAPI login.
+# xenapi_login_timeout=10
+###### (BoolOpt) Used to enable the remapping of VBD dev (Works around an issue in Ubuntu Maverick)
+# xenapi_remap_vbd_dev=false
+###### (StrOpt) Specify prefix to remap VBD dev to (ex. /dev/xvdb -> /dev/sdb)
+# xenapi_remap_vbd_dev_prefix="sd"
+###### (StrOpt) Base path to the storage repository
+# xenapi_sr_base_path="/var/run/sr-mount"
+###### (FloatOpt) The interval used for polling of remote tasks (Async.VM.start, etc). Used only if connection_type=xenapi.
+# xenapi_task_poll_interval=0.5
+###### (IntOpt) Max number of times to poll for VHD to coalesce. Used only if connection_type=xenapi.
+# xenapi_vhd_coalesce_max_attempts=5
+###### (FloatOpt) The interval used for polling of coalescing vhds. Used only if connection_type=xenapi.
+# xenapi_vhd_coalesce_poll_interval=5.0
+
+######### defined in nova.console.manager #########
+
+###### (StrOpt) Driver to use for the console proxy
+# console_driver="nova.console.xvp.XVPConsoleProxy"
+###### (StrOpt) Publicly visible name for this console host
+# console_public_hostname="firefly-2.local"
+###### (BoolOpt) Stub calls to compute worker for tests
+# stub_compute=false
+
+######### defined in nova.console.vmrc #########
+
+###### (IntOpt) number of retries for retrieving VMRC information
+# console_vmrc_error_retries=10
+###### (IntOpt) port for VMware VMRC connections
+# console_vmrc_port=443
+
+######### defined in nova.console.xvp #########
+
+###### (StrOpt) generated XVP conf file
+# console_xvp_conf="/etc/xvp.conf"
+###### (StrOpt) XVP conf template
+# console_xvp_conf_template="/Users/vishvananda/cache/stack/nova/nova/console/xvp.conf.template"
+###### (StrOpt) XVP log file
+# console_xvp_log="/var/log/xvp.log"
+###### (IntOpt) port for XVP to multiplex VNC connections on
+# console_xvp_multiplex_port=5900
+###### (StrOpt) XVP master process pid file
+# console_xvp_pid="/var/run/xvp.pid"
+
+######### defined in nova.consoleauth.manager #########
+
+###### (IntOpt) How many seconds before deleting tokens
+# console_token_ttl=600
+###### (StrOpt) Manager for console auth
+# consoleauth_manager="nova.consoleauth.manager.ConsoleAuthManager"
+
+######### defined in nova.image.s3 #########
+
+###### (StrOpt) parent dir for tempdir used for image decryption
+# image_decryption_dir="/tmp"
+###### (StrOpt) access key to use for s3 server for images
+# s3_access_key="notchecked"
+###### (StrOpt) secret key to use for s3 server for images
+# s3_secret_key="notchecked"
+
+######### defined in nova.cloudpipe.pipelib #########
+
+###### (StrOpt) Template for cloudpipe instance boot script
+# boot_script_template="/Users/vishvananda/cache/stack/nova/nova/cloudpipe/bootscript.template"
+###### (StrOpt) Netmask to push into openvpn config
+# dmz_mask="255.255.255.0"
+###### (StrOpt) Network to push into openvpn config
+# dmz_net="10.0.0.0"
+###### (StrOpt) Instance type for vpn instances
+# vpn_instance_type="m1.tiny"
+
+######### defined in nova.notifier.list_notifier #########
+
+###### (MultiStrOpt) List of drivers to send notifications
+# list_notifier_drivers="nova.notifier.no_op_notifier"
+
+######### defined in nova.notifier.rabbit_notifier #########
+
+###### (ListOpt) AMQP topic used for Nova notifications
+# notification_topics="notifications"
+
+######### defined in nova.objectstore.s3server #########
+
+###### (StrOpt) path to s3 buckets
+# buckets_path="$state_path/buckets"
+
+######### defined in nova.rpc.impl_kombu #########
+
+###### (StrOpt) SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs=""
+###### (StrOpt) SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile=""
+###### (StrOpt) SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile=""
+###### (StrOpt) SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version=""
+
+######### defined in nova.scheduler.driver #########
+
+###### (StrOpt) The scheduler host manager class to use
+# scheduler_host_manager="nova.scheduler.host_manager.HostManager"
+
+######### defined in nova.scheduler.filters.core_filter #########
+
+###### (FloatOpt) Virtual CPU to Physical CPU allocation ratio
+# cpu_allocation_ratio=16.0
+
+######### defined in nova.scheduler.filters.ram_filter #########
+
+###### (FloatOpt) virtual ram to physical ram allocation ratio
+# ram_allocation_ratio=1.5
+
+######### defined in nova.scheduler.host_manager #########
+
+###### (IntOpt) Amount of disk in MB to reserve for host/dom0
+# reserved_host_disk_mb=0
+###### (IntOpt) Amount of memory in MB to reserve for host/dom0
+# reserved_host_memory_mb=512
+###### (MultiStrOpt) Filter classes available to the scheduler which may be specified more than once. An entry of "nova.scheduler.filters.standard_filters" maps to all filters included with nova.
+# scheduler_available_filters="nova.scheduler.filters.standard_filters"
+###### (ListOpt) Which filter class names to use for filtering hosts when not specified in the request.
+# scheduler_default_filters="AvailabilityZoneFilter,RamFilter,ComputeFilter"
+
+######### defined in nova.scheduler.least_cost #########
+
+###### (FloatOpt) How much weight to give the fill-first cost function
+# compute_fill_first_cost_fn_weight=1.0
+###### (ListOpt) Which cost functions the LeastCostScheduler should use
+# least_cost_functions="nova.scheduler.least_cost.compute_fill_first_cost_fn"
+###### (FloatOpt) How much weight to give the noop cost function
+# noop_cost_fn_weight=1.0
+
+######### defined in nova.scheduler.manager #########
+
+###### (StrOpt) Default driver to use for the scheduler
+# scheduler_driver="nova.scheduler.multi.MultiScheduler"
+
+######### defined in nova.scheduler.multi #########
+
+###### (StrOpt) Driver to use for scheduling compute calls
+# compute_scheduler_driver="nova.scheduler.distributed_scheduler.DistributedScheduler"
+###### (StrOpt) Driver to use for scheduling volume calls
+# volume_scheduler_driver="nova.scheduler.chance.ChanceScheduler"
+
+######### defined in nova.scheduler.scheduler_options #########
+
+###### (StrOpt) Absolute path to scheduler configuration JSON file.
+# scheduler_json_config_location=""
+
+######### defined in nova.scheduler.simple #########
+
+###### (IntOpt) maximum number of instance cores to allow per host
+# max_cores=16
+###### (IntOpt) maximum number of volume gigabytes to allow per host
+# max_gigabytes=10000
+###### (IntOpt) maximum number of networks to allow per host
+# max_networks=1000
+###### (BoolOpt) Allow overcommitting vcpus on isolated hosts
+# skip_isolated_core_check=true
+
+######### defined in nova.scheduler.vsa #########
+
+###### (IntOpt) The percentage range for capacity comparison
+# drive_type_approx_capacity_percent=10
+###### (StrOpt) EC2 access key used by VSA for accessing nova
+# vsa_ec2_access_key=<None>
+###### (StrOpt) User ID used by VSA for accessing nova
+# vsa_ec2_user_id=<None>
+###### (BoolOpt) Ask scheduler to create multiple volumes in one call
+# vsa_multi_vol_creation=true
+###### (BoolOpt) Allow selection of same host for multiple drives
+# vsa_select_unique_drives=true
+###### (IntOpt) The number of unique hosts per storage allocation
+# vsa_unique_hosts_per_alloc=10
+###### (StrOpt) Name of volume type associated with FE VSA volumes
+# vsa_volume_type_name="VSA volume type"
+
+######### defined in nova.vsa.manager #########
+
+###### (StrOpt) Driver to use for controlling VSAs
+# vsa_driver="nova.vsa.connection.get_connection"
+
+######### defined in nova.volume.driver #########
+
+###### (StrOpt) iscsi target user-land tool to use
+# iscsi_helper="ietadm"
+###### (StrOpt) use this ip for iscsi
+# iscsi_ip_address="$my_ip"
+###### (IntOpt) Number of iscsi target ids per host
+# iscsi_num_targets=100
+###### (IntOpt) The port that the iSCSI daemon is listening on
+# iscsi_port=3260
+###### (StrOpt) prefix for iscsi volumes
+# iscsi_target_prefix="iqn.2010-10.org.openstack:"
+###### (StrOpt) number of times to rescan iSCSI target to find volume
+# num_iscsi_scan_tries="3"
+###### (StrOpt) number of times to attempt to run flakey shell commands
+# num_shell_tries="3"
+###### (StrOpt) the rbd pool in which volumes are stored
+# rbd_pool="rbd"
+###### (StrOpt) Name for the VG that will contain exported volumes
+# volume_group="nova-volumes"
+
+######### defined in nova.volume.netapp #########
+
+###### (StrOpt) User name for the DFM server
+# netapp_login=<None>
+###### (StrOpt) Password for the DFM server
+# netapp_password=<None>
+###### (StrOpt) Hostname for the DFM server
+# netapp_server_hostname=<None>
+###### (IntOpt) Port number for the DFM server
+# netapp_server_port=8088
+###### (StrOpt) Storage service to use for provisioning
+# netapp_storage_service=<None>
+###### (StrOpt) Vfiler to use for provisioning
+# netapp_vfiler=<None>
+###### (StrOpt) URL of the WSDL file for the DFM server
+# netapp_wsdl_url=<None>
+
+######### defined in nova.volume.nexenta.volume #########
+
+###### (StrOpt) block size for volumes (blank=default,8KB)
+# nexenta_blocksize=""
+###### (StrOpt) IP address of Nexenta SA
+# nexenta_host=""
+###### (IntOpt) Nexenta target portal port
+# nexenta_iscsi_target_portal_port=3260
+###### (StrOpt) Password to connect to Nexenta SA
+# nexenta_password="nexenta"
+###### (IntOpt) HTTP port to connect to Nexenta REST API server
+# nexenta_rest_port=2000
+###### (StrOpt) Use http or https for REST connection (default auto)
+# nexenta_rest_protocol="auto"
+###### (BoolOpt) flag to create sparse volumes
+# nexenta_sparse=false
+###### (StrOpt) prefix for iSCSI target groups on SA
+# nexenta_target_group_prefix="nova/"
+###### (StrOpt) IQN prefix for iSCSI targets
+# nexenta_target_prefix="iqn.1986-03.com.sun:02:nova-"
+###### (StrOpt) User name to connect to Nexenta SA
+# nexenta_user="admin"
+###### (StrOpt) pool on SA that will hold all volumes
+# nexenta_volume="nova"
+
+######### defined in nova.volume.san #########
+
+###### (StrOpt) Cluster name to use for creating volumes
+# san_clustername=""
+###### (StrOpt) IP address of SAN controller
+# san_ip=""
+###### (BoolOpt) Execute commands locally instead of over SSH; use if the volume service is running on the SAN device
+# san_is_local=false
+###### (StrOpt) Username for SAN controller
+# san_login="admin"
+###### (StrOpt) Password for SAN controller
+# san_password=""
+###### (StrOpt) Filename of private key to use for SSH authentication
+# san_private_key=""
+###### (IntOpt) SSH port to use with SAN
+# san_ssh_port=22
+###### (BoolOpt) Use thin provisioning for SAN volumes?
+# san_thin_provision=true
+###### (StrOpt) The ZFS path under which to create zvols for volumes.
+# san_zfs_volume_base="rpool/"
+
+# Total option count: 456
diff --git a/nova/api/metadata/handler.py b/nova/api/metadata/handler.py
index 99831fcf8..15bf9283e 100644
--- a/nova/api/metadata/handler.py
+++ b/nova/api/metadata/handler.py
@@ -76,23 +76,6 @@ class MetadataRequestHandler(wsgi.Application):
network_api=self.network_api,
volume_api=volume.API())
- def _get_mpi_data(self, context, project_id):
- result = {}
- search_opts = {'project_id': project_id, 'deleted': False}
- for instance in self.compute_api.get_all(context,
- search_opts=search_opts):
- ip_info = ec2utils.get_ip_info_for_instance(context, instance)
- # only look at ipv4 addresses
- fixed_ips = ip_info['fixed_ips']
- if fixed_ips:
- line = '%s slots=%d' % (fixed_ips[0], instance['vcpus'])
- key = str(instance['key_name'])
- if key in result:
- result[key].append(line)
- else:
- result[key] = [line]
- return result
-
def _format_instance_mapping(self, ctxt, instance_ref):
root_device_name = instance_ref['root_device_name']
if root_device_name is None:
@@ -150,7 +133,6 @@ class MetadataRequestHandler(wsgi.Application):
except exception.NotFound:
return None
- mpi = self._get_mpi_data(ctxt, instance_ref['project_id'])
hostname = "%s.%s" % (instance_ref['hostname'], FLAGS.dhcp_domain)
host = instance_ref['host']
services = db.service_get_all_by_host(ctxt.elevated(), host)
@@ -184,8 +166,7 @@ class MetadataRequestHandler(wsgi.Application):
'public-hostname': hostname,
'public-ipv4': floating_ip,
'reservation-id': instance_ref['reservation_id'],
- 'security-groups': security_groups,
- 'mpi': mpi}}
+ 'security-groups': security_groups}}
# public-keys should be in meta-data only if user specified one
if instance_ref['key_name']:
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index d2a909409..3fefa6718 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -144,17 +144,16 @@ def _get_marker_param(request):
def limited(items, request, max_limit=FLAGS.osapi_max_limit):
- """
- Return a slice of items according to requested offset and limit.
+ """Return a slice of items according to requested offset and limit.
- @param items: A sliceable entity
- @param request: `wsgi.Request` possibly containing 'offset' and 'limit'
+ :param items: A sliceable entity
+ :param request: ``wsgi.Request`` possibly containing 'offset' and 'limit'
GET variables. 'offset' is where to start in the list,
and 'limit' is the maximum number of items to return. If
'limit' is not specified, 0, or > max_limit, we default
to max_limit. Negative values for either offset or limit
will cause exc.HTTPBadRequest() exceptions to be raised.
- @kwarg max_limit: The maximum number of items to return from 'items'
+ :kwarg max_limit: The maximum number of items to return from 'items'
"""
try:
offset = int(request.GET.get('offset', 0))
@@ -309,15 +308,15 @@ def get_nw_info_for_instance(context, instance):
def get_networks_for_instance(context, instance):
- """Returns a prepared nw_info list for passing into the view
- builders
-
- We end up with a data structure like:
- {'public': {'ips': [{'addr': '10.0.0.1', 'version': 4},
- {'addr': '2001::1', 'version': 6}],
- 'floating_ips': [{'addr': '172.16.0.1', 'version': 4},
- {'addr': '172.16.2.1', 'version': 4}]},
- ...}
+ """Returns a prepared nw_info list for passing into the view builders
+
+ We end up with a data structure like::
+
+ {'public': {'ips': [{'addr': '10.0.0.1', 'version': 4},
+ {'addr': '2001::1', 'version': 6}],
+ 'floating_ips': [{'addr': '172.16.0.1', 'version': 4},
+ {'addr': '172.16.2.1', 'version': 4}]},
+ ...}
"""
nw_info = get_nw_info_for_instance(context, instance)
return get_networks_for_instance_from_nw_info(nw_info)
diff --git a/nova/api/openstack/compute/contrib/flavorextradata.py b/nova/api/openstack/compute/contrib/flavorextradata.py
index f821a8ffb..92189fd51 100644
--- a/nova/api/openstack/compute/contrib/flavorextradata.py
+++ b/nova/api/openstack/compute/contrib/flavorextradata.py
@@ -16,11 +16,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""
-The Flavor extra data extension
+"""The Flavor extra data extension
+
Openstack API version 1.1 lists "name", "ram", "disk", "vcpus" as flavor
attributes. This extension adds to that list:
- OS-FLV-EXT-DATA:ephemeral
+
+- OS-FLV-EXT-DATA:ephemeral
"""
from nova import exception
diff --git a/nova/api/openstack/compute/contrib/hosts.py b/nova/api/openstack/compute/contrib/hosts.py
index df19872d7..a24b186c4 100644
--- a/nova/api/openstack/compute/contrib/hosts.py
+++ b/nova/api/openstack/compute/contrib/hosts.py
@@ -216,7 +216,9 @@ class HostController(object):
:param context: security context
:param host: hostname
:returns: expected to use HostShowTemplate.
- ex. {'host': {'resource':D},..}
+ ex.::
+
+ {'host': {'resource':D},..}
D: {'host': 'hostname','project': 'admin',
'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30}
"""
diff --git a/nova/api/openstack/compute/limits.py b/nova/api/openstack/compute/limits.py
index 3245d49d2..4d533d522 100644
--- a/nova/api/openstack/compute/limits.py
+++ b/nova/api/openstack/compute/limits.py
@@ -382,8 +382,8 @@ class WsgiLimiter(object):
"""
Rate-limit checking from a WSGI application. Uses an in-memory `Limiter`.
- To use:
- POST /<username> with JSON data such as:
+ To use, POST ``/<username>`` with JSON data such as::
+
{
"verb" : GET,
"path" : "/servers"
diff --git a/nova/api/validator.py b/nova/api/validator.py
index f3824075e..cc89fe1d9 100644
--- a/nova/api/validator.py
+++ b/nova/api/validator.py
@@ -114,10 +114,11 @@ def validate_user_data(user_data):
def validate(args, validator):
"""Validate values of args against validators in validator.
- args Dict of values to be validated.
- validator A dict where the keys map to keys in args
- and the values are validators.
- Applies each validator to args[key]
+ :param args: Dict of values to be validated.
+ :param validator: A dict where the keys map to keys in args
+ and the values are validators.
+ Applies each validator to ``args[key]``
+ :returns: True if validation succeeds. Otherwise False.
A validator should be a callable which accepts 1 argument and which
returns True if the argument passes validation. False otherwise.
@@ -126,7 +127,6 @@ def validate(args, validator):
Only validates keys which show up in both args and validator.
- returns True if validation succeeds. Otherwise False.
"""
for key in validator:
diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py
index d092e7e42..82c6816a0 100644
--- a/nova/auth/fakeldap.py
+++ b/nova/auth/fakeldap.py
@@ -244,9 +244,9 @@ class FakeLDAP(object):
def modify_s(self, dn, attrs):
"""Modify the object at dn using the attribute list.
- Args:
- dn -- a dn
- attrs -- a list of tuples in the following form:
+ :param dn: a dn
+ :param attrs: a list of tuples in the following form::
+
([MOD_ADD | MOD_DELETE | MOD_REPACE], attribute, value)
"""
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index d2c5bc647..438066e3b 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -126,11 +126,17 @@ class User(AuthBase):
"""Object representing a user
The following attributes are defined:
- :id: A system identifier for the user. A string (for LDAP)
- :name: The user name, potentially in some more friendly format
- :access: The 'username' for EC2 authentication
- :secret: The 'password' for EC2 authenticatoin
- :admin: ???
+
+ ``id``
+ A system identifier for the user. A string (for LDAP)
+ ``name``
+ The user name, potentially in some more friendly format
+ ``access``
+ The 'username' for EC2 authentication
+ ``secret``
+ The 'password' for EC2 authentication
+ ``admin``
+ ???
"""
def __init__(self, id, name, access, secret, admin):
@@ -256,35 +262,35 @@ class AuthManager(object):
a project with the same name as the user. This way, older tools
that have no project knowledge will still work.
- @type access: str
- @param access: Access key for user in the form "access:project".
+ :type access: str
+ :param access: Access key for user in the form "access:project".
- @type signature: str
- @param signature: Signature of the request.
+ :type signature: str
+ :param signature: Signature of the request.
- @type params: list of str
- @param params: Web paramaters used for the signature.
+ :type params: list of str
+ :param params: Web parameters used for the signature.
- @type verb: str
- @param verb: Web request verb ('GET' or 'POST').
+ :type verb: str
+ :param verb: Web request verb ('GET' or 'POST').
- @type server_string: str
- @param server_string: Web request server string.
+ :type server_string: str
+ :param server_string: Web request server string.
- @type path: str
- @param path: Web request path.
+ :type path: str
+ :param path: Web request path.
- @type check_type: str
- @param check_type: Type of signature to check. 'ec2' for EC2, 's3' for
+ :type check_type: str
+ :param check_type: Type of signature to check. 'ec2' for EC2, 's3' for
S3. Any other value will cause signature not to be
checked.
- @type headers: list
- @param headers: HTTP headers passed with the request (only needed for
+ :type headers: list
+ :param headers: HTTP headers passed with the request (only needed for
s3 signature checks)
- @rtype: tuple (User, Project)
- @return: User and project that the request represents.
+ :rtype: tuple (User, Project)
+ :return: User and project that the request represents.
"""
# TODO(vish): check for valid timestamp
(access_key, _sep, project_id) = access.partition(':')
@@ -362,11 +368,11 @@ class AuthManager(object):
def is_superuser(self, user):
"""Checks for superuser status, allowing user to bypass authorization
- @type user: User or uid
- @param user: User to check.
+ :type user: User or uid
+ :param user: User to check.
- @rtype: bool
- @return: True for superuser.
+ :rtype: bool
+ :return: True for superuser.
"""
if not isinstance(user, User):
user = self.get_user(user)
@@ -380,11 +386,11 @@ class AuthManager(object):
def is_admin(self, user):
"""Checks for admin status, allowing user to access all projects
- @type user: User or uid
- @param user: User to check.
+ :type user: User or uid
+ :param user: User to check.
- @rtype: bool
- @return: True for admin.
+ :rtype: bool
+ :return: True for admin.
"""
if not isinstance(user, User):
user = self.get_user(user)
@@ -426,17 +432,17 @@ class AuthManager(object):
see if the user is the project_manager of the specified project. It
is the same as calling is_project_manager(user, project).
- @type user: User or uid
- @param user: User to check.
+ :type user: User or uid
+ :param user: User to check.
- @type role: str
- @param role: Role to check.
+ :type role: str
+ :param role: Role to check.
- @type project: Project or project_id
- @param project: Project in which to look for local role.
+ :type project: Project or project_id
+ :param project: Project in which to look for local role.
- @rtype: bool
- @return: True if the user has the role.
+ :rtype: bool
+ :return: True if the user has the role.
"""
if role == 'projectmanager':
if not project:
@@ -465,14 +471,14 @@ class AuthManager(object):
The 'projectmanager' role is special and can't be added or removed.
- @type user: User or uid
- @param user: User to which to add role.
+ :type user: User or uid
+ :param user: User to which to add role.
- @type role: str
- @param role: Role to add.
+ :type role: str
+ :param role: Role to add.
- @type project: Project or project_id
- @param project: Project in which to add local role.
+ :type project: Project or project_id
+ :param project: Project in which to add local role.
"""
if role not in FLAGS.allowed_roles:
raise exception.UserRoleNotFound(role_id=role)
@@ -498,14 +504,14 @@ class AuthManager(object):
The 'projectmanager' role is special and can't be added or removed.
- @type user: User or uid
- @param user: User from which to remove role.
+ :type user: User or uid
+ :param user: User from which to remove role.
- @type role: str
- @param role: Role to remove.
+ :type role: str
+ :param role: Role to remove.
- @type project: Project or project_id
- @param project: Project in which to remove local role.
+ :type project: Project or project_id
+ :param project: Project in which to remove local role.
"""
uid = User.safe_id(user)
pid = Project.safe_id(project)
@@ -560,23 +566,23 @@ class AuthManager(object):
member_users=None):
"""Create a project
- @type name: str
- @param name: Name of the project to create. The name will also be
+ :type name: str
+ :param name: Name of the project to create. The name will also be
used as the project id.
- @type manager_user: User or uid
- @param manager_user: This user will be the project manager.
+ :type manager_user: User or uid
+ :param manager_user: This user will be the project manager.
- @type description: str
- @param project: Description of the project. If no description is
+ :type description: str
+ :param description: Description of the project. If no description is
specified, the name of the project will be used.
- @type member_users: list of User or uid
- @param: Initial project members. The project manager will always be
+ :type member_users: list of User or uid
+ :param member_users: Initial project members. The project manager will always be
added as a member, even if he isn't specified in this list.
- @rtype: Project
- @return: The new project.
+ :rtype: Project
+ :return: The new project.
"""
if member_users:
member_users = [User.safe_id(u) for u in member_users]
@@ -594,14 +600,14 @@ class AuthManager(object):
def modify_project(self, project, manager_user=None, description=None):
"""Modify a project
- @type name: Project or project_id
- @param project: The project to modify.
+ :type project: Project or project_id
+ :param project: The project to modify.
- @type manager_user: User or uid
- @param manager_user: This user will be the new project manager.
+ :type manager_user: User or uid
+ :param manager_user: This user will be the new project manager.
- @type description: str
- @param project: This will be the new description of the project.
+ :type description: str
+ :param description: This will be the new description of the project.
"""
LOG.audit(_("modifying project %s"), Project.safe_id(project))
@@ -645,12 +651,12 @@ class AuthManager(object):
def get_project_vpn_data(project):
"""Gets vpn ip and port for project
- @type project: Project or project_id
- @param project: Project from which to get associated vpn data
+ :type project: Project or project_id
+ :param project: Project from which to get associated vpn data
- @rvalue: tuple of (str, str)
- @return: A tuple containing (ip, port) or None, None if vpn has
- not been allocated for user.
+ :rtype: tuple of (str, str)
+ :return: A tuple containing (ip, port) or None, None if vpn has
+ not been allocated for user.
"""
networks = db.project_get_networks(context.get_admin_context(),
@@ -696,24 +702,24 @@ class AuthManager(object):
def create_user(self, name, access=None, secret=None, admin=False):
"""Creates a user
- @type name: str
- @param name: Name of the user to create.
+ :type name: str
+ :param name: Name of the user to create.
- @type access: str
- @param access: Access Key (defaults to a random uuid)
+ :type access: str
+ :param access: Access Key (defaults to a random uuid)
- @type secret: str
- @param secret: Secret Key (defaults to a random uuid)
+ :type secret: str
+ :param secret: Secret Key (defaults to a random uuid)
- @type admin: bool
- @param admin: Whether to set the admin flag. The admin flag gives
+ :type admin: bool
+ :param admin: Whether to set the admin flag. The admin flag gives
superuser status regardless of roles specified for the user.
- @type create_project: bool
- @param: Whether to create a project for the user with the same name.
+ :type create_project: bool
+ :param create_project: Whether to create a project for the user with the same name.
- @rtype: User
- @return: The new user.
+ :rtype: User
+ :return: The new user.
"""
if access is None:
access = str(uuid.uuid4())
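As a small sketch of the "access:project" key format described in the authenticate() docstring above (the key and project values here are invented)::

    access = '6e5f1c2ab3d84f0e9a7c:myproject'
    access_key, _sep, project_id = access.partition(':')
    assert access_key == '6e5f1c2ab3d84f0e9a7c'
    assert project_id == 'myproject'
    # With no ':project' suffix, project_id is '' and AuthManager falls
    # back to the project named after the user, as noted above.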
diff --git a/nova/cert/manager.py b/nova/cert/manager.py
index d44191eb0..b9f35b72d 100644
--- a/nova/cert/manager.py
+++ b/nova/cert/manager.py
@@ -17,10 +17,11 @@
Cert manager manages x509 certificates.
**Related Flags**
+
:cert_topic: What :mod:`rpc` topic to listen to (default: `cert`).
:cert_manager: The module name of a class derived from
- :class:`manager.Manager` (default:
- :class:`nova.cert.manager.Manager`).
+ :class:`manager.Manager` (default:
+ :class:`nova.cert.manager.Manager`).
"""
import base64
diff --git a/nova/common/policy.py b/nova/common/policy.py
index 34492f73d..ec944a1cc 100644
--- a/nova/common/policy.py
+++ b/nova/common/policy.py
@@ -49,49 +49,64 @@ def enforce(match_list, target_dict, credentials_dict):
"""Enforces authorization of some rules against credentials.
:param match_list: nested tuples of data to match against
- The basic brain supports three types of match lists:
- 1) rules
- looks like: ('rule:compute:get_instance',)
- Retrieves the named rule from the rules dict and recursively
- checks against the contents of the rule.
- 2) roles
- looks like: ('role:compute:admin',)
- Matches if the specified role is in credentials_dict['roles'].
- 3) generic
- ('tenant_id:%(tenant_id)s',)
- Substitutes values from the target dict into the match using
- the % operator and matches them against the creds dict.
-
- Combining rules:
- The brain returns True if any of the outer tuple of rules match
- and also True if all of the inner tuples match. You can use this to
- perform simple boolean logic. For example, the following rule would
- return True if the creds contain the role 'admin' OR the if the
- tenant_id matches the target dict AND the the creds contains the
- role 'compute_sysadmin':
-
- {
- "rule:combined": (
- 'role:admin',
- ('tenant_id:%(tenant_id)s', 'role:compute_sysadmin')
- )
- }
-
-
- Note that rule and role are reserved words in the credentials match, so
- you can't match against properties with those names. Custom brains may
- also add new reserved words. For example, the HttpBrain adds http as a
- reserved word.
+
+ The basic brain supports three types of match lists:
+
+ 1) rules
+
+ looks like: ``('rule:compute:get_instance',)``
+
+ Retrieves the named rule from the rules dict and recursively
+ checks against the contents of the rule.
+
+ 2) roles
+
+ looks like: ``('role:compute:admin',)``
+
+ Matches if the specified role is in credentials_dict['roles'].
+
+ 3) generic
+
+ looks like: ``('tenant_id:%(tenant_id)s',)``
+
+ Substitutes values from the target dict into the match using
+ the % operator and matches them against the creds dict.
+
+ Combining rules:
+
+ The brain returns True if any of the outer tuple of rules
+ match and also True if all of the inner tuples match. You
+ can use this to perform simple boolean logic. For
+ example, the following rule would return True if the creds
+ contain the role 'admin' OR if the tenant_id matches
+ the target dict AND the creds contain the role
+ 'compute_sysadmin':
+
+ ::
+
+ {
+ "rule:combined": (
+ 'role:admin',
+ ('tenant_id:%(tenant_id)s', 'role:compute_sysadmin')
+ )
+ }
+
+ Note that rule and role are reserved words in the credentials match, so
+ you can't match against properties with those names. Custom brains may
+ also add new reserved words. For example, the HttpBrain adds http as a
+ reserved word.
:param target_dict: dict of object properties
- Target dicts contain as much information as we can about the object being
- operated on.
+
+ Target dicts contain as much information as we can about the object being
+ operated on.
:param credentials_dict: dict of actor properties
- Credentials dicts contain as much information as we can about the user
- performing the action.
- :raises NotAuthorized if the check fails
+ Credentials dicts contain as much information as we can about the user
+ performing the action.
+
+ :raises NotAuthorized: if the check fails
"""
global _BRAIN
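A minimal sketch of the combined-rule semantics documented above, assuming a freshly initialized default brain with no extra rules registered (the tenant id and role values are invented)::

    from nova.common import policy

    match_list = ('role:admin',
                  ('tenant_id:%(tenant_id)s', 'role:compute_sysadmin'))
    target = {'tenant_id': 'tenant-1'}
    creds = {'roles': ['compute_sysadmin'], 'tenant_id': 'tenant-1'}

    # Passes: the inner tuple matches (the tenant ids line up AND the
    # role is present). Raises NotAuthorized if neither alternative matches.
    policy.enforce(match_list, target, creds)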
diff --git a/nova/compat/flagfile.py b/nova/compat/flagfile.py
index 3b5845c3d..0ffa7fe93 100644
--- a/nova/compat/flagfile.py
+++ b/nova/compat/flagfile.py
@@ -173,7 +173,7 @@ def handle_flagfiles(args, tempdir=None):
def handle_flagfiles_managed(args):
'''A context manager for handle_flagfiles() which removes temp files.
- For use with the 'with' statement, i.e.
+ For use with the 'with' statement, i.e.::
with handle_flagfiles_managed(args) as args:
# Do stuff
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 73fceb2f4..4b640b6ef 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -216,9 +216,13 @@ class ComputeManager(manager.SchedulerDependentManager):
return self.db.instance_update(context, instance_id, kwargs)
def _set_instance_error_state(self, context, instance_uuid):
- self._instance_update(context,
- instance_uuid,
- vm_state=vm_states.ERROR)
+ try:
+ self._instance_update(context,
+ instance_uuid, vm_state=vm_states.ERROR)
+ except exception.InstanceNotFound:
+ LOG.debug(_("Instance %(instance_uuid)s has been destroyed "
+ "from under us while trying to set it to ERROR") %
+ locals())
def init_host(self):
"""Initialization for a standalone compute service."""
@@ -1162,6 +1166,10 @@ class ComputeManager(manager.SchedulerDependentManager):
self._notify_about_instance_usage(instance_ref,
"resize.confirm.start")
+ # NOTE(tr3buchet): tear down networks on source host
+ self.network_api.setup_networks_on_host(context, instance_ref,
+ migration_ref['source_compute'], teardown=True)
+
network_info = self._get_instance_nw_info(context, instance_ref)
self.driver.confirm_migration(migration_ref, instance_ref,
self._legacy_nw_info(network_info))
@@ -1183,6 +1191,10 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref = self.db.instance_get_by_uuid(context,
migration_ref.instance_uuid)
+ # NOTE(tr3buchet): tear down networks on destination host
+ self.network_api.setup_networks_on_host(context, instance_ref,
+ teardown=True)
+
network_info = self._get_instance_nw_info(context, instance_ref)
self.driver.destroy(instance_ref, self._legacy_nw_info(network_info))
topic = self.db.queue_get_for(context, FLAGS.compute_topic,
@@ -1216,7 +1228,7 @@ class ComputeManager(manager.SchedulerDependentManager):
# Just roll back the record. There's no need to resize down since
# the 'old' VM already has the preferred attributes
self._instance_update(context,
- instance_ref["uuid"],
+ instance_ref['uuid'],
memory_mb=instance_type['memory_mb'],
host=migration_ref['source_compute'],
vcpus=instance_type['vcpus'],
@@ -1352,6 +1364,10 @@ class ComputeManager(manager.SchedulerDependentManager):
ephemeral_gb=instance_type['ephemeral_gb'])
resize_instance = True
+ # NOTE(tr3buchet): setup networks on destination host
+ self.network_api.setup_networks_on_host(context, instance_ref,
+ migration_ref['dest_compute'])
+
network_info = self._get_instance_nw_info(context, instance_ref)
self._notify_about_instance_usage(instance_ref, "finish_resize.start",
@@ -1865,6 +1881,10 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.pre_live_migration(block_device_info)
+ # NOTE(tr3buchet): setup networks on destination host
+ self.network_api.setup_networks_on_host(context, instance_ref,
+ self.host)
+
# Bridge settings.
# Call this method prior to ensure_filtering_rules_for_instance,
# since bridge is not set up, ensure_filtering_rules_for instance
@@ -1928,8 +1948,8 @@ class ComputeManager(manager.SchedulerDependentManager):
if self._get_instance_volume_bdms(context, instance_id):
rpc.call(context,
FLAGS.volume_topic,
- {"method": "check_for_export",
- "args": {'instance_id': instance_id}})
+ {'method': 'check_for_export',
+ 'args': {'instance_id': instance_id}})
if block_migration:
disk = self.driver.get_instance_disk_info(instance_ref.name)
@@ -1938,8 +1958,8 @@ class ComputeManager(manager.SchedulerDependentManager):
rpc.call(context,
self.db.queue_get_for(context, FLAGS.compute_topic, dest),
- {"method": "pre_live_migration",
- "args": {'instance_id': instance_id,
+ {'method': 'pre_live_migration',
+ 'args': {'instance_id': instance_id,
'block_migration': block_migration,
'disk': disk}})
@@ -1988,6 +2008,10 @@ class ComputeManager(manager.SchedulerDependentManager):
# Releasing vlan.
# (not necessary in current implementation?)
+ # NOTE(tr3buchet): tear down networks on source host
+ self.network_api.setup_networks_on_host(ctxt, instance_ref,
+ self.host, teardown=True)
+
network_info = self._get_instance_nw_info(ctxt, instance_ref)
# Releasing security group ingress rule.
self.driver.unfilter_instance(instance_ref,
@@ -2070,6 +2094,14 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref = self.db.instance_get(context, instance_id)
LOG.info(_('Post operation of migration started'),
instance=instance_ref)
+
+ # NOTE(tr3buchet): setup networks on destination host
+ # this is called a second time because
+ # multi_host does not create the bridge in
+ # plug_vifs
+ self.network_api.setup_networks_on_host(context, instance_ref,
+ self.host)
+
network_info = self._get_instance_nw_info(context, instance_ref)
self.driver.post_live_migration_at_destination(context, instance_ref,
self._legacy_nw_info(network_info),
@@ -2094,6 +2126,10 @@ class ComputeManager(manager.SchedulerDependentManager):
vm_state=vm_states.ACTIVE,
task_state=None)
+ # NOTE(tr3buchet): setup networks on source host (really it's re-setup)
+ self.network_api.setup_networks_on_host(context, instance_ref,
+ self.host)
+
for bdm in self._get_instance_volume_bdms(context, instance_ref['id']):
volume_id = bdm['volume_id']
volume = self.volume_api.get(context, volume_id)
@@ -2121,6 +2157,10 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref = self.db.instance_get(context, instance_id)
network_info = self._get_instance_nw_info(context, instance_ref)
+ # NOTE(tr3buchet): tear down networks on destination host
+ self.network_api.setup_networks_on_host(context, instance_ref,
+ self.host, teardown=True)
+
# NOTE(vish): The mapping is passed in so the driver can disconnect
# from remote volumes if necessary
block_device_info = self._get_instance_volume_block_device_info(
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 18b40dfaa..48b59a680 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -638,8 +638,10 @@ class Migration(BASE, NovaBase):
"""Represents a running host-to-host migration."""
__tablename__ = 'migrations'
id = Column(Integer, primary_key=True, nullable=False)
+ # NOTE(tr3buchet): the *_compute columns hold instance['host'] values
source_compute = Column(String(255))
dest_compute = Column(String(255))
+ # NOTE(tr3buchet): dest_host, btw, is an ip address
dest_host = Column(String(255))
old_instance_type_id = Column(Integer())
new_instance_type_id = Column(Integer())
diff --git a/nova/flags.py b/nova/flags.py
index 8aaf56659..ed82fca30 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -44,9 +44,6 @@ class NovaConfigOpts(cfg.CommonConfigOpts):
with flagfile.handle_flagfiles_managed(argv[1:]) as args:
return argv[:1] + super(NovaConfigOpts, self).__call__(args)
- def retrieve_opt(self, opt_name, group=None):
- return self._get_opt_info(opt_name, group)
-
FLAGS = NovaConfigOpts()
diff --git a/nova/network/api.py b/nova/network/api.py
index 10d87fe6a..a3489ae98 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -176,12 +176,14 @@ class API(base.Base):
args = kwargs
args['instance_id'] = instance['id']
args['project_id'] = instance['project_id']
+ args['host'] = instance['host']
rpc.cast(context, FLAGS.network_topic,
{'method': 'deallocate_for_instance',
'args': args})
def add_fixed_ip_to_instance(self, context, instance_id, host, network_id):
"""Adds a fixed ip to instance from specified network."""
+ # NOTE(tr3buchet): poorly written, broken in all but flat manager
args = {'instance_id': instance_id,
'host': host,
'network_id': network_id}
@@ -191,6 +193,7 @@ class API(base.Base):
def remove_fixed_ip_from_instance(self, context, instance_id, address):
"""Removes a fixed ip from instance from specified network."""
+ # NOTE(tr3buchet): poorly written, broken in all but flat manager
args = {'instance_id': instance_id,
'address': address}
rpc.cast(context, FLAGS.network_topic,
@@ -317,3 +320,20 @@ class API(base.Base):
return rpc.call(context, FLAGS.network_topic,
{'method': 'create_public_dns_domain',
'args': args})
+
+ def setup_networks_on_host(self, context, instance, host=None,
+ teardown=False):
+ """Setup or teardown the network structures on hosts related to
+ instance"""
+ host = host or instance['host']
+ # NOTE(tr3buchet): host is passed in cases where we need to set up
+ # or tear down the networks on a host which has been migrated to/from
+ # and instance['host'] is not yet, or is no longer, equal to that host
+ args = {'instance_id': instance['id'],
+ 'host': host,
+ 'teardown': teardown}
+
+ # NOTE(tr3buchet): the call is just to wait for completion
+ rpc.call(context, FLAGS.network_topic,
+ {'method': 'setup_networks_on_host',
+ 'args': args})
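To illustrate how the new API call might be used (the context setup and the instance dict below are assumptions made for this sketch; the compute manager hunks later in this change show the real call sites)::

    from nova import context
    from nova.network import api as network_api

    ctxt = context.get_admin_context()
    # instance is dict-like with 'id', 'project_id' and 'host' keys;
    # the values here are purely illustrative.
    instance = {'id': 1, 'project_id': 'demo', 'host': 'compute-1'}

    # Tear down network structures on the source host of a migration;
    # the call blocks until the network service has finished.
    network_api.API().setup_networks_on_host(ctxt, instance,
                                             host='compute-1',
                                             teardown=True)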
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 9dd75b03b..407255a40 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -190,6 +190,7 @@ class RPCAllocateFixedIP(object):
break
# NOTE(vish): if we are not multi_host pass to the network host
+ # NOTE(tr3buchet): but if we are, host came from instance['host']
if not network['multi_host']:
host = network['host']
# NOTE(vish): if there is no network host, set one
@@ -228,6 +229,29 @@ class RPCAllocateFixedIP(object):
network = self._get_network_by_id(context, network_id)
return self.allocate_fixed_ip(context, instance_id, network, **kwargs)
+ def deallocate_fixed_ip(self, context, address, host):
+ """Call the superclass deallocate_fixed_ip if i'm the correct host
+ otherwise cast to the correct host"""
+ fixed_ip = self.db.fixed_ip_get_by_address(context, address)
+ network = self._get_network_by_id(context, fixed_ip['network_id'])
+
+ # NOTE(vish): if we are not multi_host pass to the network host
+ # NOTE(tr3buchet): but if we are, host came from instance['host']
+ if not network['multi_host']:
+ host = network['host']
+ if host != self.host:
+ # need to call deallocate_fixed_ip on correct network host
+ topic = self.db.queue_get_for(context, FLAGS.network_topic, host)
+ args = {'address': address,
+ 'host': host}
+ rpc.cast(context, topic,
+ {'method': 'deallocate_fixed_ip',
+ 'args': args})
+ else:
+ # i am the correct host, run here
+ super(RPCAllocateFixedIP, self).deallocate_fixed_ip(context,
+ address)
+
def wrap_check_policy(func):
"""Check policy corresponding to the wrapped methods prior to execution"""
@@ -738,7 +762,7 @@ class NetworkManager(manager.SchedulerDependentManager):
# an ip address.
ctxt = context.get_admin_context()
for network in self.db.network_get_all_by_host(ctxt, self.host):
- self._setup_network(ctxt, network)
+ self._setup_network_on_host(ctxt, network)
@manager.periodic_task
def _disassociate_stale_fixed_ips(self, context):
@@ -1087,7 +1111,7 @@ class NetworkManager(manager.SchedulerDependentManager):
'network_id': network_id,
'uuid': str(utils.gen_uuid())}
# try FLAG times to create a vif record with a unique mac_address
- for _ in xrange(FLAGS.create_unique_mac_address_attempts):
+ for i in xrange(FLAGS.create_unique_mac_address_attempts):
try:
return self.db.virtual_interface_create(context, vif)
except exception.VirtualInterfaceCreateException:
@@ -1169,7 +1193,7 @@ class NetworkManager(manager.SchedulerDependentManager):
self.instance_dns_manager.create_entry(uuid, address,
"A",
self.instance_dns_domain)
- self._setup_network(context, network)
+ self._setup_network_on_host(context, network)
return address
def deallocate_fixed_ip(self, context, address, **kwargs):
@@ -1191,10 +1215,9 @@ class NetworkManager(manager.SchedulerDependentManager):
if FLAGS.force_dhcp_release:
network = self._get_network_by_id(context,
fixed_ip_ref['network_id'])
- dev = self.driver.get_dev(network)
vif = self.db.virtual_interface_get_by_instance_and_network(
context, instance_id, network['id'])
- self.driver.release_dhcp(dev, address, vif['address'])
+ self._teardown_network_on_host(context, network, vif, address)
def lease_fixed_ip(self, context, address):
"""Called by dhcp-bridge when ip is leased."""
@@ -1234,7 +1257,7 @@ class NetworkManager(manager.SchedulerDependentManager):
# the code below will update the file if necessary
if FLAGS.update_dhcp_on_disassociate:
network_ref = self.db.fixed_ip_get_network(context, address)
- self._setup_network(context, network_ref)
+ self._setup_network_on_host(context, network_ref)
def create_networks(self, context, label, cidr, multi_host, num_networks,
network_size, cidr_v6, gateway, gateway_v6, bridge,
@@ -1425,7 +1448,65 @@ class NetworkManager(manager.SchedulerDependentManager):
"""Calls allocate_fixed_ip once for each network."""
raise NotImplementedError()
- def _setup_network(self, context, network_ref):
+ def setup_networks_on_host(self, context, instance_id, host,
+ teardown=False):
+ """calls setup/teardown on network hosts associated with an instance"""
+ green_pool = greenpool.GreenPool()
+
+ if teardown:
+ call_func = self._teardown_network_on_host
+ else:
+ call_func = self._setup_network_on_host
+
+ vifs = self.db.virtual_interface_get_by_instance(context,
+ instance_id)
+ for vif in vifs:
+ network = self.db.network_get(context, vif['network_id'])
+ fixed_ips = self.db.fixed_ips_by_virtual_interface(context,
+ vif['id'])
+ addresses = [fixed_ip['address'] for fixed_ip in fixed_ips]
+ if not network['multi_host']:
+ # NOTE(tr3buchet): if using multi_host, host is instance['host']
+ host = network['host']
+ if self.host == host or host is None:
+ # at this point i am the correct host, or host doesn't
+ # matter -> FlatManager
+ for address in addresses:
+ call_func(context, network, vif, address)
+ else:
+ # i'm not the right host, run call on correct host
+ topic = self.db.queue_get_for(context, FLAGS.network_topic,
+ host)
+ args = {'network_id': network['id'],
+ 'vif_id': vif['id'],
+ 'teardown': teardown}
+ for address in addresses:
+ # NOTE(tr3buchet): the call is just to wait for completion
+ args['address'] = address
+ green_pool.spawn_n(rpc.call, context, topic,
+ {'method': 'rpc_setup_network_on_host',
+ 'args': args})
+
+ # wait for all of the setups (if any) to finish
+ green_pool.waitall()
+
+ def rpc_setup_network_on_host(self, context, network_id, vif_id, address,
+ teardown):
+ if teardown:
+ call_func = self._teardown_network_on_host
+ else:
+ call_func = self._setup_network_on_host
+
+ # subcall from original setup_networks_on_host
+ vif = self.db.virtual_interface_get(context, vif_id)
+ network = self.db.network_get(context, network_id)
+ call_func(context, network, vif, address)
+
+ def _setup_network_on_host(self, context, network, vif=None, address=None):
+ """Sets up network on this host."""
+ raise NotImplementedError()
+
+ def _teardown_network_on_host(self, context, network, vif, address):
"""Sets up network on this host."""
raise NotImplementedError()
@@ -1557,11 +1638,18 @@ class FlatManager(NetworkManager):
**kwargs)
self.db.fixed_ip_disassociate(context, address)
- def _setup_network(self, context, network_ref):
+ def _setup_network_on_host(self, context, network, vif=None, address=None):
"""Setup Network on this host."""
+ # NOTE(tr3buchet): this does not need to happen on every ip
+ # allocation, this functionality makes more sense in create_network
+ # but we'd have to move the flat_injected flag to compute
net = {}
net['injected'] = FLAGS.flat_injected
- self.db.network_update(context, network_ref['id'], net)
+ self.db.network_update(context, network['id'], net)
+
+ def _teardown_network_on_host(self, context, network, vif, address):
+ """Tear down netowrk on this host."""
+ pass
class FlatDHCPManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
@@ -1584,21 +1672,26 @@ class FlatDHCPManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
super(FlatDHCPManager, self).init_host()
self.init_host_floating_ips()
- def _setup_network(self, context, network_ref):
+ def _setup_network_on_host(self, context, network, vif=None, address=None):
"""Sets up network on this host."""
- network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref)
+ network['dhcp_server'] = self._get_dhcp_ip(context, network)
- self.l3driver.initialize_gateway(network_ref)
+ self.l3driver.initialize_gateway(network)
if not FLAGS.fake_network:
- dev = self.driver.get_dev(network_ref)
- self.driver.update_dhcp(context, dev, network_ref)
+ dev = self.driver.get_dev(network)
+ self.driver.update_dhcp(context, dev, network)
if(FLAGS.use_ipv6):
- self.driver.update_ra(context, dev, network_ref)
+ self.driver.update_ra(context, dev, network)
gateway = utils.get_my_linklocal(dev)
- self.db.network_update(context, network_ref['id'],
+ self.db.network_update(context, network['id'],
{'gateway_v6': gateway})
+ def _teardown_network_on_host(self, context, network, vif, address):
+ if not FLAGS.fake_network:
+ dev = self.driver.get_dev(network)
+ self.driver.release_dhcp(dev, address, vif['address'])
+
def _get_network_by_id(self, context, network_id):
return NetworkManager._get_network_by_id(self, context.elevated(),
network_id)
@@ -1675,7 +1768,7 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
values = {'allocated': True,
'virtual_interface_id': vif['id']}
self.db.fixed_ip_update(context, address, values)
- self._setup_network(context, network)
+ self._setup_network_on_host(context, network)
return address
@wrap_check_policy
@@ -1713,35 +1806,40 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
NetworkManager.create_networks(self, context, vpn=True, **kwargs)
- def _setup_network(self, context, network_ref):
+ def _setup_network_on_host(self, context, network, vif=None, address=None):
"""Sets up network on this host."""
- if not network_ref['vpn_public_address']:
+ if not network['vpn_public_address']:
net = {}
address = FLAGS.vpn_ip
net['vpn_public_address'] = address
- network_ref = db.network_update(context, network_ref['id'], net)
+ network = db.network_update(context, network['id'], net)
else:
- address = network_ref['vpn_public_address']
- network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref)
+ address = network['vpn_public_address']
+ network['dhcp_server'] = self._get_dhcp_ip(context, network)
- self.l3driver.initialize_gateway(network_ref)
+ self.l3driver.initialize_gateway(network)
# NOTE(vish): only ensure this forward if the address hasn't been set
# manually.
if address == FLAGS.vpn_ip and hasattr(self.driver,
"ensure_vpn_forward"):
self.l3driver.add_vpn(FLAGS.vpn_ip,
- network_ref['vpn_public_port'],
- network_ref['vpn_private_address'])
+ network['vpn_public_port'],
+ network['vpn_private_address'])
if not FLAGS.fake_network:
- dev = self.driver.get_dev(network_ref)
- self.driver.update_dhcp(context, dev, network_ref)
+ dev = self.driver.get_dev(network)
+ self.driver.update_dhcp(context, dev, network)
if(FLAGS.use_ipv6):
- self.driver.update_ra(context, dev, network_ref)
+ self.driver.update_ra(context, dev, network)
gateway = utils.get_my_linklocal(dev)
- self.db.network_update(context, network_ref['id'],
+ self.db.network_update(context, network['id'],
{'gateway_v6': gateway})
+ def _teardown_network_on_host(self, context, network, vif, address):
+ if not FLAGS.fake_network:
+ dev = self.driver.get_dev(network)
+ self.driver.release_dhcp(dev, address, vif['address'])
+
def _get_networks_by_uuids(self, context, network_uuids):
return self.db.network_get_all_by_uuids(context, network_uuids,
context.project_id)
diff --git a/nova/network/model.py b/nova/network/model.py
index 5397574a8..843416591 100644
--- a/nova/network/model.py
+++ b/nova/network/model.py
@@ -211,21 +211,25 @@ class VIF(Model):
for floating_ip in fixed_ip['floating_ips']]
def labeled_ips(self):
- """ returns the list of all IPs in this flat structure:
- {'network_label': 'my_network',
- 'network_id': 'n8v29837fn234782f08fjxk3ofhb84',
- 'ips': [{'address': '123.123.123.123',
- 'version': 4,
- 'type: 'fixed',
- 'meta': {...}},
- {'address': '124.124.124.124',
- 'version': 4,
- 'type': 'floating',
- 'meta': {...}},
- {'address': 'fe80::4',
- 'version': 6,
- 'type': 'fixed',
- 'meta': {...}}]"""
+ """Returns the list of all IPs
+
+ The return value looks like this flat structure::
+
+ {'network_label': 'my_network',
+ 'network_id': 'n8v29837fn234782f08fjxk3ofhb84',
+ 'ips': [{'address': '123.123.123.123',
+ 'version': 4,
+ 'type': 'fixed',
+ 'meta': {...}},
+ {'address': '124.124.124.124',
+ 'version': 4,
+ 'type': 'floating',
+ 'meta': {...}},
+ {'address': 'fe80::4',
+ 'version': 6,
+ 'type': 'fixed',
+ 'meta': {...}}]
+ """
if self['network']:
# remove unnecessary fields on fixed_ips
ips = [IP(**ensure_string_keys(ip)) for ip in self.fixed_ips()]
diff --git a/nova/network/quantum/manager.py b/nova/network/quantum/manager.py
index fad8ecc33..16e782fa8 100644
--- a/nova/network/quantum/manager.py
+++ b/nova/network/quantum/manager.py
@@ -689,3 +689,7 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
leases_text += text
LOG.debug("DHCP leases: %s" % leases_text)
return leases_text
+
+ def setup_networks_on_host(self, *args, **kwargs):
+ # no host specific setup is needed in quantum manager
+ pass
diff --git a/nova/notifier/api.py b/nova/notifier/api.py
index f4532e828..5af16cbac 100644
--- a/nova/notifier/api.py
+++ b/nova/notifier/api.py
@@ -79,35 +79,35 @@ def publisher_id(service, host=None):
def notify(publisher_id, event_type, priority, payload):
- """
- Sends a notification using the specified driver
-
- Notify parameters:
+ """Sends a notification using the specified driver
- publisher_id - the source worker_type.host of the message
- event_type - the literal type of event (ex. Instance Creation)
- priority - patterned after the enumeration of Python logging levels in
- the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
- payload - A python dictionary of attributes
+ :param publisher_id: the source worker_type.host of the message
+ :param event_type: the literal type of event (ex. Instance Creation)
+ :param priority: patterned after the enumeration of Python logging
+ levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
+ :param payload: A python dictionary of attributes
Outgoing message format includes the above parameters, and appends the
following:
- message_id - a UUID representing the id for this notification
- timestamp - the GMT timestamp the notification was sent at
+ message_id
+ a UUID representing the id for this notification
+
+ timestamp
+ the GMT timestamp the notification was sent at
The composite message will be constructed as a dictionary of the above
attributes, which will then be sent via the transport mechanism defined
by the driver.
- Message example:
+ Message example::
- {'message_id': str(uuid.uuid4()),
- 'publisher_id': 'compute.host1',
- 'timestamp': utils.utcnow(),
- 'priority': 'WARN',
- 'event_type': 'compute.create_instance',
- 'payload': {'instance_id': 12, ... }}
+ {'message_id': str(uuid.uuid4()),
+ 'publisher_id': 'compute.host1',
+ 'timestamp': utils.utcnow(),
+ 'priority': 'WARN',
+ 'event_type': 'compute.create_instance',
+ 'payload': {'instance_id': 12, ... }}
"""
if priority not in log_levels:
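A minimal usage sketch of notify() with the documented parameters; the publisher, event type and payload values below are invented::

    from nova.notifier import api as notifier_api

    notifier_api.notify(notifier_api.publisher_id('compute', 'host1'),
                        'compute.create_instance',
                        'WARN',
                        {'instance_id': 12})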
diff --git a/nova/objectstore/s3server.py b/nova/objectstore/s3server.py
index 4f1ce12d1..f19c8826e 100644
--- a/nova/objectstore/s3server.py
+++ b/nova/objectstore/s3server.py
@@ -24,7 +24,7 @@ run something locally that was once running on S3.
We don't support all the features of S3, but it does work with the
standard S3 client for the most basic semantics. To use the standard
-S3 client with this module:
+S3 client with this module::
c = S3.AWSAuthConnection("", "", server="localhost", port=8888,
is_secure=False)
diff --git a/nova/openstack/common/cfg.py b/nova/openstack/common/cfg.py
index b0240769e..18ae002df 100644
--- a/nova/openstack/common/cfg.py
+++ b/nova/openstack/common/cfg.py
@@ -17,7 +17,9 @@
r"""
Configuration options which may be set on the command line or in config files.
-The schema for each option is defined using the Opt sub-classes e.g.
+The schema for each option is defined using the Opt sub-classes, e.g.:
+
+::
common_opts = [
cfg.StrOpt('bind_host',
@@ -28,7 +30,7 @@ The schema for each option is defined using the Opt sub-classes e.g.
help='Port number to listen on')
]
-Options can be strings, integers, floats, booleans, lists or 'multi strings':
+Options can be strings, integers, floats, booleans, lists or 'multi strings'::
enabled_apis_opt = cfg.ListOpt('enabled_apis',
default=['ec2', 'osapi_compute'],
@@ -41,7 +43,7 @@ Options can be strings, integers, floats, booleans, lists or 'multi strings':
default=DEFAULT_EXTENSIONS)
Option schemas are registered with with the config manager at runtime, but
-before the option is referenced:
+before the option is referenced::
class ExtensionManager(object):
@@ -57,7 +59,7 @@ before the option is referenced:
....
A common usage pattern is for each option schema to be defined in the module or
-class which uses the option:
+class which uses the option::
opts = ...
@@ -72,7 +74,7 @@ class which uses the option:
An option may optionally be made available via the command line. Such options
must registered with the config manager before the command line is parsed (for
-the purposes of --help and CLI arg validation):
+the purposes of --help and CLI arg validation)::
cli_opts = [
cfg.BoolOpt('verbose',
@@ -88,7 +90,7 @@ the purposes of --help and CLI arg validation):
def add_common_opts(conf):
conf.register_cli_opts(cli_opts)
-The config manager has a single CLI option defined by default, --config-file:
+The config manager has a single CLI option defined by default, --config-file::
class ConfigOpts(object):
@@ -101,7 +103,7 @@ The config manager has a single CLI option defined by default, --config-file:
Option values are parsed from any supplied config files using SafeConfigParser.
If none are specified, a default set is used e.g. glance-api.conf and
-glance-common.conf:
+glance-common.conf::
glance-api.conf:
[DEFAULT]
@@ -116,7 +118,7 @@ are parsed in order, with values in later files overriding those in earlier
files.
The parsing of CLI args and config files is initiated by invoking the config
-manager e.g.
+manager e.g.::
conf = ConfigOpts()
conf.register_opt(BoolOpt('verbose', ...))
@@ -124,7 +126,7 @@ manager e.g.
if conf.verbose:
...
-Options can be registered as belonging to a group:
+Options can be registered as belonging to a group::
rabbit_group = cfg.OptionGroup(name='rabbit',
title='RabbitMQ options')
@@ -143,7 +145,7 @@ Options can be registered as belonging to a group:
conf.register_opt(rabbit_port_opt, group='rabbit')
If no group is specified, options belong to the 'DEFAULT' section of config
-files:
+files::
glance-api.conf:
[DEFAULT]
@@ -158,13 +160,14 @@ files:
password = guest
virtual_host = /
-Command-line options in a group are automatically prefixed with the group name:
+Command-line options in a group are automatically prefixed with the
+group name::
--rabbit-host localhost --rabbit-port 9999
Option values in the default group are referenced as attributes/properties on
the config manager; groups are also attributes on the config manager, with
-attributes for each of the options associated with the group:
+attributes for each of the options associated with the group::
server.start(app, conf.bind_port, conf.bind_host, conf)
@@ -173,7 +176,7 @@ attributes for each of the options associated with the group:
port=conf.rabbit.port,
...)
-Option values may reference other values using PEP 292 string substitution:
+Option values may reference other values using PEP 292 string substitution::
opts = [
cfg.StrOpt('state_path',
@@ -191,15 +194,15 @@ Note that interpolation can be avoided by using '$$'.
For command line utilities that dispatch to other command line utilities, the
disable_interspersed_args() method is available. If this this method is called,
-then parsing e.g.
+then parsing e.g.::
script --verbose cmd --debug /tmp/mything
-will no longer return:
+will no longer return::
['cmd', '/tmp/mything']
-as the leftover arguments, but will instead return:
+as the leftover arguments, but will instead return::
['cmd', '--debug', '/tmp/mything']
@@ -307,9 +310,12 @@ class ConfigFileValueError(Error):
def find_config_files(project=None, prog=None):
"""Return a list of default configuration files.
+ :param project: an optional project name
+ :param prog: the program name, defaulting to the basename of sys.argv[0]
+
We default to two config files: [${project}.conf, ${prog}.conf]
- And we look for those config files in the following directories:
+ And we look for those config files in the following directories::
~/.${project}/
~/
@@ -324,9 +330,6 @@ def find_config_files(project=None, prog=None):
'~/.foo/bar.conf']
If no project name is supplied, we only look for ${prog.conf}.
-
- :param project: an optional project name
- :param prog: the program name, defaulting to the basename of sys.argv[0]
"""
if prog is None:
prog = os.path.basename(sys.argv[0])
diff --git a/nova/policy.py b/nova/policy.py
index 9f6e6c9e2..b9f81cf5b 100644
--- a/nova/policy.py
+++ b/nova/policy.py
@@ -68,15 +68,15 @@ def enforce(context, action, target):
:param context: nova context
:param action: string representing the action to be checked
this should be colon separated for clarity.
- i.e. compute:create_instance
- compute:attach_volume
- volume:attach_volume
+ i.e. ``compute:create_instance``,
+ ``compute:attach_volume``,
+ ``volume:attach_volume``
:param object: dictionary representing the object of the action
for object creation this should be a dictionary representing the
- location of the object e.g. {'project_id': context.project_id}
+ location of the object e.g. ``{'project_id': context.project_id}``
- :raises: `nova.exception.PolicyNotAllowed` if verification fails.
+ :raises nova.exception.PolicyNotAllowed: if verification fails.
"""
init()
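A minimal sketch of calling enforce() as documented above; which actions are actually allowed depends entirely on the loaded policy file, so treat this as illustrative only::

    from nova import context
    from nova import policy

    ctxt = context.get_admin_context()
    target = {'project_id': ctxt.project_id}
    # Raises nova.exception.PolicyNotAllowed if the policy denies
    # compute:attach_volume for this context.
    policy.enforce(ctxt, 'compute:attach_volume', target)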
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 74b5c308b..292161867 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -194,6 +194,7 @@ class Scheduler(object):
block_migration=False,
disk_over_commit=False):
"""Live migration scheduling method.
+
:param context:
:param instance_id:
:param dest: destination host
diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py
index 899cfd38c..8949b983d 100644
--- a/nova/scheduler/least_cost.py
+++ b/nova/scheduler/least_cost.py
@@ -79,17 +79,20 @@ def compute_fill_first_cost_fn(host_state, weighing_properties):
def weighted_sum(weighted_fns, host_states, weighing_properties):
"""Use the weighted-sum method to compute a score for an array of objects.
+
Normalize the results of the objective-functions so that the weights are
meaningful regardless of objective-function's range.
- host_list - [(host, HostInfo()), ...]
- weighted_fns - list of weights and functions like:
+ :param host_list: ``[(host, HostInfo()), ...]``
+ :param weighted_fns: list of weights and functions like::
+
[(weight, objective-functions), ...]
- weighing_properties is an arbitrary dict of values that can influence
- weights.
- Returns a single WeightedHost object which represents the best
- candidate.
+ :param weighing_properties: an arbitrary dict of values that can
+ influence weights.
+
+ :returns: a single WeightedHost object which represents the best
+ candidate.
"""
# Make a grid of functions results.
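The normalization idea described above can be sketched in a few standalone lines; this is not the scheduler's implementation, just a toy version with invented host data::

    def weighted_scores(weighted_fns, hosts):
        # Toy weighted-sum: normalize each cost fn to [0, 1] first so
        # the weights stay meaningful regardless of each fn's range.
        totals = [0.0] * len(hosts)
        for weight, fn in weighted_fns:
            scores = [float(fn(host)) for host in hosts]
            low, span = min(scores), (max(scores) - min(scores)) or 1.0
            for i, score in enumerate(scores):
                totals[i] += weight * (score - low) / span
        return totals

    cost_fn = lambda host: -host['free_ram_mb']  # favor more free RAM
    hosts = [{'free_ram_mb': 512}, {'free_ram_mb': 2048}]
    print(weighted_scores([(1.0, cost_fn)], hosts))  # [1.0, 0.0]; lower wins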
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 57bd445ff..a0a34eb72 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -179,11 +179,12 @@ class SchedulerManager(manager.Manager):
:param context: security context
:param host: hostname
:returns:
- example format is below.
- {'resource':D, 'usage':{proj_id1:D, proj_id2:D}}
- D: {'vcpus': 3, 'memory_mb': 2048, 'local_gb': 2048,
- 'vcpus_used': 12, 'memory_mb_used': 10240,
- 'local_gb_used': 64}
+ example format is below::
+
+ {'resource':D, 'usage':{proj_id1:D, proj_id2:D}}
+ D: {'vcpus': 3, 'memory_mb': 2048, 'local_gb': 2048,
+ 'vcpus_used': 12, 'memory_mb_used': 10240,
+ 'local_gb_used': 64}
"""
# Getting compute node info and related instances info
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index 52cb670ca..5a64f237e 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -218,7 +218,8 @@ class CloudTestCase(test.TestCase):
public_ip=address)
self.cloud.release_address(self.context,
public_ip=address)
- self.network.deallocate_fixed_ip(self.context, fixed_ips[0]['address'])
+ self.network.deallocate_fixed_ip(self.context, fixed_ips[0]['address'],
+ inst['host'])
db.instance_destroy(self.context, inst['id'])
db.floating_ip_destroy(self.context, address)
diff --git a/nova/tests/api/openstack/test_faults.py b/nova/tests/api/openstack/test_faults.py
index 87cb2d3fe..96a62017b 100644
--- a/nova/tests/api/openstack/test_faults.py
+++ b/nova/tests/api/openstack/test_faults.py
@@ -85,7 +85,7 @@ class TestFaults(test.TestCase):
self.assertEqual(expected, actual)
def test_raise(self):
- """Ensure the ability to raise `Fault`s in WSGI-ified methods."""
+ """Ensure the ability to raise :class:`Fault` in WSGI-ified methods."""
@webob.dec.wsgify
def raiser(req):
raise wsgi.Fault(webob.exc.HTTPNotFound(explanation='whut?'))
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 24a383f80..ef416543d 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -43,10 +43,10 @@ from nova import exception
from nova import flags
from nova.image import fake as fake_image
from nova import log as logging
-from nova.network.quantum import client as quantum_client
from nova.notifier import test_notifier
import nova.policy
from nova import rpc
+from nova.rpc import common as rpc_common
from nova.scheduler import driver as scheduler_driver
from nova import test
from nova.tests import fake_network
@@ -942,13 +942,13 @@ class ComputeTestCase(BaseTestCase):
mox.IgnoreArg(),
mox.IgnoreArg(),
requested_networks=None,
- vpn=False).AndRaise(quantum_client.QuantumServerException())
+ vpn=False).AndRaise(rpc_common.RemoteError())
self.flags(stub_network=False)
self.mox.ReplayAll()
- self.assertRaises(quantum_client.QuantumServerException,
+ self.assertRaises(rpc_common.RemoteError,
self.compute.run_instance,
self.context,
instance_uuid)
@@ -959,6 +959,27 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance['uuid'])
+ def test_instance_set_to_error_on_deleted_instance_doesnt_raise(self):
+ """Test that we don't raise InstanceNotFound when trying to set
+ an instance to ERROR that has already been deleted from under us.
+ The original exception should be re-raised.
+ """
+ instance = self._create_fake_instance()
+ instance_uuid = instance['uuid']
+
+ def fake_allocate_network(context, instance, requested_networks):
+ # Remove the instance to simulate race condition
+ self.compute.terminate_instance(self.context, instance['uuid'])
+ raise rpc_common.RemoteError()
+
+ self.stubs.Set(self.compute, '_allocate_network',
+ fake_allocate_network)
+
+ self.assertRaises(rpc_common.RemoteError,
+ self.compute.run_instance,
+ self.context,
+ instance_uuid)
+
def test_network_is_deallocated_on_spawn_failure(self):
"""When a spawn fails the network must be deallocated"""
instance = self._create_fake_instance()
@@ -1387,6 +1408,10 @@ class ComputeTestCase(BaseTestCase):
'disk': None}
}).AndRaise(rpc.common.RemoteError('', '', ''))
# mocks for rollback
+ rpc.call(c, 'network', {'method': 'setup_networks_on_host',
+ 'args': {'instance_id': instance_id,
+ 'host': self.compute.host,
+ 'teardown': False}})
rpc.call(c, topic, {"method": "remove_volume_connection",
"args": {'instance_id': instance_id,
'volume_id': volume_id}})
@@ -1455,6 +1480,10 @@ class ComputeTestCase(BaseTestCase):
self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
self.compute.driver.unfilter_instance(i_ref, [])
self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(c, 'network', {'method': 'setup_networks_on_host',
+ 'args': {'instance_id': instance_id,
+ 'host': self.compute.host,
+ 'teardown': True}})
rpc.call(c, db.queue_get_for(c, FLAGS.compute_topic, dest),
{"method": "post_live_migration_at_destination",
"args": {'instance_id': i_ref['id'], 'block_migration': False}})
diff --git a/nova/tests/test_misc.py b/nova/tests/test_misc.py
index 514135244..0baf38236 100644
--- a/nova/tests/test_misc.py
+++ b/nova/tests/test_misc.py
@@ -147,7 +147,7 @@ class LockTestCase(test.TestCase):
self.assertEquals(e.errno, errno.EPIPE)
return
- rfds, _, __ = select.select([rpipe], [], [], 1)
+ rfds, _wfds, _efds = select.select([rpipe], [], [], 1)
self.assertEquals(len(rfds), 0, "The other process, which was"
" supposed to be locked, "
"wrote on its end of the "
diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py
index 05b931617..ba9dd6e12 100644
--- a/nova/tests/test_network.py
+++ b/nova/tests/test_network.py
@@ -838,7 +838,7 @@ class VlanNetworkTestCase(test.TestCase):
# Clean up the ip addresses
self.network.disassociate_floating_ip(context1, float_addr)
self.network.deallocate_floating_ip(context1, float_addr)
- self.network.deallocate_fixed_ip(context1, fix_addr)
+ self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
db.floating_ip_destroy(context1.elevated(), float_addr)
db.fixed_ip_disassociate(context1.elevated(), fix_addr)
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index cc7dabd73..89f5693da 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -202,10 +202,10 @@ class XenAPIVolumeTestCase(test.TestCase):
volume = self._create_volume()
instance = db.instance_create(self.context, self.instance_values)
xenapi_fake.create_vm(instance.name, 'Running')
- self.assertRaises(Exception,
+ self.assertRaises(exception.VolumeDriverNotFound,
conn.attach_volume,
+ {'driver_volume_type': 'nonexist'},
instance.name,
- volume['id'],
'/dev/sdc')
diff --git a/nova/utils.py b/nova/utils.py
index 10a8c304b..712242fd5 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -116,23 +116,25 @@ def vpn_ping(address, port, timeout=0.05, session_id=None):
Returns False on a failure. Basic packet structure is below.
Client packet (14 bytes)::
- 0 1 8 9 13
- +-+--------+-----+
- |x| cli_id |?????|
- +-+--------+-----+
- x = packet identifier 0x38
- cli_id = 64 bit identifier
- ? = unknown, probably flags/padding
+
+ 0 1 8 9 13
+ +-+--------+-----+
+ |x| cli_id |?????|
+ +-+--------+-----+
+ x = packet identifier 0x38
+ cli_id = 64 bit identifier
+ ? = unknown, probably flags/padding
Server packet (26 bytes)::
- 0 1 8 9 13 14 21 2225
- +-+--------+-----+--------+----+
- |x| srv_id |?????| cli_id |????|
- +-+--------+-----+--------+----+
- x = packet identifier 0x40
- cli_id = 64 bit identifier
- ? = unknown, probably flags/padding
- bit 9 was 1 and the rest were 0 in testing
+
+ 0 1 8 9 13 14 21 2225
+ +-+--------+-----+--------+----+
+ |x| srv_id |?????| cli_id |????|
+ +-+--------+-----+--------+----+
+ x = packet identifier 0x40
+ cli_id = 64 bit identifier
+ ? = unknown, probably flags/padding
+ bit 9 was 1 and the rest were 0 in testing
"""
if session_id is None:
@@ -162,27 +164,29 @@ def fetchfile(url, target):
def execute(*cmd, **kwargs):
- """
- Helper method to execute command with optional retry.
+ """Helper method to execute command with optional retry.
+
If you add a run_as_root=True command, don't forget to add the
corresponding filter to nova.rootwrap !
- :cmd Passed to subprocess.Popen.
- :process_input Send to opened process.
- :check_exit_code Single bool, int, or list of allowed exit codes.
- Defaults to [0]. Raise exception.ProcessExecutionError
- unless program exits with one of these code.
- :delay_on_retry True | False. Defaults to True. If set to True, wait a
- short amount of time before retrying.
- :attempts How many times to retry cmd.
- :run_as_root True | False. Defaults to False. If set to True,
- the command is prefixed by the command specified
- in the root_helper FLAG.
-
- :raises exception.Error on receiving unknown arguments
- :raises exception.ProcessExecutionError
-
- :returns a tuple, (stdout, stderr) from the spawned process, or None if
+ :param cmd: Passed to subprocess.Popen.
+ :param process_input: Send to opened process.
+ :param check_exit_code: Single bool, int, or list of allowed exit
+ codes. Defaults to [0]. Raise
+ exception.ProcessExecutionError unless
+ program exits with one of these codes.
+ :param delay_on_retry: True | False. Defaults to True. If set to
+ True, wait a short amount of time
+ before retrying.
+ :param attempts: How many times to retry cmd.
+ :param run_as_root: True | False. Defaults to False. If set to True,
+ the command is prefixed by the command specified
+ in the root_helper FLAG.
+
+ :raises exception.Error: on receiving unknown arguments
+ :raises exception.ProcessExecutionError:
+
+ :returns: a tuple, (stdout, stderr) from the spawned process, or None if
the command fails.
"""
@@ -795,21 +799,23 @@ _semaphores = {}
def synchronized(name, external=False):
"""Synchronization decorator.
- Decorating a method like so:
- @synchronized('mylock')
- def foo(self, *args):
- ...
+ Decorating a method like so::
+
+ @synchronized('mylock')
+ def foo(self, *args):
+ ...
ensures that only one thread will execute the bar method at a time.
- Different methods can share the same lock:
- @synchronized('mylock')
- def foo(self, *args):
- ...
+ Different methods can share the same lock::
+
+ @synchronized('mylock')
+ def foo(self, *args):
+ ...
- @synchronized('mylock')
- def bar(self, *args):
- ...
+ @synchronized('mylock')
+ def bar(self, *args):
+ ...
This way only one of either foo or bar can be executing at a time.
@@ -1601,7 +1607,8 @@ class UndoManager(object):
def rollback_and_reraise(self, msg=None):
"""Rollback a series of actions then re-raise the exception.
- NOTE(sirp): This should only be called within an exception handler.
+ .. note:: (sirp) This should only be called within an
+ exception handler.
"""
with save_and_reraise_exception():
if msg:
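A small usage sketch of the execute() keyword arguments documented above (the command and retry settings are arbitrary; run_as_root is omitted because it additionally requires a rootwrap filter)::

    from nova import utils

    # Retry 'uptime' up to three times, accepting only exit code 0;
    # stdout and stderr come back as a tuple on success.
    stdout, stderr = utils.execute('uptime', check_exit_code=[0],
                                   attempts=3, delay_on_retry=True)
    print(stdout)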
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 15fb70f77..9658273db 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -128,25 +128,29 @@ class ComputeDriver(object):
Return the number of virtual machines that the hypervisor knows
about.
- :note This implementation works for all drivers, but it is
- not particularly efficient. Maintainers of the virt drivers are
- encouraged to override this method with something more
- efficient.
+ .. note::
+
+ This implementation works for all drivers, but it is
+ not particularly efficient. Maintainers of the virt drivers are
+ encouraged to override this method with something more
+ efficient.
"""
return len(self.list_instances())
def instance_exists(self, instance_id):
"""Checks existence of an instance on the host.
+ :param instance_id: The ID / name of the instance to lookup
+
Returns True if an instance with the supplied ID exists on
the host, False otherwise.
- :note This implementation works for all drivers, but it is
- not particularly efficient. Maintainers of the virt drivers are
- encouraged to override this method with something more
- efficient.
+ .. note::
- :param instance_id: The ID / name of the instance to lookup
+ This implementation works for all drivers, but it is
+ not particularly efficient. Maintainers of the virt drivers are
+ encouraged to override this method with something more
+ efficient.
"""
return instance_id in self.list_instances()
@@ -422,7 +426,7 @@ class ComputeDriver(object):
host 'H0' and this method would still have been called. The point was
that this method isn't called on the host where instances of that
group are running (as is the case with
- :method:`refresh_security_group_rules`) but is called where references
+ :py:meth:`refresh_security_group_rules`) but is called where references
are made to authorizing those instances.
An error should be raised if the operation cannot complete.
@@ -436,7 +440,7 @@ class ComputeDriver(object):
When this is called, rules have either been added or removed from the
datastore. You can retrieve rules with
- :method:`nova.db.provider_fw_rule_get_all`.
+ :py:meth:`nova.db.provider_fw_rule_get_all`.
Provider rules take precedence over security group rules. If an IP
would be allowed by a security group ingress rule, but blocked by
@@ -669,12 +673,12 @@ class ComputeDriver(object):
raise NotImplementedError()
def get_volume_connector(self, instance):
- """
- Get connector information for the instance for attaching to volumes.
+ """Get connector information for the instance for attaching to volumes.
Connector information is a dictionary representing the ip of the
machine that will be making the connection and the name of the
- iscsi initiator as follows:
+ iscsi initiator as follows::
+
{
'ip': ip,
'initiator': initiator,
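Concretely, the connector dictionary described above might look like the following; the address and IQN are invented::

    connector = {
        'ip': '192.168.1.10',
        'initiator': 'iqn.1993-08.org.debian:01:abcdef123456',
    }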
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index aacffb62c..6be69bd4d 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -36,7 +36,7 @@ from nova.virt import driver
LOG = logging.getLogger(__name__)
-def get_connection(_=None):
+def get_connection(_read_only):
# The read_only parameter is ignored.
return FakeConnection.instance()
diff --git a/nova/virt/firewall.py b/nova/virt/firewall.py
index b367aded2..9df036412 100644
--- a/nova/virt/firewall.py
+++ b/nova/virt/firewall.py
@@ -88,7 +88,7 @@ class FirewallDriver(object):
"""Create rules to block spoofing and allow dhcp.
This gets called when spawning an instance, before
- :method:`prepare_instance_filter`.
+ :py:meth:`prepare_instance_filter`.
"""
raise NotImplementedError()
@@ -362,7 +362,7 @@ class IptablesFirewallDriver(FirewallDriver):
self.add_filters_for_instance(instance)
def refresh_provider_fw_rules(self):
- """See class:FirewallDriver: docs."""
+ """See :class:`FirewallDriver` docs."""
self._do_refresh_provider_fw_rules()
self.iptables.apply()
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index af780430e..b292c932d 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -2036,12 +2036,12 @@ class LibvirtConnection(driver.ComputeDriver):
block_migration):
"""Post operation of live migration at destination host.
- :params ctxt: security context
- :params instance_ref:
+ :param ctxt: security context
+ :param instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
- :params network_info: instance network infomation
- :params : block_migration: if true, post operation of block_migraiton.
+ :param network_info: instance network information
+ :param block_migration: if true, post operation of block_migration.
"""
# Define migrated instance, otherwise, suspend/destroy does not work.
dom_list = self._conn.listDefinedDomains()
@@ -2070,11 +2070,12 @@ class LibvirtConnection(driver.ComputeDriver):
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:return:
- json strings with below format.
- "[{'path':'disk', 'type':'raw',
- 'virt_disk_size':'10737418240',
- 'backing_file':'backing_file',
- 'disk_size':'83886080'},...]"
+ json strings with below format::
+
+ "[{'path':'disk', 'type':'raw',
+ 'virt_disk_size':'10737418240',
+ 'backing_file':'backing_file',
+ 'disk_size':'83886080'},...]"
"""
disk_info = []
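To make the serialized format above concrete, a small sketch that builds an equivalent entry and round-trips it through the json module; the values are placeholders, not taken from a real instance::

    import json

    disk_info = [{
        'path': 'disk',
        'type': 'raw',
        'virt_disk_size': '10737418240',
        'backing_file': 'backing_file',
        'disk_size': '83886080',
    }]

    serialized = json.dumps(disk_info)
    print serialized
    print json.loads(serialized)[0]['virt_disk_size']  # '10737418240'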
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 4574dafdb..f4cbc2506 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -87,12 +87,13 @@ class VMWareVMOps(object):
Creates a VM instance.
Steps followed are:
+
1. Create a VM with no disk and the specifics in the instance object
- like RAM size.
+ like RAM size.
2. Create a dummy vmdk of the size of the disk file that is to be
- uploaded. This is required just to create the metadata file.
+ uploaded. This is required just to create the metadata file.
3. Delete the -flat.vmdk file created in the above step and retain
- the metadata .vmdk file.
+ the metadata .vmdk file.
4. Upload the disk file.
5. Attach the disk to the VM by reconfiguring the same.
6. Power on the VM.
@@ -335,16 +336,17 @@ class VMWareVMOps(object):
_power_on_vm()
def snapshot(self, context, instance, snapshot_name):
- """
- Create snapshot from a running VM instance.
+ """Create snapshot from a running VM instance.
+
Steps followed are:
+
1. Get the name of the vmdk file which the VM points to right now.
- Can be a chain of snapshots, so we need to know the last in the
- chain.
+ Can be a chain of snapshots, so we need to know the last in the
+ chain.
2. Create the snapshot. A new vmdk is created which the VM points to
- now. The earlier vmdk becomes read-only.
+ now. The earlier vmdk becomes read-only.
3. Call CopyVirtualDisk which coalesces the disk chain to form a single
- vmdk, rather a .vmdk metadata file and a -flat.vmdk disk data file.
+ vmdk, or rather a .vmdk metadata file and a -flat.vmdk disk data file.
4. Now upload the -flat.vmdk file to the image store.
5. Delete the coalesced .vmdk and -flat.vmdk created.
"""
diff --git a/nova/virt/vmwareapi_conn.py b/nova/virt/vmwareapi_conn.py
index 93cf9b6d2..8259974a3 100644
--- a/nova/virt/vmwareapi_conn.py
+++ b/nova/virt/vmwareapi_conn.py
@@ -95,7 +95,7 @@ class Failure(Exception):
return str(self.details)
-def get_connection(_):
+def get_connection(_read_only):
"""Sets up the ESX host connection."""
host_ip = FLAGS.vmwareapi_host_ip
host_username = FLAGS.vmwareapi_host_username
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 5813133d8..97ca29184 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -704,7 +704,7 @@ class SessionBase(object):
def _destroy(self, name, params):
self._check_session(params)
self._check_arg_count(params, 2)
- table, _ = name.split('.')
+ table = name.split('.')[0]
ref = params[1]
if ref not in _db_content[table]:
raise Failure(['HANDLE_INVALID', table, ref])
diff --git a/nova/virt/xenapi/pool.py b/nova/virt/xenapi/pool.py
index 5661fa01a..7448ae357 100644
--- a/nova/virt/xenapi/pool.py
+++ b/nova/virt/xenapi/pool.py
@@ -212,5 +212,5 @@ def forward_request(context, request_type, master, aggregate_id,
def swap_xapi_host(url, host_addr):
"""Replace the XenServer address present in 'url' with 'host_addr'."""
temp_url = urlparse.urlparse(url)
- _, sep, port = temp_url.netloc.partition(':')
+ _netloc, sep, port = temp_url.netloc.partition(':')
return url.replace(temp_url.netloc, '%s%s%s' % (host_addr, sep, port))
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index e117b3d0a..59bf7cd0a 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -95,15 +95,15 @@ KERNEL_DIR = '/boot/guest'
class ImageType:
- """
- Enumeration class for distinguishing different image types
- 0 - kernel image (goes on dom0's filesystem)
- 1 - ramdisk image (goes on dom0's filesystem)
- 2 - disk image (local SR, partitioned by objectstore plugin)
- 3 - raw disk image (local SR, NOT partitioned by plugin)
- 4 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
- linux, HVM assumed for Windows)
- 5 - ISO disk image (local SR, NOT partitioned by plugin)
+ """Enumeration class for distinguishing different image types
+
+ | 0 - kernel image (goes on dom0's filesystem)
+ | 1 - ramdisk image (goes on dom0's filesystem)
+ | 2 - disk image (local SR, partitioned by objectstore plugin)
+ | 3 - raw disk image (local SR, NOT partitioned by plugin)
+ | 4 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
+ | linux, HVM assumed for Windows)
+ | 5 - ISO disk image (local SR, NOT partitioned by plugin)
"""
KERNEL = 0
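A sketch of how the table above maps onto named constants; only KERNEL appears in this hunk, so the remaining names are assumptions for illustration::

    class ImageTypeSketch(object):
        KERNEL = 0    # kernel image (dom0 filesystem)
        RAMDISK = 1   # ramdisk image (dom0 filesystem)
        DISK = 2      # disk image (local SR, partitioned by plugin)
        DISK_RAW = 3  # raw disk image (local SR, not partitioned)
        DISK_VHD = 4  # vhd disk image (local SR, not inspected by XS)
        DISK_ISO = 5  # ISO disk image (local SR, not partitioned)

    print ImageTypeSketch.DISK_VHD  # 4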
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 658a6da40..bacf46b02 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -99,7 +99,7 @@ def make_step_decorator(context, instance):
"""Factory to create a decorator that records instance progress as a series
of discrete steps.
- Each time the decorator is invoked we bump the total-step-count, so after:
+ Each time the decorator is invoked we bump the total-step-count, so after::
@step
def step1():
@@ -112,11 +112,12 @@ def make_step_decorator(context, instance):
we have a total-step-count of 2.
Each time the step-function (not the step-decorator!) is invoked, we bump
- the current-step-count by 1, so after:
+ the current-step-count by 1, so after::
step1()
- the current-step-count would be 1 giving a progress of 1 / 2 * 100 or 50%.
+ the current-step-count would be 1 giving a progress of ``1 / 2 *
+ 100`` or 50%.
"""
instance_uuid = instance['uuid']
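The counting described above can be reproduced without the real decorator factory; a standalone sketch of the same arithmetic::

    total_steps = 0
    current_step = 0

    def step(fn):
        global total_steps
        total_steps += 1          # decoration time: bump total-step-count
        def inner():
            global current_step
            current_step += 1     # call time: bump current-step-count
            return fn()
        return inner

    @step
    def step1():
        pass

    @step
    def step2():
        pass

    step1()
    print current_step * 100 / total_steps  # 50, i.e. 1 / 2 * 100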
@@ -650,18 +651,18 @@ class VMOps(object):
Steps involved in a XenServer snapshot:
1. XAPI-Snapshot: Snapshotting the instance using XenAPI. This
- creates: Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
- Snapshot VHD
+ creates: Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
+ Snapshot VHD
2. Wait-for-coalesce: The Snapshot VDI and Instance VDI both point to
- a 'base-copy' VDI. The base_copy is immutable and may be chained
- with other base_copies. If chained, the base_copies
- coalesce together, so, we must wait for this coalescing to occur to
- get a stable representation of the data on disk.
+ a 'base-copy' VDI. The base_copy is immutable and may be chained
+ with other base_copies. If chained, the base_copies
+ coalesce together, so, we must wait for this coalescing to occur to
+ get a stable representation of the data on disk.
3. Push-to-glance: Once coalesced, we call a plugin on the XenServer
- that will bundle the VHDs together and then push the bundle into
- Glance.
+ that will bundle the VHDs together and then push the bundle into
+ Glance.
"""
template_vm_ref = None
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index ececea2ee..05e77ef0d 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -153,7 +153,7 @@ FLAGS = flags.FLAGS
FLAGS.register_opts(xenapi_opts)
-def get_connection(_):
+def get_connection(_read_only):
"""Note that XenAPI doesn't have a read-only connection mode, so
the read_only parameter is ignored."""
url = FLAGS.xenapi_connection_url
@@ -536,7 +536,7 @@ class XenAPISession(object):
return url
def _populate_session_pool(self, url, user, pw, exception):
- for _ in xrange(FLAGS.xenapi_connection_concurrent - 1):
+ for i in xrange(FLAGS.xenapi_connection_concurrent - 1):
session = self._create_session(url)
with timeout.Timeout(FLAGS.xenapi_login_timeout, exception):
session.login_with_password(user, pw)
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index 924991860..a8ed76a97 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -238,13 +238,15 @@ class ISCSIDriver(VolumeDriver):
We make use of model provider properties as follows:
- :provider_location: if present, contains the iSCSI target information
- in the same format as an ietadm discovery
- i.e. '<ip>:<port>,<portal> <target IQN>'
-
- :provider_auth: if present, contains a space-separated triple:
- '<auth method> <auth username> <auth password>'.
- `CHAP` is the only auth_method in use at the moment.
+ ``provider_location``
+ if present, contains the iSCSI target information in the same
+ format as an ietadm discovery
+ i.e. '<ip>:<port>,<portal> <target IQN>'
+
+ ``provider_auth``
+ if present, contains a space-separated triple:
+ '<auth method> <auth username> <auth password>'.
+ `CHAP` is the only auth_method in use at the moment.
"""
def __init__(self, *args, **kwargs):
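A short sketch of splitting the two model properties into their documented parts; the sample strings are placeholders shaped like the formats above::

    provider_location = '10.0.0.1:3260,1 iqn.2010-10.org.openstack:volume-0001'
    provider_auth = 'CHAP chapuser chappassword'

    # '<ip>:<port>,<portal> <target IQN>'
    location, target_iqn = provider_location.split(' ', 1)
    ip_port, portal = location.split(',', 1)
    ip, port = ip_port.split(':', 1)

    # '<auth method> <auth username> <auth password>'
    auth_method, auth_username, auth_password = provider_auth.split(' ', 2)

    print ip, port, portal, target_iqn
    print auth_method, auth_username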
@@ -424,7 +426,8 @@ class ISCSIDriver(VolumeDriver):
The iscsi driver returns a driver_volume_type of 'iscsi'.
The format of the driver data is defined in _get_iscsi_properties.
- Example return value:
+ Example return value::
+
{
'driver_volume_type': 'iscsi'
'data': {
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index 8257f0096..9da7c8be8 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -266,7 +266,8 @@ class VolumeManager(manager.SchedulerDependentManager):
This method calls the driver initialize_connection and returns
it to the caller. The connector parameter is a dictionary with
information about the host that will connect to the volume in the
- following format:
+ following format::
+
{
'ip': ip,
'initiator': initiator,
@@ -279,7 +280,8 @@ class VolumeManager(manager.SchedulerDependentManager):
connections.
driver is responsible for doing any necessary security setup and
- returning a connection_info dictionary in the following format:
+ returning a connection_info dictionary in the following format::
+
{
'driver_volume_type': driver_volume_type,
'data': data,
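For orientation, a sketch of the two dictionaries exchanged here; the connector keys follow the format above, while the keys under 'data' depend on the driver_volume_type and are assumptions::

    connector = {
        'ip': '192.168.0.10',
        'initiator': 'iqn.1993-08.org.debian:01:0123456789ab',  # placeholder
    }

    connection_info = {
        'driver_volume_type': 'iscsi',
        'data': {
            'target_iqn': 'iqn.2010-10.org.openstack:volume-0001',  # assumed
            'target_portal': '192.168.0.20:3260',                   # assumed
        },
    }

    print connection_info['driver_volume_type'], connector['ip']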
diff --git a/nova/vsa/api.py b/nova/vsa/api.py
index 6482a11c1..1a03dfb1b 100644
--- a/nova/vsa/api.py
+++ b/nova/vsa/api.py
@@ -19,8 +19,9 @@
Handles all requests relating to Virtual Storage Arrays (VSAs).
Experimental code. Requires special VSA image.
+
For assistance and guidelines please contact
- Zadara Storage Inc & Openstack community
+Zadara Storage Inc & Openstack community
"""
from nova import compute
@@ -143,9 +144,8 @@ class API(base.Base):
def create(self, context, display_name='', display_description='',
vc_count=1, instance_type=None, image_name=None,
availability_zone=None, storage=[], shared=None):
- """
- Provision VSA instance with corresponding compute instances
- and associated volumes
+ """Provision VSA instance with compute instances and volumes
+
:param storage: List of dictionaries with following keys:
disk_name, num_disks, size
:param shared: Specifies if storage is dedicated or shared.
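A minimal sketch of a storage list using the keys named above (disk_name, num_disks, size); the values and any units are illustrative only::

    storage = [
        {'disk_name': 'vsa-disk-type', 'num_disks': 2, 'size': 100},
    ]

    for entry in storage:
        print '%(num_disks)s disk(s) of size %(size)s (%(disk_name)s)' % entry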
diff --git a/run_tests.sh b/run_tests.sh
index 6d8cb7cae..cf7e39f0e 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -67,7 +67,7 @@ for arg in "$@"; do
process_option $arg
done
-# If enabled, tell nose to collect coverage data
+# If enabled, tell nose to collect coverage data
if [ $coverage -eq 1 ]; then
noseopts="$noseopts --with-coverage --cover-package=nova"
fi
diff --git a/tools/conf/create_conf.py b/tools/conf/create_conf.py
index af702f732..361c9efef 100644
--- a/tools/conf/create_conf.py
+++ b/tools/conf/create_conf.py
@@ -16,9 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Generates a nova.conf file.
-
-"""
+"""Generates a nova.conf file."""
import os
import re
@@ -52,9 +50,12 @@ def main(srcfiles):
"console", "consoleauth", "image"]
return prefer.index(pkg_str) if pkg_str in prefer else ord(pkg_str[0])
- print '#', 'nova.conf sample\n'
+ print '#' * 20 + '\n# nova.conf sample #\n' + '#' * 20
# NOTE(lzyeval): sort top level modules and packages
# to process modules first
+ print
+ print '[DEFAULT]'
+ print
mods_by_pkg = dict()
for filepath in srcfiles:
pkg_name = filepath.split(os.sep)[3]
@@ -96,14 +97,14 @@ def print_module(mod_str):
# check if option was processed
if opt_name in _OPTION_CACHE:
continue
- opt_dict = flags.retrieve_opt(opt_name)
+ opt_dict = flags._get_opt_info(opt_name)
opts.append(opt_dict['opt'])
_OPTION_CACHE.append(opt_name)
# return if flags has no unique options
if not opts:
return
# print out module info
- print ''.join(['[', mod_str, ']'])
+ print '######### defined in %s #########' % mod_str
print
for opt in opts:
print_opt(opt)
@@ -118,16 +119,21 @@ def print_opt(opt):
sys.stderr.write("%s\n" % str(err))
sys.exit(1)
# print out option info
- print "#", "".join(["(", opt_type, ")"]), opt.help
- if opt_type == _BOOLOPT:
- print "# default: %s" % opt.default
- print "#", ''.join(["--", opt.name])
+ print "######", "".join(["(", opt_type, ")"]), opt.help
+ if opt.default is None:
+ print '# %s=<None>' % opt.name
else:
- opt_value = str(opt.default)
- if (opt.default is None or (opt_type == _STROPT and not opt.default)):
- opt_value = "<%s>" % opt.name
- print "#", ''.join(["--", opt.name, "=", opt_value])
- print
+ if opt_type == 'StrOpt':
+ print '# %s="%s"' % (opt.name, opt.default)
+ elif opt_type == 'ListOpt':
+ print '# %s="%s"' % (opt.name, ','.join(opt.default))
+ elif opt_type == 'MultiStrOpt':
+ for default in opt.default:
+ print '# %s="%s"' % (opt.name, default)
+ elif opt_type == 'BoolOpt':
+ print '# %s=%s' % (opt.name, str(opt.default).lower())
+ else:
+ print '# %s=%s' % (opt.name, opt.default)
if __name__ == '__main__':
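Hedged sketch of the sample lines the branches above would emit, one per option type; the option names, help text, and defaults are made up::

    print '###### (StrOpt) example help text'
    print '# my_str_opt="default-value"'
    print '# my_list_opt="a,b,c"'     # ListOpt: defaults joined with ','
    print '# my_multi_opt="first"'    # MultiStrOpt: one line per default
    print '# my_multi_opt="second"'
    print '# my_bool_opt=false'       # BoolOpt: default lower-cased
    print '# my_unset_opt=<None>'     # default of None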
diff --git a/tools/conf/run.sh b/tools/conf/generate_sample.sh
index f03a77f67..8a4f55524 100755
--- a/tools/conf/run.sh
+++ b/tools/conf/generate_sample.sh
@@ -1,3 +1,4 @@
+#!/usr/bin/env bash
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 SINA Corporation
@@ -15,5 +16,10 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+pushd $(cd $(dirname "$0") && pwd) >/dev/null
-find ../../nova -type f -name "*.py" ! -path "../../nova/tests/*" -exec grep -l "Opt(" {} \; | sort -u | xargs python create_conf.py > nova.conf.sample
+find ../../nova -type f -name "*.py" ! -path "../../nova/tests/*" -exec \
+ grep -l "Opt(" {} \; | sort -u | xargs python create_conf.py > \
+ ../../etc/nova/nova.conf.sample
+
+popd >/dev/null
diff --git a/tools/esx/guest_tool.py b/tools/esx/guest_tool.py
index 5158d883a..8c8b4dfc5 100644
--- a/tools/esx/guest_tool.py
+++ b/tools/esx/guest_tool.py
@@ -1,404 +1,404 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 Citrix Systems, Inc.
-# Copyright 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Guest tools for ESX to set up network in the guest.
-On Windows we require pyWin32 installed on Python.
-"""
-
-import array
-import gettext
-import logging
-import os
-import platform
-import socket
-import struct
-import subprocess
-import sys
-import time
-
-gettext.install('nova', unicode=1)
-
-PLATFORM_WIN = 'win32'
-PLATFORM_LINUX = 'linux2'
-ARCH_32_BIT = '32bit'
-ARCH_64_BIT = '64bit'
-NO_MACHINE_ID = 'No machine id'
-
-# Logging
-FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
-if sys.platform == PLATFORM_WIN:
- LOG_DIR = os.path.join(os.environ.get('ALLUSERSPROFILE'), 'openstack')
-elif sys.platform == PLATFORM_LINUX:
- LOG_DIR = '/var/log/openstack'
-else:
- LOG_DIR = 'logs'
-if not os.path.exists(LOG_DIR):
- os.mkdir(LOG_DIR)
-LOG_FILENAME = os.path.join(LOG_DIR, 'openstack-guest-tools.log')
-logging.basicConfig(filename=LOG_FILENAME, format=FORMAT)
-
-if sys.hexversion < 0x3000000:
- _byte = ord # 2.x chr to integer
-else:
- _byte = int # 3.x byte to integer
-
-
-class ProcessExecutionError:
- """Process Execution Error Class."""
-
- def __init__(self, exit_code, stdout, stderr, cmd):
- self.exit_code = exit_code
- self.stdout = stdout
- self.stderr = stderr
- self.cmd = cmd
-
- def __str__(self):
- return str(self.exit_code)
-
-
-def _bytes2int(bytes):
- """Convert bytes to int."""
- intgr = 0
- for byt in bytes:
- intgr = (intgr << 8) + _byte(byt)
- return intgr
-
-
-def _parse_network_details(machine_id):
- """
- Parse the machine_id to get MAC, IP, Netmask and Gateway fields per NIC.
- machine_id is of the form ('NIC_record#NIC_record#', '')
- Each of the NIC will have record NIC_record in the form
- 'MAC;IP;Netmask;Gateway;Broadcast;DNS' where ';' is field separator.
- Each record is separated by '#' from next record.
- """
- logging.debug(_("Received machine_id from vmtools : %s") % machine_id[0])
- network_details = []
- if machine_id[1].strip() == "1":
- pass
- else:
- for machine_id_str in machine_id[0].split('#'):
- network_info_list = machine_id_str.split(';')
- if len(network_info_list) % 6 != 0:
- break
- no_grps = len(network_info_list) / 6
- i = 0
- while i < no_grps:
- k = i * 6
- network_details.append((
- network_info_list[k].strip().lower(),
- network_info_list[k + 1].strip(),
- network_info_list[k + 2].strip(),
- network_info_list[k + 3].strip(),
- network_info_list[k + 4].strip(),
- network_info_list[k + 5].strip().split(',')))
- i += 1
- logging.debug(_("NIC information from vmtools : %s") % network_details)
- return network_details
-
-
-def _get_windows_network_adapters():
- """Get the list of windows network adapters."""
- import win32com.client
- wbem_locator = win32com.client.Dispatch('WbemScripting.SWbemLocator')
- wbem_service = wbem_locator.ConnectServer('.', 'root\cimv2')
- wbem_network_adapters = wbem_service.InstancesOf('Win32_NetworkAdapter')
- network_adapters = []
- for wbem_network_adapter in wbem_network_adapters:
- if wbem_network_adapter.NetConnectionStatus == 2 or \
- wbem_network_adapter.NetConnectionStatus == 7:
- adapter_name = wbem_network_adapter.NetConnectionID
- mac_address = wbem_network_adapter.MacAddress.lower()
- wbem_network_adapter_config = \
- wbem_network_adapter.associators_(
- 'Win32_NetworkAdapterSetting',
- 'Win32_NetworkAdapterConfiguration')[0]
- ip_address = ''
- subnet_mask = ''
- if wbem_network_adapter_config.IPEnabled:
- ip_address = wbem_network_adapter_config.IPAddress[0]
- subnet_mask = wbem_network_adapter_config.IPSubnet[0]
- #wbem_network_adapter_config.DefaultIPGateway[0]
- network_adapters.append({'name': adapter_name,
- 'mac-address': mac_address,
- 'ip-address': ip_address,
- 'subnet-mask': subnet_mask})
- return network_adapters
-
-
-def _get_linux_network_adapters():
- """Get the list of Linux network adapters."""
- import fcntl
- max_bytes = 8096
- arch = platform.architecture()[0]
- if arch == ARCH_32_BIT:
- offset1 = 32
- offset2 = 32
- elif arch == ARCH_64_BIT:
- offset1 = 16
- offset2 = 40
- else:
- raise OSError(_("Unknown architecture: %s") % arch)
- sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- names = array.array('B', '\0' * max_bytes)
- outbytes = struct.unpack('iL', fcntl.ioctl(
- sock.fileno(),
- 0x8912,
- struct.pack('iL', max_bytes, names.buffer_info()[0])))[0]
- adapter_names = \
- [names.tostring()[n_counter:n_counter + offset1].split('\0', 1)[0]
- for n_counter in xrange(0, outbytes, offset2)]
- network_adapters = []
- for adapter_name in adapter_names:
- ip_address = socket.inet_ntoa(fcntl.ioctl(
- sock.fileno(),
- 0x8915,
- struct.pack('256s', adapter_name))[20:24])
- subnet_mask = socket.inet_ntoa(fcntl.ioctl(
- sock.fileno(),
- 0x891b,
- struct.pack('256s', adapter_name))[20:24])
- raw_mac_address = '%012x' % _bytes2int(fcntl.ioctl(
- sock.fileno(),
- 0x8927,
- struct.pack('256s', adapter_name))[18:24])
- mac_address = ":".join([raw_mac_address[m_counter:m_counter + 2]
- for m_counter in range(0, len(raw_mac_address), 2)]).lower()
- network_adapters.append({'name': adapter_name,
- 'mac-address': mac_address,
- 'ip-address': ip_address,
- 'subnet-mask': subnet_mask})
- return network_adapters
-
-
-def _get_adapter_name_and_ip_address(network_adapters, mac_address):
- """Get the adapter name based on the MAC address."""
- adapter_name = None
- ip_address = None
- for network_adapter in network_adapters:
- if network_adapter['mac-address'] == mac_address.lower():
- adapter_name = network_adapter['name']
- ip_address = network_adapter['ip-address']
- break
- return adapter_name, ip_address
-
-
-def _get_win_adapter_name_and_ip_address(mac_address):
- """Get Windows network adapter name."""
- network_adapters = _get_windows_network_adapters()
- return _get_adapter_name_and_ip_address(network_adapters, mac_address)
-
-
-def _get_linux_adapter_name_and_ip_address(mac_address):
- """Get Linux network adapter name."""
- network_adapters = _get_linux_network_adapters()
- return _get_adapter_name_and_ip_address(network_adapters, mac_address)
-
-
-def _execute(cmd_list, process_input=None, check_exit_code=True):
- """Executes the command with the list of arguments specified."""
- cmd = ' '.join(cmd_list)
- logging.debug(_("Executing command: '%s'") % cmd)
- env = os.environ.copy()
- obj = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
- result = None
- if process_input is not None:
- result = obj.communicate(process_input)
- else:
- result = obj.communicate()
- obj.stdin.close()
- if obj.returncode:
- logging.debug(_("Result was %s") % obj.returncode)
- if check_exit_code and obj.returncode != 0:
- (stdout, stderr) = result
- raise ProcessExecutionError(exit_code=obj.returncode,
- stdout=stdout,
- stderr=stderr,
- cmd=cmd)
- time.sleep(0.1)
- return result
-
-
-def _windows_set_networking():
- """Set IP address for the windows VM."""
- program_files = os.environ.get('PROGRAMFILES')
- program_files_x86 = os.environ.get('PROGRAMFILES(X86)')
- vmware_tools_bin = None
- if os.path.exists(os.path.join(program_files, 'VMware', 'VMware Tools',
- 'vmtoolsd.exe')):
- vmware_tools_bin = os.path.join(program_files, 'VMware',
- 'VMware Tools', 'vmtoolsd.exe')
- elif os.path.exists(os.path.join(program_files, 'VMware', 'VMware Tools',
- 'VMwareService.exe')):
- vmware_tools_bin = os.path.join(program_files, 'VMware',
- 'VMware Tools', 'VMwareService.exe')
- elif program_files_x86 and os.path.exists(os.path.join(program_files_x86,
- 'VMware', 'VMware Tools',
- 'VMwareService.exe')):
- vmware_tools_bin = os.path.join(program_files_x86, 'VMware',
- 'VMware Tools', 'VMwareService.exe')
- if vmware_tools_bin:
- cmd = ['"' + vmware_tools_bin + '"', '--cmd', 'machine.id.get']
- for network_detail in _parse_network_details(_execute(cmd,
- check_exit_code=False)):
- mac_address, ip_address, subnet_mask, gateway, broadcast,\
- dns_servers = network_detail
- adapter_name, current_ip_address = \
- _get_win_adapter_name_and_ip_address(mac_address)
- if adapter_name and not ip_address == current_ip_address:
- cmd = ['netsh', 'interface', 'ip', 'set', 'address',
- 'name="%s"' % adapter_name, 'source=static', ip_address,
- subnet_mask, gateway, '1']
- _execute(cmd)
- # Windows doesn't let you manually set the broadcast address
- for dns_server in dns_servers:
- if dns_server:
- cmd = ['netsh', 'interface', 'ip', 'add', 'dns',
- 'name="%s"' % adapter_name, dns_server]
- _execute(cmd)
- else:
- logging.warn(_("VMware Tools is not installed"))
-
-
-def _filter_duplicates(all_entries):
- final_list = []
- for entry in all_entries:
- if entry and entry not in final_list:
- final_list.append(entry)
- return final_list
-
-
-def _set_rhel_networking(network_details=None):
- """Set IPv4 network settings for RHEL distros."""
- network_details = network_details or []
- all_dns_servers = []
- for network_detail in network_details:
- mac_address, ip_address, subnet_mask, gateway, broadcast,\
- dns_servers = network_detail
- all_dns_servers.extend(dns_servers)
- adapter_name, current_ip_address = \
- _get_linux_adapter_name_and_ip_address(mac_address)
- if adapter_name and not ip_address == current_ip_address:
- interface_file_name = \
- '/etc/sysconfig/network-scripts/ifcfg-%s' % adapter_name
- # Remove file
- os.remove(interface_file_name)
- # Touch file
- _execute(['touch', interface_file_name])
- interface_file = open(interface_file_name, 'w')
- interface_file.write('\nDEVICE=%s' % adapter_name)
- interface_file.write('\nUSERCTL=yes')
- interface_file.write('\nONBOOT=yes')
- interface_file.write('\nBOOTPROTO=static')
- interface_file.write('\nBROADCAST=%s' % broadcast)
- interface_file.write('\nNETWORK=')
- interface_file.write('\nGATEWAY=%s' % gateway)
- interface_file.write('\nNETMASK=%s' % subnet_mask)
- interface_file.write('\nIPADDR=%s' % ip_address)
- interface_file.write('\nMACADDR=%s' % mac_address)
- interface_file.close()
- if all_dns_servers:
- dns_file_name = "/etc/resolv.conf"
- os.remove(dns_file_name)
- _execute(['touch', dns_file_name])
- dns_file = open(dns_file_name, 'w')
- dns_file.write("; generated by OpenStack guest tools")
- unique_entries = _filter_duplicates(all_dns_servers)
- for dns_server in unique_entries:
- dns_file.write("\nnameserver %s" % dns_server)
- dns_file.close()
- _execute(['/sbin/service', 'network', 'restart'])
-
-
-def _set_ubuntu_networking(network_details=None):
- """Set IPv4 network settings for Ubuntu."""
- network_details = network_details or []
- all_dns_servers = []
- interface_file_name = '/etc/network/interfaces'
- # Remove file
- os.remove(interface_file_name)
- # Touch file
- _execute(['touch', interface_file_name])
- interface_file = open(interface_file_name, 'w')
- for device, network_detail in enumerate(network_details):
- mac_address, ip_address, subnet_mask, gateway, broadcast,\
- dns_servers = network_detail
- all_dns_servers.extend(dns_servers)
- adapter_name, current_ip_address = \
- _get_linux_adapter_name_and_ip_address(mac_address)
-
- if adapter_name:
- interface_file.write('\nauto %s' % adapter_name)
- interface_file.write('\niface %s inet static' % adapter_name)
- interface_file.write('\nbroadcast %s' % broadcast)
- interface_file.write('\ngateway %s' % gateway)
- interface_file.write('\nnetmask %s' % subnet_mask)
- interface_file.write('\naddress %s\n' % ip_address)
- logging.debug(_("Successfully configured NIC %d with "
- "NIC info %s") % (device, network_detail))
- interface_file.close()
-
- if all_dns_servers:
- dns_file_name = "/etc/resolv.conf"
- os.remove(dns_file_name)
- _execute(['touch', dns_file_name])
- dns_file = open(dns_file_name, 'w')
- dns_file.write("; generated by OpenStack guest tools")
- unique_entries = _filter_duplicates(all_dns_servers)
- for dns_server in unique_entries:
- dns_file.write("\nnameserver %s" % dns_server)
- dns_file.close()
-
- logging.debug(_("Restarting networking....\n"))
- _execute(['/etc/init.d/networking', 'restart'])
-
-
-def _linux_set_networking():
- """Set IP address for the Linux VM."""
- vmware_tools_bin = None
- if os.path.exists('/usr/sbin/vmtoolsd'):
- vmware_tools_bin = '/usr/sbin/vmtoolsd'
- elif os.path.exists('/usr/bin/vmtoolsd'):
- vmware_tools_bin = '/usr/bin/vmtoolsd'
- elif os.path.exists('/usr/sbin/vmware-guestd'):
- vmware_tools_bin = '/usr/sbin/vmware-guestd'
- elif os.path.exists('/usr/bin/vmware-guestd'):
- vmware_tools_bin = '/usr/bin/vmware-guestd'
- if vmware_tools_bin:
- cmd = [vmware_tools_bin, '--cmd', 'machine.id.get']
- network_details = _parse_network_details(_execute(cmd,
- check_exit_code=False))
- # TODO(sateesh): For other distros like suse, debian, BSD, etc.
- if(platform.dist()[0] == 'Ubuntu'):
- _set_ubuntu_networking(network_details)
- elif (platform.dist()[0] == 'redhat'):
- _set_rhel_networking(network_details)
- else:
- logging.warn(_("Distro '%s' not supported") % platform.dist()[0])
- else:
- logging.warn(_("VMware Tools is not installed"))
-
-if __name__ == '__main__':
- pltfrm = sys.platform
- if pltfrm == PLATFORM_WIN:
- _windows_set_networking()
- elif pltfrm == PLATFORM_LINUX:
- _linux_set_networking()
- else:
- raise NotImplementedError(_("Platform not implemented: '%s'") % pltfrm)
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Guest tools for ESX to set up network in the guest.
+On Windows we require pyWin32 installed on Python.
+"""
+
+import array
+import gettext
+import logging
+import os
+import platform
+import socket
+import struct
+import subprocess
+import sys
+import time
+
+gettext.install('nova', unicode=1)
+
+PLATFORM_WIN = 'win32'
+PLATFORM_LINUX = 'linux2'
+ARCH_32_BIT = '32bit'
+ARCH_64_BIT = '64bit'
+NO_MACHINE_ID = 'No machine id'
+
+# Logging
+FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
+if sys.platform == PLATFORM_WIN:
+ LOG_DIR = os.path.join(os.environ.get('ALLUSERSPROFILE'), 'openstack')
+elif sys.platform == PLATFORM_LINUX:
+ LOG_DIR = '/var/log/openstack'
+else:
+ LOG_DIR = 'logs'
+if not os.path.exists(LOG_DIR):
+ os.mkdir(LOG_DIR)
+LOG_FILENAME = os.path.join(LOG_DIR, 'openstack-guest-tools.log')
+logging.basicConfig(filename=LOG_FILENAME, format=FORMAT)
+
+if sys.hexversion < 0x3000000:
+ _byte = ord # 2.x chr to integer
+else:
+ _byte = int # 3.x byte to integer
+
+
+class ProcessExecutionError:
+ """Process Execution Error Class."""
+
+ def __init__(self, exit_code, stdout, stderr, cmd):
+ self.exit_code = exit_code
+ self.stdout = stdout
+ self.stderr = stderr
+ self.cmd = cmd
+
+ def __str__(self):
+ return str(self.exit_code)
+
+
+def _bytes2int(bytes):
+ """Convert bytes to int."""
+ intgr = 0
+ for byt in bytes:
+ intgr = (intgr << 8) + _byte(byt)
+ return intgr
+
+
+def _parse_network_details(machine_id):
+ """
+ Parse the machine_id to get MAC, IP, Netmask and Gateway fields per NIC.
+ machine_id is of the form ('NIC_record#NIC_record#', '')
+ Each NIC has a record, NIC_record, of the form
+ 'MAC;IP;Netmask;Gateway;Broadcast;DNS', where ';' is the field separator.
+ Records are separated from one another by '#'.
+ """
+ logging.debug(_("Received machine_id from vmtools : %s") % machine_id[0])
+ network_details = []
+ if machine_id[1].strip() == "1":
+ pass
+ else:
+ for machine_id_str in machine_id[0].split('#'):
+ network_info_list = machine_id_str.split(';')
+ if len(network_info_list) % 6 != 0:
+ break
+ no_grps = len(network_info_list) / 6
+ i = 0
+ while i < no_grps:
+ k = i * 6
+ network_details.append((
+ network_info_list[k].strip().lower(),
+ network_info_list[k + 1].strip(),
+ network_info_list[k + 2].strip(),
+ network_info_list[k + 3].strip(),
+ network_info_list[k + 4].strip(),
+ network_info_list[k + 5].strip().split(',')))
+ i += 1
+ logging.debug(_("NIC information from vmtools : %s") % network_details)
+ return network_details
+
+
+def _get_windows_network_adapters():
+ """Get the list of windows network adapters."""
+ import win32com.client
+ wbem_locator = win32com.client.Dispatch('WbemScripting.SWbemLocator')
+ wbem_service = wbem_locator.ConnectServer('.', 'root\cimv2')
+ wbem_network_adapters = wbem_service.InstancesOf('Win32_NetworkAdapter')
+ network_adapters = []
+ for wbem_network_adapter in wbem_network_adapters:
+ if wbem_network_adapter.NetConnectionStatus == 2 or \
+ wbem_network_adapter.NetConnectionStatus == 7:
+ adapter_name = wbem_network_adapter.NetConnectionID
+ mac_address = wbem_network_adapter.MacAddress.lower()
+ wbem_network_adapter_config = \
+ wbem_network_adapter.associators_(
+ 'Win32_NetworkAdapterSetting',
+ 'Win32_NetworkAdapterConfiguration')[0]
+ ip_address = ''
+ subnet_mask = ''
+ if wbem_network_adapter_config.IPEnabled:
+ ip_address = wbem_network_adapter_config.IPAddress[0]
+ subnet_mask = wbem_network_adapter_config.IPSubnet[0]
+ #wbem_network_adapter_config.DefaultIPGateway[0]
+ network_adapters.append({'name': adapter_name,
+ 'mac-address': mac_address,
+ 'ip-address': ip_address,
+ 'subnet-mask': subnet_mask})
+ return network_adapters
+
+
+def _get_linux_network_adapters():
+ """Get the list of Linux network adapters."""
+ import fcntl
+ max_bytes = 8096
+ arch = platform.architecture()[0]
+ if arch == ARCH_32_BIT:
+ offset1 = 32
+ offset2 = 32
+ elif arch == ARCH_64_BIT:
+ offset1 = 16
+ offset2 = 40
+ else:
+ raise OSError(_("Unknown architecture: %s") % arch)
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ names = array.array('B', '\0' * max_bytes)
+ outbytes = struct.unpack('iL', fcntl.ioctl(
+ sock.fileno(),
+ 0x8912,
+ struct.pack('iL', max_bytes, names.buffer_info()[0])))[0]
+ adapter_names = \
+ [names.tostring()[n_counter:n_counter + offset1].split('\0', 1)[0]
+ for n_counter in xrange(0, outbytes, offset2)]
+ network_adapters = []
+ for adapter_name in adapter_names:
+ ip_address = socket.inet_ntoa(fcntl.ioctl(
+ sock.fileno(),
+ 0x8915,
+ struct.pack('256s', adapter_name))[20:24])
+ subnet_mask = socket.inet_ntoa(fcntl.ioctl(
+ sock.fileno(),
+ 0x891b,
+ struct.pack('256s', adapter_name))[20:24])
+ raw_mac_address = '%012x' % _bytes2int(fcntl.ioctl(
+ sock.fileno(),
+ 0x8927,
+ struct.pack('256s', adapter_name))[18:24])
+ mac_address = ":".join([raw_mac_address[m_counter:m_counter + 2]
+ for m_counter in range(0, len(raw_mac_address), 2)]).lower()
+ network_adapters.append({'name': adapter_name,
+ 'mac-address': mac_address,
+ 'ip-address': ip_address,
+ 'subnet-mask': subnet_mask})
+ return network_adapters
+
+
+def _get_adapter_name_and_ip_address(network_adapters, mac_address):
+ """Get the adapter name based on the MAC address."""
+ adapter_name = None
+ ip_address = None
+ for network_adapter in network_adapters:
+ if network_adapter['mac-address'] == mac_address.lower():
+ adapter_name = network_adapter['name']
+ ip_address = network_adapter['ip-address']
+ break
+ return adapter_name, ip_address
+
+
+def _get_win_adapter_name_and_ip_address(mac_address):
+ """Get Windows network adapter name."""
+ network_adapters = _get_windows_network_adapters()
+ return _get_adapter_name_and_ip_address(network_adapters, mac_address)
+
+
+def _get_linux_adapter_name_and_ip_address(mac_address):
+ """Get Linux network adapter name."""
+ network_adapters = _get_linux_network_adapters()
+ return _get_adapter_name_and_ip_address(network_adapters, mac_address)
+
+
+def _execute(cmd_list, process_input=None, check_exit_code=True):
+ """Executes the command with the list of arguments specified."""
+ cmd = ' '.join(cmd_list)
+ logging.debug(_("Executing command: '%s'") % cmd)
+ env = os.environ.copy()
+ obj = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
+ result = None
+ if process_input is not None:
+ result = obj.communicate(process_input)
+ else:
+ result = obj.communicate()
+ obj.stdin.close()
+ if obj.returncode:
+ logging.debug(_("Result was %s") % obj.returncode)
+ if check_exit_code and obj.returncode != 0:
+ (stdout, stderr) = result
+ raise ProcessExecutionError(exit_code=obj.returncode,
+ stdout=stdout,
+ stderr=stderr,
+ cmd=cmd)
+ time.sleep(0.1)
+ return result
+
+
+def _windows_set_networking():
+ """Set IP address for the windows VM."""
+ program_files = os.environ.get('PROGRAMFILES')
+ program_files_x86 = os.environ.get('PROGRAMFILES(X86)')
+ vmware_tools_bin = None
+ if os.path.exists(os.path.join(program_files, 'VMware', 'VMware Tools',
+ 'vmtoolsd.exe')):
+ vmware_tools_bin = os.path.join(program_files, 'VMware',
+ 'VMware Tools', 'vmtoolsd.exe')
+ elif os.path.exists(os.path.join(program_files, 'VMware', 'VMware Tools',
+ 'VMwareService.exe')):
+ vmware_tools_bin = os.path.join(program_files, 'VMware',
+ 'VMware Tools', 'VMwareService.exe')
+ elif program_files_x86 and os.path.exists(os.path.join(program_files_x86,
+ 'VMware', 'VMware Tools',
+ 'VMwareService.exe')):
+ vmware_tools_bin = os.path.join(program_files_x86, 'VMware',
+ 'VMware Tools', 'VMwareService.exe')
+ if vmware_tools_bin:
+ cmd = ['"' + vmware_tools_bin + '"', '--cmd', 'machine.id.get']
+ for network_detail in _parse_network_details(_execute(cmd,
+ check_exit_code=False)):
+ mac_address, ip_address, subnet_mask, gateway, broadcast,\
+ dns_servers = network_detail
+ adapter_name, current_ip_address = \
+ _get_win_adapter_name_and_ip_address(mac_address)
+ if adapter_name and not ip_address == current_ip_address:
+ cmd = ['netsh', 'interface', 'ip', 'set', 'address',
+ 'name="%s"' % adapter_name, 'source=static', ip_address,
+ subnet_mask, gateway, '1']
+ _execute(cmd)
+ # Windows doesn't let you manually set the broadcast address
+ for dns_server in dns_servers:
+ if dns_server:
+ cmd = ['netsh', 'interface', 'ip', 'add', 'dns',
+ 'name="%s"' % adapter_name, dns_server]
+ _execute(cmd)
+ else:
+ logging.warn(_("VMware Tools is not installed"))
+
+
+def _filter_duplicates(all_entries):
+ final_list = []
+ for entry in all_entries:
+ if entry and entry not in final_list:
+ final_list.append(entry)
+ return final_list
+
+
+def _set_rhel_networking(network_details=None):
+ """Set IPv4 network settings for RHEL distros."""
+ network_details = network_details or []
+ all_dns_servers = []
+ for network_detail in network_details:
+ mac_address, ip_address, subnet_mask, gateway, broadcast,\
+ dns_servers = network_detail
+ all_dns_servers.extend(dns_servers)
+ adapter_name, current_ip_address = \
+ _get_linux_adapter_name_and_ip_address(mac_address)
+ if adapter_name and not ip_address == current_ip_address:
+ interface_file_name = \
+ '/etc/sysconfig/network-scripts/ifcfg-%s' % adapter_name
+ # Remove file
+ os.remove(interface_file_name)
+ # Touch file
+ _execute(['touch', interface_file_name])
+ interface_file = open(interface_file_name, 'w')
+ interface_file.write('\nDEVICE=%s' % adapter_name)
+ interface_file.write('\nUSERCTL=yes')
+ interface_file.write('\nONBOOT=yes')
+ interface_file.write('\nBOOTPROTO=static')
+ interface_file.write('\nBROADCAST=%s' % broadcast)
+ interface_file.write('\nNETWORK=')
+ interface_file.write('\nGATEWAY=%s' % gateway)
+ interface_file.write('\nNETMASK=%s' % subnet_mask)
+ interface_file.write('\nIPADDR=%s' % ip_address)
+ interface_file.write('\nMACADDR=%s' % mac_address)
+ interface_file.close()
+ if all_dns_servers:
+ dns_file_name = "/etc/resolv.conf"
+ os.remove(dns_file_name)
+ _execute(['touch', dns_file_name])
+ dns_file = open(dns_file_name, 'w')
+ dns_file.write("; generated by OpenStack guest tools")
+ unique_entries = _filter_duplicates(all_dns_servers)
+ for dns_server in unique_entries:
+ dns_file.write("\nnameserver %s" % dns_server)
+ dns_file.close()
+ _execute(['/sbin/service', 'network', 'restart'])
+
+
+def _set_ubuntu_networking(network_details=None):
+ """Set IPv4 network settings for Ubuntu."""
+ network_details = network_details or []
+ all_dns_servers = []
+ interface_file_name = '/etc/network/interfaces'
+ # Remove file
+ os.remove(interface_file_name)
+ # Touch file
+ _execute(['touch', interface_file_name])
+ interface_file = open(interface_file_name, 'w')
+ for device, network_detail in enumerate(network_details):
+ mac_address, ip_address, subnet_mask, gateway, broadcast,\
+ dns_servers = network_detail
+ all_dns_servers.extend(dns_servers)
+ adapter_name, current_ip_address = \
+ _get_linux_adapter_name_and_ip_address(mac_address)
+
+ if adapter_name:
+ interface_file.write('\nauto %s' % adapter_name)
+ interface_file.write('\niface %s inet static' % adapter_name)
+ interface_file.write('\nbroadcast %s' % broadcast)
+ interface_file.write('\ngateway %s' % gateway)
+ interface_file.write('\nnetmask %s' % subnet_mask)
+ interface_file.write('\naddress %s\n' % ip_address)
+ logging.debug(_("Successfully configured NIC %d with "
+ "NIC info %s") % (device, network_detail))
+ interface_file.close()
+
+ if all_dns_servers:
+ dns_file_name = "/etc/resolv.conf"
+ os.remove(dns_file_name)
+ _execute(['touch', dns_file_name])
+ dns_file = open(dns_file_name, 'w')
+ dns_file.write("; generated by OpenStack guest tools")
+ unique_entries = _filter_duplicates(all_dns_servers)
+ for dns_server in unique_entries:
+ dns_file.write("\nnameserver %s" % dns_server)
+ dns_file.close()
+
+ logging.debug(_("Restarting networking....\n"))
+ _execute(['/etc/init.d/networking', 'restart'])
+
+
+def _linux_set_networking():
+ """Set IP address for the Linux VM."""
+ vmware_tools_bin = None
+ if os.path.exists('/usr/sbin/vmtoolsd'):
+ vmware_tools_bin = '/usr/sbin/vmtoolsd'
+ elif os.path.exists('/usr/bin/vmtoolsd'):
+ vmware_tools_bin = '/usr/bin/vmtoolsd'
+ elif os.path.exists('/usr/sbin/vmware-guestd'):
+ vmware_tools_bin = '/usr/sbin/vmware-guestd'
+ elif os.path.exists('/usr/bin/vmware-guestd'):
+ vmware_tools_bin = '/usr/bin/vmware-guestd'
+ if vmware_tools_bin:
+ cmd = [vmware_tools_bin, '--cmd', 'machine.id.get']
+ network_details = _parse_network_details(_execute(cmd,
+ check_exit_code=False))
+ # TODO(sateesh): For other distros like suse, debian, BSD, etc.
+ if(platform.dist()[0] == 'Ubuntu'):
+ _set_ubuntu_networking(network_details)
+ elif (platform.dist()[0] == 'redhat'):
+ _set_rhel_networking(network_details)
+ else:
+ logging.warn(_("Distro '%s' not supported") % platform.dist()[0])
+ else:
+ logging.warn(_("VMware Tools is not installed"))
+
+if __name__ == '__main__':
+ pltfrm = sys.platform
+ if pltfrm == PLATFORM_WIN:
+ _windows_set_networking()
+ elif pltfrm == PLATFORM_LINUX:
+ _linux_set_networking()
+ else:
+ raise NotImplementedError(_("Platform not implemented: '%s'") % pltfrm)
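To close, a self-contained sketch of the machine_id record format that _parse_network_details documents ('MAC;IP;Netmask;Gateway;Broadcast;DNS', records joined by '#'); the addresses are placeholders::

    machine_id = ('00:50:56:aa:bb:cc;10.0.0.5;255.255.255.0;'
                  '10.0.0.1;10.0.0.255;8.8.8.8,8.8.4.4#', '')

    for record in machine_id[0].split('#'):
        if not record:
            continue
        mac, ip, netmask, gateway, broadcast, dns = record.split(';')
        print mac, ip, netmask, gateway, broadcast, dns.split(',')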