summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorArmando Migliaccio <armando.migliaccio@citrix.com>2010-11-17 18:33:47 +0000
committerArmando Migliaccio <armando.migliaccio@citrix.com>2010-11-17 18:33:47 +0000
commite0ad4e8dd9f73c3c1e775f3deebe5a08f2321ac6 (patch)
treead3a30ad0068dab59c09f1da2799266bb859ed2e
parent0c19386f7c4ca063edbf8c10ffb86b399884e457 (diff)
parent551fd309fcbfedb99555a81fac6a40f003598fd6 (diff)
merged with trunk
-rw-r--r--contrib/puppet/files/etc/default/nova-compute1
-rw-r--r--contrib/puppet/files/etc/default/nova-volume1
-rw-r--r--contrib/puppet/files/etc/issue5
-rw-r--r--contrib/puppet/files/etc/libvirt/qemu.conf170
-rw-r--r--contrib/puppet/files/etc/lvm/lvm.conf463
-rw-r--r--contrib/puppet/files/etc/nova.conf28
-rw-r--r--contrib/puppet/files/production/boto.cfg3
-rw-r--r--contrib/puppet/files/production/genvpn.sh35
-rw-r--r--contrib/puppet/files/production/libvirt.qemu.xml.template35
-rw-r--r--contrib/puppet/files/production/my.cnf137
-rwxr-xr-xcontrib/puppet/files/production/nova-iptables185
-rw-r--r--contrib/puppet/files/production/nova-iscsi-dev.sh19
-rwxr-xr-xcontrib/puppet/files/production/setup_data.sh6
-rwxr-xr-xcontrib/puppet/files/production/slap.sh261
-rw-r--r--contrib/puppet/fileserver.conf8
-rw-r--r--contrib/puppet/manifests/classes/apt.pp1
-rw-r--r--contrib/puppet/manifests/classes/issue.pp14
-rw-r--r--contrib/puppet/manifests/classes/kern_module.pp34
-rw-r--r--contrib/puppet/manifests/classes/loopback.pp6
-rw-r--r--contrib/puppet/manifests/classes/lvm.pp8
-rw-r--r--contrib/puppet/manifests/classes/lvmconf.pp8
-rw-r--r--contrib/puppet/manifests/classes/nova.pp464
-rw-r--r--contrib/puppet/manifests/classes/swift.pp7
-rw-r--r--contrib/puppet/manifests/site.pp120
-rw-r--r--contrib/puppet/manifests/templates.pp21
-rw-r--r--contrib/puppet/puppet.conf11
-rw-r--r--contrib/puppet/templates/haproxy.cfg.erb39
-rw-r--r--contrib/puppet/templates/monitrc-nova-api.erb138
-rw-r--r--contrib/puppet/templates/nova-iptables.erb10
-rw-r--r--contrib/puppet/templates/production/nova-common.conf.erb56
-rw-r--r--contrib/puppet/templates/production/nova-nova.conf.erb21
-rw-r--r--doc/ext/nova_todo.py37
-rw-r--r--doc/source/adminguide/distros/others.rst88
-rw-r--r--doc/source/adminguide/distros/ubuntu.10.04.rst41
-rw-r--r--doc/source/adminguide/distros/ubuntu.10.10.rst41
-rw-r--r--doc/source/adminguide/index.rst2
-rw-r--r--doc/source/adminguide/managing.networks.rst71
-rw-r--r--doc/source/adminguide/multi.node.install.rst63
-rw-r--r--doc/source/adminguide/network.flat.rst60
-rw-r--r--doc/source/adminguide/network.vlan.rst179
-rw-r--r--doc/source/adminguide/single.node.install.rst344
-rw-r--r--doc/source/community.rst3
-rw-r--r--doc/source/conf.py7
-rw-r--r--doc/source/nova.concepts.rst7
-rw-r--r--doc/source/quickstart.rst24
-rw-r--r--doc/source/service.architecture.rst6
-rw-r--r--nova/api/__init__.py2
-rw-r--r--nova/api/ec2/cloud.py6
-rw-r--r--nova/compute/disk.py16
-rw-r--r--nova/service.py3
-rw-r--r--nova/tests/cloud_unittest.py31
-rw-r--r--nova/tests/network_unittest.py2
-rw-r--r--nova/utils.py7
-rw-r--r--run_tests.py1
-rw-r--r--setup.py3
-rwxr-xr-xtools/nova-debug92
56 files changed, 3343 insertions, 108 deletions
diff --git a/contrib/puppet/files/etc/default/nova-compute b/contrib/puppet/files/etc/default/nova-compute
new file mode 100644
index 000000000..8bd7d091c
--- /dev/null
+++ b/contrib/puppet/files/etc/default/nova-compute
@@ -0,0 +1 @@
+ENABLED=true
diff --git a/contrib/puppet/files/etc/default/nova-volume b/contrib/puppet/files/etc/default/nova-volume
new file mode 100644
index 000000000..8bd7d091c
--- /dev/null
+++ b/contrib/puppet/files/etc/default/nova-volume
@@ -0,0 +1 @@
+ENABLED=true
diff --git a/contrib/puppet/files/etc/issue b/contrib/puppet/files/etc/issue
new file mode 100644
index 000000000..8c567221b
--- /dev/null
+++ b/contrib/puppet/files/etc/issue
@@ -0,0 +1,5 @@
+-----------------------------------------------
+
+ Welcome to your OpenStack installation!
+
+-----------------------------------------------
diff --git a/contrib/puppet/files/etc/libvirt/qemu.conf b/contrib/puppet/files/etc/libvirt/qemu.conf
new file mode 100644
index 000000000..7839f12e5
--- /dev/null
+++ b/contrib/puppet/files/etc/libvirt/qemu.conf
@@ -0,0 +1,170 @@
+# Master configuration file for the QEMU driver.
+# All settings described here are optional - if omitted, sensible
+# defaults are used.
+
+# VNC is configured to listen on 127.0.0.1 by default.
+# To make it listen on all public interfaces, uncomment
+# this next option.
+#
+# NB, strong recommendation to enable TLS + x509 certificate
+# verification when allowing public access
+#
+# vnc_listen = "0.0.0.0"
+
+
+# Enable use of TLS encryption on the VNC server. This requires
+# a VNC client which supports the VeNCrypt protocol extension.
+# Examples include vinagre, virt-viewer, virt-manager and vencrypt
+# itself. UltraVNC, RealVNC, TightVNC do not support this
+#
+# It is necessary to setup CA and issue a server certificate
+# before enabling this.
+#
+# vnc_tls = 1
+
+
+# Use of TLS requires that x509 certificates be issued. The
+# default it to keep them in /etc/pki/libvirt-vnc. This directory
+# must contain
+#
+# ca-cert.pem - the CA master certificate
+# server-cert.pem - the server certificate signed with ca-cert.pem
+# server-key.pem - the server private key
+#
+# This option allows the certificate directory to be changed
+#
+# vnc_tls_x509_cert_dir = "/etc/pki/libvirt-vnc"
+
+
+# The default TLS configuration only uses certificates for the server
+# allowing the client to verify the server's identity and establish
+# and encrypted channel.
+#
+# It is possible to use x509 certificates for authentication too, by
+# issuing a x509 certificate to every client who needs to connect.
+#
+# Enabling this option will reject any client who does not have a
+# certificate signed by the CA in /etc/pki/libvirt-vnc/ca-cert.pem
+#
+# vnc_tls_x509_verify = 1
+
+
+# The default VNC password. Only 8 letters are significant for
+# VNC passwords. This parameter is only used if the per-domain
+# XML config does not already provide a password. To allow
+# access without passwords, leave this commented out. An empty
+# string will still enable passwords, but be rejected by QEMU
+# effectively preventing any use of VNC. Obviously change this
+# example here before you set this
+#
+# vnc_password = "XYZ12345"
+
+
+# Enable use of SASL encryption on the VNC server. This requires
+# a VNC client which supports the SASL protocol extension.
+# Examples include vinagre, virt-viewer and virt-manager
+# itself. UltraVNC, RealVNC, TightVNC do not support this
+#
+# It is necessary to configure /etc/sasl2/qemu.conf to choose
+# the desired SASL plugin (eg, GSSPI for Kerberos)
+#
+# vnc_sasl = 1
+
+
+# The default SASL configuration file is located in /etc/sasl2/
+# When running libvirtd unprivileged, it may be desirable to
+# override the configs in this location. Set this parameter to
+# point to the directory, and create a qemu.conf in that location
+#
+# vnc_sasl_dir = "/some/directory/sasl2"
+
+
+
+
+# The default security driver is SELinux. If SELinux is disabled
+# on the host, then the security driver will automatically disable
+# itself. If you wish to disable QEMU SELinux security driver while
+# leaving SELinux enabled for the host in general, then set this
+# to 'none' instead
+#
+# security_driver = "selinux"
+
+
+# The user ID for QEMU processes run by the system instance
+user = "root"
+
+# The group ID for QEMU processes run by the system instance
+group = "root"
+
+# Whether libvirt should dynamically change file ownership
+# to match the configured user/group above. Defaults to 1.
+# Set to 0 to disable file ownership changes.
+#dynamic_ownership = 1
+
+
+# What cgroup controllers to make use of with QEMU guests
+#
+# - 'cpu' - use for scheduler tunables
+# - 'devices' - use for device whitelisting
+#
+# NB, even if configured here, they won't be used unless
+# the administrator has mounted cgroups. eg
+#
+# mkdir /dev/cgroup
+# mount -t cgroup -o devices,cpu none /dev/cgroup
+#
+# They can be mounted anywhere, and different controllers
+# can be mounted in different locations. libvirt will detect
+# where they are located.
+#
+# cgroup_controllers = [ "cpu", "devices" ]
+
+# This is the basic set of devices allowed / required by
+# all virtual machines.
+#
+# As well as this, any configured block backed disks,
+# all sound device, and all PTY devices are allowed.
+#
+# This will only need setting if newer QEMU suddenly
+# wants some device we don't already know about.
+#
+#cgroup_device_acl = [
+# "/dev/null", "/dev/full", "/dev/zero",
+# "/dev/random", "/dev/urandom",
+# "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
+# "/dev/rtc", "/dev/hpet", "/dev/net/tun",
+#]
+
+# The default format for Qemu/KVM guest save images is raw; that is, the
+# memory from the domain is dumped out directly to a file. If you have
+# guests with a large amount of memory, however, this can take up quite
+# a bit of space. If you would like to compress the images while they
+# are being saved to disk, you can also set "lzop", "gzip", "bzip2", or "xz"
+# for save_image_format. Note that this means you slow down the process of
+# saving a domain in order to save disk space; the list above is in descending
+# order by performance and ascending order by compression ratio.
+#
+# save_image_format = "raw"
+
+# If provided by the host and a hugetlbfs mount point is configured,
+# a guest may request huge page backing. When this mount point is
+# unspecified here, determination of a host mount point in /proc/mounts
+# will be attempted. Specifying an explicit mount overrides detection
+# of the same in /proc/mounts. Setting the mount point to "" will
+# disable guest hugepage backing.
+#
+# NB, within this mount point, guests will create memory backing files
+# in a location of $MOUNTPOINT/libvirt/qemu
+
+# hugetlbfs_mount = "/dev/hugepages"
+
+# mac_filter enables MAC addressed based filtering on bridge ports.
+# This currently requires ebtables to be installed.
+#
+# mac_filter = 1
+
+# By default, PCI devices below non-ACS switch are not allowed to be assigned
+# to guests. By setting relaxed_acs_check to 1 such devices will be allowed to
+# be assigned to guests.
+#
+# relaxed_acs_check = 1
diff --git a/contrib/puppet/files/etc/lvm/lvm.conf b/contrib/puppet/files/etc/lvm/lvm.conf
new file mode 100644
index 000000000..4e814ad49
--- /dev/null
+++ b/contrib/puppet/files/etc/lvm/lvm.conf
@@ -0,0 +1,463 @@
+# This is an example configuration file for the LVM2 system.
+# It contains the default settings that would be used if there was no
+# /etc/lvm/lvm.conf file.
+#
+# Refer to 'man lvm.conf' for further information including the file layout.
+#
+# To put this file in a different directory and override /etc/lvm set
+# the environment variable LVM_SYSTEM_DIR before running the tools.
+
+
+# This section allows you to configure which block devices should
+# be used by the LVM system.
+devices {
+
+ # Where do you want your volume groups to appear ?
+ dir = "/dev"
+
+ # An array of directories that contain the device nodes you wish
+ # to use with LVM2.
+ scan = [ "/dev" ]
+
+ # If several entries in the scanned directories correspond to the
+ # same block device and the tools need to display a name for device,
+ # all the pathnames are matched against each item in the following
+ # list of regular expressions in turn and the first match is used.
+ preferred_names = [ ]
+
+ # Try to avoid using undescriptive /dev/dm-N names, if present.
+ # preferred_names = [ "^/dev/mpath/", "^/dev/mapper/mpath", "^/dev/[hs]d" ]
+
+ # A filter that tells LVM2 to only use a restricted set of devices.
+ # The filter consists of an array of regular expressions. These
+ # expressions can be delimited by a character of your choice, and
+ # prefixed with either an 'a' (for accept) or 'r' (for reject).
+ # The first expression found to match a device name determines if
+ # the device will be accepted or rejected (ignored). Devices that
+ # don't match any patterns are accepted.
+
+ # Be careful if there are symbolic links or multiple filesystem
+ # entries for the same device as each name is checked separately against
+ # the list of patterns. The effect is that if any name matches any 'a'
+ # pattern, the device is accepted; otherwise if any name matches any 'r'
+ # pattern it is rejected; otherwise it is accepted.
+
+ # Don't have more than one filter line active at once: only one gets used.
+
+ # Run vgscan after you change this parameter to ensure that
+ # the cache file gets regenerated (see below).
+ # If it doesn't do what you expect, check the output of 'vgscan -vvvv'.
+
+
+ # By default we accept every block device:
+ filter = [ "r|/dev/etherd/.*|", "r|/dev/block/.*|", "a/.*/" ]
+
+ # Exclude the cdrom drive
+ # filter = [ "r|/dev/cdrom|" ]
+
+ # When testing I like to work with just loopback devices:
+ # filter = [ "a/loop/", "r/.*/" ]
+
+ # Or maybe all loops and ide drives except hdc:
+ # filter =[ "a|loop|", "r|/dev/hdc|", "a|/dev/ide|", "r|.*|" ]
+
+ # Use anchors if you want to be really specific
+ # filter = [ "a|^/dev/hda8$|", "r/.*/" ]
+
+ # The results of the filtering are cached on disk to avoid
+ # rescanning dud devices (which can take a very long time).
+ # By default this cache is stored in the /etc/lvm/cache directory
+ # in a file called '.cache'.
+ # It is safe to delete the contents: the tools regenerate it.
+ # (The old setting 'cache' is still respected if neither of
+ # these new ones is present.)
+ cache_dir = "/etc/lvm/cache"
+ cache_file_prefix = ""
+
+ # You can turn off writing this cache file by setting this to 0.
+ write_cache_state = 1
+
+ # Advanced settings.
+
+ # List of pairs of additional acceptable block device types found
+ # in /proc/devices with maximum (non-zero) number of partitions.
+ # types = [ "fd", 16 ]
+
+ # If sysfs is mounted (2.6 kernels) restrict device scanning to
+ # the block devices it believes are valid.
+ # 1 enables; 0 disables.
+ sysfs_scan = 1
+
+ # By default, LVM2 will ignore devices used as components of
+ # software RAID (md) devices by looking for md superblocks.
+ # 1 enables; 0 disables.
+ md_component_detection = 1
+
+ # By default, if a PV is placed directly upon an md device, LVM2
+ # will align its data blocks with the md device's stripe-width.
+ # 1 enables; 0 disables.
+ md_chunk_alignment = 1
+
+ # By default, the start of a PV's data area will be a multiple of
+ # the 'minimum_io_size' or 'optimal_io_size' exposed in sysfs.
+ # - minimum_io_size - the smallest request the device can perform
+ # w/o incurring a read-modify-write penalty (e.g. MD's chunk size)
+ # - optimal_io_size - the device's preferred unit of receiving I/O
+ # (e.g. MD's stripe width)
+ # minimum_io_size is used if optimal_io_size is undefined (0).
+ # If md_chunk_alignment is enabled, that detects the optimal_io_size.
+ # This setting takes precedence over md_chunk_alignment.
+ # 1 enables; 0 disables.
+ data_alignment_detection = 1
+
+ # Alignment (in KB) of start of data area when creating a new PV.
+ # If a PV is placed directly upon an md device and md_chunk_alignment or
+ # data_alignment_detection is enabled this parameter is ignored.
+ # Set to 0 for the default alignment of 64KB or page size, if larger.
+ data_alignment = 0
+
+ # By default, the start of the PV's aligned data area will be shifted by
+ # the 'alignment_offset' exposed in sysfs. This offset is often 0 but
+ # may be non-zero; e.g.: certain 4KB sector drives that compensate for
+ # windows partitioning will have an alignment_offset of 3584 bytes
+ # (sector 7 is the lowest aligned logical block, the 4KB sectors start
+ # at LBA -1, and consequently sector 63 is aligned on a 4KB boundary).
+ # 1 enables; 0 disables.
+ data_alignment_offset_detection = 1
+
+ # If, while scanning the system for PVs, LVM2 encounters a device-mapper
+ # device that has its I/O suspended, it waits for it to become accessible.
+ # Set this to 1 to skip such devices. This should only be needed
+ # in recovery situations.
+ ignore_suspended_devices = 0
+}
+
+# This section that allows you to configure the nature of the
+# information that LVM2 reports.
+log {
+
+ # Controls the messages sent to stdout or stderr.
+ # There are three levels of verbosity, 3 being the most verbose.
+ verbose = 0
+
+ # Should we send log messages through syslog?
+ # 1 is yes; 0 is no.
+ syslog = 1
+
+ # Should we log error and debug messages to a file?
+ # By default there is no log file.
+ #file = "/var/log/lvm2.log"
+
+ # Should we overwrite the log file each time the program is run?
+ # By default we append.
+ overwrite = 0
+
+ # What level of log messages should we send to the log file and/or syslog?
+ # There are 6 syslog-like log levels currently in use - 2 to 7 inclusive.
+ # 7 is the most verbose (LOG_DEBUG).
+ level = 0
+
+ # Format of output messages
+ # Whether or not (1 or 0) to indent messages according to their severity
+ indent = 1
+
+ # Whether or not (1 or 0) to display the command name on each line output
+ command_names = 0
+
+ # A prefix to use before the message text (but after the command name,
+ # if selected). Default is two spaces, so you can see/grep the severity
+ # of each message.
+ prefix = " "
+
+ # To make the messages look similar to the original LVM tools use:
+ # indent = 0
+ # command_names = 1
+ # prefix = " -- "
+
+ # Set this if you want log messages during activation.
+ # Don't use this in low memory situations (can deadlock).
+ # activation = 0
+}
+
+# Configuration of metadata backups and archiving. In LVM2 when we
+# talk about a 'backup' we mean making a copy of the metadata for the
+# *current* system. The 'archive' contains old metadata configurations.
+# Backups are stored in a human readable text format.
+backup {
+
+ # Should we maintain a backup of the current metadata configuration ?
+ # Use 1 for Yes; 0 for No.
+ # Think very hard before turning this off!
+ backup = 1
+
+ # Where shall we keep it ?
+ # Remember to back up this directory regularly!
+ backup_dir = "/etc/lvm/backup"
+
+ # Should we maintain an archive of old metadata configurations.
+ # Use 1 for Yes; 0 for No.
+ # On by default. Think very hard before turning this off.
+ archive = 1
+
+ # Where should archived files go ?
+ # Remember to back up this directory regularly!
+ archive_dir = "/etc/lvm/archive"
+
+ # What is the minimum number of archive files you wish to keep ?
+ retain_min = 10
+
+ # What is the minimum time you wish to keep an archive file for ?
+ retain_days = 30
+}
+
+# Settings for the running LVM2 in shell (readline) mode.
+shell {
+
+ # Number of lines of history to store in ~/.lvm_history
+ history_size = 100
+}
+
+
+# Miscellaneous global LVM2 settings
+global {
+
+ # The file creation mask for any files and directories created.
+ # Interpreted as octal if the first digit is zero.
+ umask = 077
+
+ # Allow other users to read the files
+ #umask = 022
+
+ # Enabling test mode means that no changes to the on disk metadata
+ # will be made. Equivalent to having the -t option on every
+ # command. Defaults to off.
+ test = 0
+
+ # Default value for --units argument
+ units = "h"
+
+ # Since version 2.02.54, the tools distinguish between powers of
+ # 1024 bytes (e.g. KiB, MiB, GiB) and powers of 1000 bytes (e.g.
+ # KB, MB, GB).
+ # If you have scripts that depend on the old behaviour, set this to 0
+ # temporarily until you update them.
+ si_unit_consistency = 1
+
+ # Whether or not to communicate with the kernel device-mapper.
+ # Set to 0 if you want to use the tools to manipulate LVM metadata
+ # without activating any logical volumes.
+ # If the device-mapper kernel driver is not present in your kernel
+ # setting this to 0 should suppress the error messages.
+ activation = 1
+
+ # If we can't communicate with device-mapper, should we try running
+ # the LVM1 tools?
+ # This option only applies to 2.4 kernels and is provided to help you
+ # switch between device-mapper kernels and LVM1 kernels.
+ # The LVM1 tools need to be installed with .lvm1 suffices
+ # e.g. vgscan.lvm1 and they will stop working after you start using
+ # the new lvm2 on-disk metadata format.
+ # The default value is set when the tools are built.
+ # fallback_to_lvm1 = 0
+
+ # The default metadata format that commands should use - "lvm1" or "lvm2".
+ # The command line override is -M1 or -M2.
+ # Defaults to "lvm2".
+ # format = "lvm2"
+
+ # Location of proc filesystem
+ proc = "/proc"
+
+ # Type of locking to use. Defaults to local file-based locking (1).
+ # Turn locking off by setting to 0 (dangerous: risks metadata corruption
+ # if LVM2 commands get run concurrently).
+ # Type 2 uses the external shared library locking_library.
+ # Type 3 uses built-in clustered locking.
+ # Type 4 uses read-only locking which forbids any operations that might
+ # change metadata.
+ locking_type = 1
+
+ # Set to 0 to fail when a lock request cannot be satisfied immediately.
+ wait_for_locks = 1
+
+ # If using external locking (type 2) and initialisation fails,
+ # with this set to 1 an attempt will be made to use the built-in
+ # clustered locking.
+ # If you are using a customised locking_library you should set this to 0.
+ fallback_to_clustered_locking = 1
+
+ # If an attempt to initialise type 2 or type 3 locking failed, perhaps
+ # because cluster components such as clvmd are not running, with this set
+ # to 1 an attempt will be made to use local file-based locking (type 1).
+ # If this succeeds, only commands against local volume groups will proceed.
+ # Volume Groups marked as clustered will be ignored.
+ fallback_to_local_locking = 1
+
+ # Local non-LV directory that holds file-based locks while commands are
+ # in progress. A directory like /tmp that may get wiped on reboot is OK.
+ locking_dir = "/var/lock/lvm"
+
+ # Whenever there are competing read-only and read-write access requests for
+ # a volume group's metadata, instead of always granting the read-only
+ # requests immediately, delay them to allow the read-write requests to be
+ # serviced. Without this setting, write access may be stalled by a high
+ # volume of read-only requests.
+ # NB. This option only affects locking_type = 1 viz. local file-based
+ # locking.
+ prioritise_write_locks = 1
+
+ # Other entries can go here to allow you to load shared libraries
+ # e.g. if support for LVM1 metadata was compiled as a shared library use
+ # format_libraries = "liblvm2format1.so"
+ # Full pathnames can be given.
+
+ # Search this directory first for shared libraries.
+ # library_dir = "/lib/lvm2"
+
+ # The external locking library to load if locking_type is set to 2.
+ # locking_library = "liblvm2clusterlock.so"
+}
+
+activation {
+ # Set to 0 to disable udev synchronisation (if compiled into the binaries).
+ # Processes will not wait for notification from udev.
+ # They will continue irrespective of any possible udev processing
+ # in the background. You should only use this if udev is not running
+ # or has rules that ignore the devices LVM2 creates.
+ # The command line argument --nodevsync takes precedence over this setting.
+ # If set to 1 when udev is not running, and there are LVM2 processes
+ # waiting for udev, run 'dmsetup udevcomplete_all' manually to wake them up.
+ udev_sync = 1
+
+ # How to fill in missing stripes if activating an incomplete volume.
+ # Using "error" will make inaccessible parts of the device return
+ # I/O errors on access. You can instead use a device path, in which
+ # case, that device will be used in place of missing stripes.
+ # But note that using anything other than "error" with mirrored
+ # or snapshotted volumes is likely to result in data corruption.
+ missing_stripe_filler = "error"
+
+ # How much stack (in KB) to reserve for use while devices suspended
+ reserved_stack = 256
+
+ # How much memory (in KB) to reserve for use while devices suspended
+ reserved_memory = 8192
+
+ # Nice value used while devices suspended
+ process_priority = -18
+
+ # If volume_list is defined, each LV is only activated if there is a
+ # match against the list.
+ # "vgname" and "vgname/lvname" are matched exactly.
+ # "@tag" matches any tag set in the LV or VG.
+ # "@*" matches if any tag defined on the host is also set in the LV or VG
+ #
+ # volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
+
+ # Size (in KB) of each copy operation when mirroring
+ mirror_region_size = 512
+
+ # Setting to use when there is no readahead value stored in the metadata.
+ #
+ # "none" - Disable readahead.
+ # "auto" - Use default value chosen by kernel.
+ readahead = "auto"
+
+ # 'mirror_image_fault_policy' and 'mirror_log_fault_policy' define
+ # how a device failure affecting a mirror is handled.
+ # A mirror is composed of mirror images (copies) and a log.
+ # A disk log ensures that a mirror does not need to be re-synced
+ # (all copies made the same) every time a machine reboots or crashes.
+ #
+ # In the event of a failure, the specified policy will be used to determine
+ # what happens. This applies to automatic repairs (when the mirror is being
+ # monitored by dmeventd) and to manual lvconvert --repair when
+ # --use-policies is given.
+ #
+ # "remove" - Simply remove the faulty device and run without it. If
+ # the log device fails, the mirror would convert to using
+ # an in-memory log. This means the mirror will not
+ # remember its sync status across crashes/reboots and
+ # the entire mirror will be re-synced. If a
+ # mirror image fails, the mirror will convert to a
+ # non-mirrored device if there is only one remaining good
+ # copy.
+ #
+ # "allocate" - Remove the faulty device and try to allocate space on
+ # a new device to be a replacement for the failed device.
+ # Using this policy for the log is fast and maintains the
+ # ability to remember sync state through crashes/reboots.
+ # Using this policy for a mirror device is slow, as it
+ # requires the mirror to resynchronize the devices, but it
+ # will preserve the mirror characteristic of the device.
+ # This policy acts like "remove" if no suitable device and
+ # space can be allocated for the replacement.
+ #
+ # "allocate_anywhere" - Not yet implemented. Useful to place the log device
+ # temporarily on same physical volume as one of the mirror
+ # images. This policy is not recommended for mirror devices
+ # since it would break the redundant nature of the mirror. This
+ # policy acts like "remove" if no suitable device and space can
+ # be allocated for the replacement.
+
+ mirror_log_fault_policy = "allocate"
+ mirror_device_fault_policy = "remove"
+}
+
+
+####################
+# Advanced section #
+####################
+
+# Metadata settings
+#
+# metadata {
+ # Default number of copies of metadata to hold on each PV. 0, 1 or 2.
+ # You might want to override it from the command line with 0
+ # when running pvcreate on new PVs which are to be added to large VGs.
+
+ # pvmetadatacopies = 1
+
+ # Approximate default size of on-disk metadata areas in sectors.
+ # You should increase this if you have large volume groups or
+ # you want to retain a large on-disk history of your metadata changes.
+
+ # pvmetadatasize = 255
+
+ # List of directories holding live copies of text format metadata.
+ # These directories must not be on logical volumes!
+ # It's possible to use LVM2 with a couple of directories here,
+ # preferably on different (non-LV) filesystems, and with no other
+ # on-disk metadata (pvmetadatacopies = 0). Or this can be in
+ # addition to on-disk metadata areas.
+ # The feature was originally added to simplify testing and is not
+ # supported under low memory situations - the machine could lock up.
+ #
+ # Never edit any files in these directories by hand unless you
+ # you are absolutely sure you know what you are doing! Use
+ # the supplied toolset to make changes (e.g. vgcfgrestore).
+
+ # dirs = [ "/etc/lvm/metadata", "/mnt/disk2/lvm/metadata2" ]
+#}
+
+# Event daemon
+#
+dmeventd {
+ # mirror_library is the library used when monitoring a mirror device.
+ #
+ # "libdevmapper-event-lvm2mirror.so" attempts to recover from
+ # failures. It removes failed devices from a volume group and
+ # reconfigures a mirror as necessary. If no mirror library is
+ # provided, mirrors are not monitored through dmeventd.
+
+ mirror_library = "libdevmapper-event-lvm2mirror.so"
+
+ # snapshot_library is the library used when monitoring a snapshot device.
+ #
+ # "libdevmapper-event-lvm2snapshot.so" monitors the filling of
+ # snapshots and emits a warning through syslog, when the use of
+ # snapshot exceeds 80%. The warning is repeated when 85%, 90% and
+ # 95% of the snapshot are filled.
+
+ snapshot_library = "libdevmapper-event-lvm2snapshot.so"
+}
diff --git a/contrib/puppet/files/etc/nova.conf b/contrib/puppet/files/etc/nova.conf
new file mode 100644
index 000000000..a0d64078c
--- /dev/null
+++ b/contrib/puppet/files/etc/nova.conf
@@ -0,0 +1,28 @@
+--ec2_url=http://192.168.255.1:8773/services/Cloud
+--rabbit_host=192.168.255.1
+--redis_host=192.168.255.1
+--s3_host=192.168.255.1
+--vpn_ip=192.168.255.1
+--datastore_path=/var/lib/nova/keeper
+--networks_path=/var/lib/nova/networks
+--instances_path=/var/lib/nova/instances
+--buckets_path=/var/lib/nova/objectstore/buckets
+--images_path=/var/lib/nova/objectstore/images
+--ca_path=/var/lib/nova/CA
+--keys_path=/var/lib/nova/keys
+--vlan_start=2000
+--vlan_end=3000
+--private_range=192.168.0.0/16
+--public_range=10.0.0.0/24
+--volume_group=vgdata
+--storage_dev=/dev/sdc
+--bridge_dev=eth2
+--aoe_eth_dev=eth2
+--public_interface=vlan0
+--default_kernel=aki-DEFAULT
+--default_ramdisk=ari-DEFAULT
+--vpn_image_id=ami-cloudpipe
+--daemonize
+--verbose
+--syslog
+--prefix=nova
diff --git a/contrib/puppet/files/production/boto.cfg b/contrib/puppet/files/production/boto.cfg
new file mode 100644
index 000000000..f4a2de2b6
--- /dev/null
+++ b/contrib/puppet/files/production/boto.cfg
@@ -0,0 +1,3 @@
+[Boto]
+debug = 0
+num_retries = 1
diff --git a/contrib/puppet/files/production/genvpn.sh b/contrib/puppet/files/production/genvpn.sh
new file mode 100644
index 000000000..538c3cd33
--- /dev/null
+++ b/contrib/puppet/files/production/genvpn.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This gets zipped and run on the cloudpipe-managed OpenVPN server
+NAME=$1
+SUBJ=$2
+
+mkdir -p projects/$NAME
+cd projects/$NAME
+
+# generate a server priv key
+openssl genrsa -out server.key 2048
+
+# generate a server CSR
+openssl req -new -key server.key -out server.csr -batch -subj "$SUBJ"
+
+if [ "`id -u`" != "`grep nova /etc/passwd | cut -d':' -f3`" ]; then
+ sudo chown -R nova:nogroup .
+fi
diff --git a/contrib/puppet/files/production/libvirt.qemu.xml.template b/contrib/puppet/files/production/libvirt.qemu.xml.template
new file mode 100644
index 000000000..114dfdc01
--- /dev/null
+++ b/contrib/puppet/files/production/libvirt.qemu.xml.template
@@ -0,0 +1,35 @@
+<domain type='%(type)s'>
+ <name>%(name)s</name>
+ <os>
+ <type>hvm</type>
+ <kernel>%(basepath)s/kernel</kernel>
+ <initrd>%(basepath)s/ramdisk</initrd>
+ <cmdline>root=/dev/vda1 console=ttyS0</cmdline>
+ </os>
+ <features>
+ <acpi/>
+ </features>
+ <memory>%(memory_kb)s</memory>
+ <vcpu>%(vcpus)s</vcpu>
+ <devices>
+ <disk type='file'>
+ <source file='%(basepath)s/disk'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <interface type='bridge'>
+ <source bridge='%(bridge_name)s'/>
+ <mac address='%(mac_address)s'/>
+ <!-- <model type='virtio'/> CANT RUN virtio network right now -->
+ <!--
+ <filterref filter="nova-instance-%(name)s">
+ <parameter name="IP" value="%(ip_address)s" />
+ <parameter name="DHCPSERVER" value="%(dhcp_server)s" />
+ </filterref>
+ -->
+ </interface>
+ <serial type="file">
+ <source path='%(basepath)s/console.log'/>
+ <target port='1'/>
+ </serial>
+ </devices>
+</domain>
diff --git a/contrib/puppet/files/production/my.cnf b/contrib/puppet/files/production/my.cnf
new file mode 100644
index 000000000..8777bc480
--- /dev/null
+++ b/contrib/puppet/files/production/my.cnf
@@ -0,0 +1,137 @@
+#
+# The MySQL database server configuration file.
+#
+# You can copy this to one of:
+# - "/etc/mysql/my.cnf" to set global options,
+# - "~/.my.cnf" to set user-specific options.
+#
+# One can use all long options that the program supports.
+# Run program with --help to get a list of available options and with
+# --print-defaults to see which it would actually understand and use.
+#
+# For explanations see
+# http://dev.mysql.com/doc/mysql/en/server-system-variables.html
+
+# This will be passed to all mysql clients
+# It has been reported that passwords should be enclosed with ticks/quotes
+# especially if they contain "#" chars...
+# Remember to edit /etc/mysql/debian.cnf when changing the socket location.
+[client]
+port = 3306
+socket = /var/run/mysqld/mysqld.sock
+
+# Here are entries for some specific programs
+# The following values assume you have at least 32M ram
+
+# This was formerly known as [safe_mysqld]. Both versions are currently parsed.
+[mysqld_safe]
+socket = /var/run/mysqld/mysqld.sock
+nice = 0
+
+[mysqld]
+#
+# * Basic Settings
+#
+
+#
+# * IMPORTANT
+# If you make changes to these settings and your system uses apparmor, you may
+# also need to also adjust /etc/apparmor.d/usr.sbin.mysqld.
+#
+
+user = mysql
+socket = /var/run/mysqld/mysqld.sock
+port = 3306
+basedir = /usr
+datadir = /var/lib/mysql
+tmpdir = /tmp
+skip-external-locking
+#
+# Instead of skip-networking the default is now to listen only on
+# localhost which is more compatible and is not less secure.
+# bind-address = 127.0.0.1
+#
+# * Fine Tuning
+#
+innodb_buffer_pool_size = 12G
+#innodb_log_file_size = 256M
+innodb_log_buffer_size=4M
+innodb_flush_log_at_trx_commit=2
+innodb_thread_concurrency=8
+innodb_flush_method=O_DIRECT
+key_buffer = 128M
+max_allowed_packet = 256M
+thread_stack = 8196K
+thread_cache_size = 32
+# This replaces the startup script and checks MyISAM tables if needed
+# the first time they are touched
+myisam-recover = BACKUP
+max_connections = 1000
+table_cache = 1024
+#thread_concurrency = 10
+#
+# * Query Cache Configuration
+#
+query_cache_limit = 32M
+query_cache_size = 256M
+#
+# * Logging and Replication
+#
+# Both locations get rotated by the cronjob.
+# Be aware that this log type is a performance killer.
+# As of 5.1 you can enable the log at runtime!
+#general_log_file = /var/log/mysql/mysql.log
+#general_log = 1
+
+log_error = /var/log/mysql/error.log
+
+# Here you can see queries with especially long duration
+log_slow_queries = /var/log/mysql/mysql-slow.log
+long_query_time = 2
+#log-queries-not-using-indexes
+#
+# The following can be used as easy to replay backup logs or for replication.
+# note: if you are setting up a replication slave, see README.Debian about
+# other settings you may need to change.
+server-id = 1
+log_bin = /var/log/mysql/mysql-bin.log
+expire_logs_days = 10
+max_binlog_size = 50M
+#binlog_do_db = include_database_name
+#binlog_ignore_db = include_database_name
+#
+# * InnoDB
+#
+sync_binlog=1
+# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/.
+# Read the manual for more InnoDB related options. There are many!
+#
+# * Security Features
+#
+# Read the manual, too, if you want chroot!
+# chroot = /var/lib/mysql/
+#
+# For generating SSL certificates I recommend the OpenSSL GUI "tinyca".
+#
+# ssl-ca=/etc/mysql/cacert.pem
+# ssl-cert=/etc/mysql/server-cert.pem
+# ssl-key=/etc/mysql/server-key.pem
+
+
+
+[mysqldump]
+quick
+quote-names
+max_allowed_packet = 256M
+
+[mysql]
+#no-auto-rehash	# faster start of mysql but no tab completion
+
+[isamchk]
+key_buffer = 128M
+
+#
+# * IMPORTANT: Additional settings that can override those from this file!
+# The files must end with '.cnf', otherwise they'll be ignored.
+#
+!includedir /etc/mysql/conf.d/
diff --git a/contrib/puppet/files/production/nova-iptables b/contrib/puppet/files/production/nova-iptables
new file mode 100755
index 000000000..b7b52df87
--- /dev/null
+++ b/contrib/puppet/files/production/nova-iptables
@@ -0,0 +1,185 @@
+#! /bin/sh
+
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# NOTE(vish): This script sets up some reasonable defaults for iptables and
+# creates nova-specific chains. If you use this script you should
+# run nova-network and nova-compute with --use_nova_chains=True
+
+
+# NOTE(vish): If you run public nova-api on a different port, make sure to
+# change the port here
+
+if [ -f /etc/default/nova-iptables ] ; then
+ . /etc/default/nova-iptables
+fi
+
+API_PORT=${API_PORT:-"8773"}
+
+if [ ! -n "$IP" ]; then
+ # NOTE(vish): IP address is what address the services ALLOW on.
+ # This will just get the first ip in the list, so if you
+ # have more than one eth device set up, this will fail, and
+ # you should explicitly pass in the ip of the instance
+ IP=`ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'`
+fi
+
+if [ ! -n "$PRIVATE_RANGE" ]; then
+    # NOTE(vish): PRIVATE_RANGE: range that is ALLOWED to access DHCP
+ PRIVATE_RANGE="192.168.0.0/12"
+fi
+
+if [ ! -n "$MGMT_IP" ]; then
+ # NOTE(vish): Management IP is the ip over which to allow ssh traffic. It
+ # will also allow traffic to nova-api
+ MGMT_IP="$IP"
+fi
+
+if [ ! -n "$DMZ_IP" ]; then
+ # NOTE(vish): DMZ IP is the ip over which to allow api & objectstore access
+ DMZ_IP="$IP"
+fi
+
+clear_nova_iptables() {
+ iptables -P INPUT ACCEPT
+ iptables -P FORWARD ACCEPT
+ iptables -P OUTPUT ACCEPT
+ iptables -F
+ iptables -t nat -F
+ iptables -F services
+ iptables -X services
+ # HACK: re-adding fail2ban rules :(
+ iptables -N fail2ban-ssh
+ iptables -A INPUT -p tcp -m multiport --dports 22 -j fail2ban-ssh
+ iptables -A fail2ban-ssh -j RETURN
+}
+
+load_nova_iptables() {
+
+ iptables -P INPUT DROP
+ iptables -A INPUT -m state --state INVALID -j DROP
+ iptables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
+ # NOTE(ja): allow localhost for everything
+ iptables -A INPUT -d 127.0.0.1/32 -j ACCEPT
+ # NOTE(ja): 22 only allowed MGMT_IP before, but we widened it to any
+ # address, since ssh should be listening only on internal
+ # before we re-add this rule we will need to add
+ # flexibility for RSYNC between omega/stingray
+ iptables -A INPUT -m tcp -p tcp --dport 22 -j ACCEPT
+ iptables -A INPUT -m udp -p udp --dport 123 -j ACCEPT
+ iptables -A INPUT -p icmp -j ACCEPT
+ iptables -N services
+ iptables -A INPUT -j services
+ iptables -A INPUT -p tcp -j REJECT --reject-with tcp-reset
+ iptables -A INPUT -j REJECT --reject-with icmp-port-unreachable
+
+ iptables -P FORWARD DROP
+ iptables -A FORWARD -m state --state INVALID -j DROP
+ iptables -A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT
+ iptables -A FORWARD -p tcp -m tcp --tcp-flags SYN,RST SYN -j TCPMSS --clamp-mss-to-pmtu
+
+ # NOTE(vish): DROP on output is too restrictive for now. We need to add
+ # in a bunch of more specific output rules to use it.
+ # iptables -P OUTPUT DROP
+ iptables -A OUTPUT -m state --state INVALID -j DROP
+ iptables -A OUTPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
+
+ if [ -n "$GANGLIA" ] || [ -n "$ALL" ]; then
+ iptables -A services -m tcp -p tcp -d $IP --dport 8649 -j ACCEPT
+ iptables -A services -m udp -p udp -d $IP --dport 8649 -j ACCEPT
+ fi
+
+ # if [ -n "$WEB" ] || [ -n "$ALL" ]; then
+ # # NOTE(vish): This opens up ports for web access, allowing web-based
+ # # dashboards to work.
+ # iptables -A services -m tcp -p tcp -d $IP --dport 80 -j ACCEPT
+ # iptables -A services -m tcp -p tcp -d $IP --dport 443 -j ACCEPT
+ # fi
+
+ if [ -n "$OBJECTSTORE" ] || [ -n "$ALL" ]; then
+ # infrastructure
+ iptables -A services -m tcp -p tcp -d $IP --dport 3333 -j ACCEPT
+ # clients
+ iptables -A services -m tcp -p tcp -d $DMZ_IP --dport 3333 -j ACCEPT
+ fi
+
+ if [ -n "$API" ] || [ -n "$ALL" ]; then
+ iptables -A services -m tcp -p tcp -d $IP --dport $API_PORT -j ACCEPT
+ if [ "$IP" != "$DMZ_IP" ]; then
+ iptables -A services -m tcp -p tcp -d $DMZ_IP --dport $API_PORT -j ACCEPT
+ fi
+ if [ "$IP" != "$MGMT_IP" ] && [ "$DMZ_IP" != "$MGMT_IP" ]; then
+ iptables -A services -m tcp -p tcp -d $MGMT_IP --dport $API_PORT -j ACCEPT
+ fi
+ fi
+
+ if [ -n "$REDIS" ] || [ -n "$ALL" ]; then
+ iptables -A services -m tcp -p tcp -d $IP --dport 6379 -j ACCEPT
+ fi
+
+ if [ -n "$MYSQL" ] || [ -n "$ALL" ]; then
+ iptables -A services -m tcp -p tcp -d $IP --dport 3306 -j ACCEPT
+ fi
+
+ if [ -n "$RABBITMQ" ] || [ -n "$ALL" ]; then
+ iptables -A services -m tcp -p tcp -d $IP --dport 4369 -j ACCEPT
+ iptables -A services -m tcp -p tcp -d $IP --dport 5672 -j ACCEPT
+ iptables -A services -m tcp -p tcp -d $IP --dport 53284 -j ACCEPT
+ fi
+
+ if [ -n "$DNSMASQ" ] || [ -n "$ALL" ]; then
+ # NOTE(vish): this could theoretically be setup per network
+ # for each host, but it seems like overkill
+ iptables -A services -m tcp -p tcp -s $PRIVATE_RANGE --dport 53 -j ACCEPT
+ iptables -A services -m udp -p udp -s $PRIVATE_RANGE --dport 53 -j ACCEPT
+ iptables -A services -m udp -p udp --dport 67 -j ACCEPT
+ fi
+
+ if [ -n "$LDAP" ] || [ -n "$ALL" ]; then
+ iptables -A services -m tcp -p tcp -d $IP --dport 389 -j ACCEPT
+ fi
+
+ if [ -n "$ISCSI" ] || [ -n "$ALL" ]; then
+ iptables -A services -m tcp -p tcp -d $IP --dport 3260 -j ACCEPT
+ iptables -A services -m tcp -p tcp -d 127.0.0.0/16 --dport 3260 -j ACCEPT
+ fi
+}
+
+
+case "$1" in
+ start)
+ echo "Starting nova-iptables: "
+ load_nova_iptables
+ ;;
+ stop)
+ echo "Clearing nova-iptables: "
+ clear_nova_iptables
+ ;;
+ restart)
+ echo "Restarting nova-iptables: "
+ clear_nova_iptables
+ load_nova_iptables
+ ;;
+ *)
+ echo "Usage: $NAME {start|stop|restart}" >&2
+ exit 1
+ ;;
+esac
+
+exit 0
diff --git a/contrib/puppet/files/production/nova-iscsi-dev.sh b/contrib/puppet/files/production/nova-iscsi-dev.sh
new file mode 100644
index 000000000..8eda10d2e
--- /dev/null
+++ b/contrib/puppet/files/production/nova-iscsi-dev.sh
@@ -0,0 +1,19 @@
+#!/bin/sh
+
+# FILE: /etc/udev/scripts/iscsidev.sh
+
+BUS=${1}
+HOST=${BUS%%:*}
+
+[ -e /sys/class/iscsi_host ] || exit 1
+
+file="/sys/class/iscsi_host/host${HOST}/device/session*/iscsi_session*/session*/targetname"
+
+target_name=$(cat ${file})
+
+# This is not an open-iscsi drive
+if [ -z "${target_name}" ]; then
+ exit 1
+fi
+
+echo "${target_name##*:}"
diff --git a/contrib/puppet/files/production/setup_data.sh b/contrib/puppet/files/production/setup_data.sh
new file mode 100755
index 000000000..1fbbac41c
--- /dev/null
+++ b/contrib/puppet/files/production/setup_data.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+/root/slap.sh
+mysql -e "DROP DATABASE nova"
+mysql -e "CREATE DATABASE nova"
+mysql -e "GRANT ALL on nova.* to nova@'%' identified by 'TODO:CHANGEME:CMON'"
+touch /root/installed
diff --git a/contrib/puppet/files/production/slap.sh b/contrib/puppet/files/production/slap.sh
new file mode 100755
index 000000000..f8ea16949
--- /dev/null
+++ b/contrib/puppet/files/production/slap.sh
@@ -0,0 +1,261 @@
+#!/usr/bin/env bash
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+# LDAP INSTALL SCRIPT - SHOULD BE IDEMPOTENT, but it SCRUBS all USERS
+
+apt-get install -y slapd ldap-utils python-ldap
+
+cat >/etc/ldap/schema/openssh-lpk_openldap.schema <<LPK_SCHEMA_EOF
+#
+# LDAP Public Key Patch schema for use with openssh-ldappubkey
+# Author: Eric AUGE <eau@phear.org>
+#
+# Based on the proposal of : Mark Ruijter
+#
+
+
+# octetString SYNTAX
+attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey'
+ DESC 'MANDATORY: OpenSSH Public key'
+ EQUALITY octetStringMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 )
+
+# printableString SYNTAX yes|no
+objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY
+ DESC 'MANDATORY: OpenSSH LPK objectclass'
+ MAY ( sshPublicKey $ uid )
+ )
+LPK_SCHEMA_EOF
+
+cat >/etc/ldap/schema/nova.schema <<NOVA_SCHEMA_EOF
+#
+# Person object for Nova
+# inetorgperson with extra attributes
+# Author: Vishvananda Ishaya <vishvananda@yahoo.com>
+#
+#
+
+# using internet experimental oid arc as per BP64 3.1
+objectidentifier novaSchema 1.3.6.1.3.1.666.666
+objectidentifier novaAttrs novaSchema:3
+objectidentifier novaOCs novaSchema:4
+
+attributetype (
+ novaAttrs:1
+ NAME 'accessKey'
+ DESC 'Key for accessing data'
+ EQUALITY caseIgnoreMatch
+ SUBSTR caseIgnoreSubstringsMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
+ SINGLE-VALUE
+ )
+
+attributetype (
+ novaAttrs:2
+ NAME 'secretKey'
+ DESC 'Secret key'
+ EQUALITY caseIgnoreMatch
+ SUBSTR caseIgnoreSubstringsMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
+ SINGLE-VALUE
+ )
+
+attributetype (
+ novaAttrs:3
+ NAME 'keyFingerprint'
+ DESC 'Fingerprint of private key'
+ EQUALITY caseIgnoreMatch
+ SUBSTR caseIgnoreSubstringsMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
+ SINGLE-VALUE
+ )
+
+attributetype (
+ novaAttrs:4
+ NAME 'isAdmin'
+ DESC 'Is user an administrator?'
+ EQUALITY booleanMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.7
+ SINGLE-VALUE
+ )
+
+attributetype (
+ novaAttrs:5
+ NAME 'projectManager'
+ DESC 'Project Managers of a project'
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.12
+ )
+
+objectClass (
+ novaOCs:1
+ NAME 'novaUser'
+ DESC 'access and secret keys'
+ AUXILIARY
+ MUST ( uid )
+ MAY ( accessKey $ secretKey $ isAdmin )
+ )
+
+objectClass (
+ novaOCs:2
+ NAME 'novaKeyPair'
+ DESC 'Key pair for User'
+ SUP top
+ STRUCTURAL
+ MUST ( cn $ sshPublicKey $ keyFingerprint )
+ )
+
+objectClass (
+ novaOCs:3
+ NAME 'novaProject'
+ DESC 'Container for project'
+ SUP groupOfNames
+ STRUCTURAL
+ MUST ( cn $ projectManager )
+ )
+
+NOVA_SCHEMA_EOF
+
+mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig
+cat >/etc/ldap/slapd.conf <<SLAPD_CONF_EOF
+# slapd.conf - Configuration file for LDAP SLAPD
+##########
+# Basics #
+##########
+include /etc/ldap/schema/core.schema
+include /etc/ldap/schema/cosine.schema
+include /etc/ldap/schema/inetorgperson.schema
+include /etc/ldap/schema/openssh-lpk_openldap.schema
+include /etc/ldap/schema/nova.schema
+pidfile /var/run/slapd/slapd.pid
+argsfile /var/run/slapd/slapd.args
+loglevel none
+modulepath /usr/lib/ldap
+# modulepath /usr/local/libexec/openldap
+moduleload back_hdb
+##########################
+# Database Configuration #
+##########################
+database hdb
+suffix "dc=example,dc=com"
+rootdn "cn=Manager,dc=example,dc=com"
+rootpw changeme
+directory /var/lib/ldap
+# directory /usr/local/var/openldap-data
+index objectClass,cn eq
+########
+# ACLs #
+########
+access to attrs=userPassword
+ by anonymous auth
+ by self write
+ by * none
+access to *
+ by self write
+ by * none
+SLAPD_CONF_EOF
+
+mv /etc/ldap/ldap.conf /etc/ldap/ldap.conf.orig
+
+cat >/etc/ldap/ldap.conf <<LDAP_CONF_EOF
+# LDAP Client Settings
+URI ldap://localhost
+BASE dc=example,dc=com
+BINDDN cn=Manager,dc=example,dc=com
+SIZELIMIT 0
+TIMELIMIT 0
+LDAP_CONF_EOF
+
+cat >/etc/ldap/base.ldif <<BASE_LDIF_EOF
+# This is the root of the directory tree
+dn: dc=example,dc=com
+description: Example.Com, your trusted non-existent corporation.
+dc: example
+o: Example.Com
+objectClass: top
+objectClass: dcObject
+objectClass: organization
+
+# Subtree for users
+dn: ou=Users,dc=example,dc=com
+ou: Users
+description: Users
+objectClass: organizationalUnit
+
+# Subtree for groups
+dn: ou=Groups,dc=example,dc=com
+ou: Groups
+description: Groups
+objectClass: organizationalUnit
+
+# Subtree for system accounts
+dn: ou=System,dc=example,dc=com
+ou: System
+description: Special accounts used by software applications.
+objectClass: organizationalUnit
+
+# Special Account for Authentication:
+dn: uid=authenticate,ou=System,dc=example,dc=com
+uid: authenticate
+ou: System
+description: Special account for authenticating users
+userPassword: {MD5}TODO-000000000000000000000000000==
+objectClass: account
+objectClass: simpleSecurityObject
+
+# create the sysadmin entry
+
+dn: cn=developers,ou=Groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: developers
+description: IT admin group
+member: uid=admin,ou=Users,dc=example,dc=com
+
+dn: cn=sysadmins,ou=Groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: sysadmins
+description: IT admin group
+member: uid=admin,ou=Users,dc=example,dc=com
+
+dn: cn=netadmins,ou=Groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: netadmins
+description: Network admin group
+member: uid=admin,ou=Users,dc=example,dc=com
+
+dn: cn=cloudadmins,ou=Groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: cloudadmins
+description: Cloud admin group
+member: uid=admin,ou=Users,dc=example,dc=com
+
+dn: cn=itsec,ou=Groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: itsec
+description: IT security users group
+member: uid=admin,ou=Users,dc=example,dc=com
+BASE_LDIF_EOF
+
+/etc/init.d/slapd stop
+rm -rf /var/lib/ldap/*
+rm -rf /etc/ldap/slapd.d/*
+slaptest -f /etc/ldap/slapd.conf -F /etc/ldap/slapd.d
+cp /usr/share/slapd/DB_CONFIG /var/lib/ldap/DB_CONFIG
+slapadd -v -l /etc/ldap/base.ldif
+chown -R openldap:openldap /etc/ldap/slapd.d
+chown -R openldap:openldap /var/lib/ldap
+/etc/init.d/slapd start
diff --git a/contrib/puppet/fileserver.conf b/contrib/puppet/fileserver.conf
new file mode 100644
index 000000000..6e2984b8c
--- /dev/null
+++ b/contrib/puppet/fileserver.conf
@@ -0,0 +1,8 @@
+# fileserver.conf
+
+[files]
+path /srv/cloud/puppet/files
+allow 10.0.0.0/24
+
+[plugins]
+
diff --git a/contrib/puppet/manifests/classes/apt.pp b/contrib/puppet/manifests/classes/apt.pp
new file mode 100644
index 000000000..03022aeef
--- /dev/null
+++ b/contrib/puppet/manifests/classes/apt.pp
@@ -0,0 +1 @@
+exec { "update-apt": command => "/usr/bin/apt-get update" }
diff --git a/contrib/puppet/manifests/classes/issue.pp b/contrib/puppet/manifests/classes/issue.pp
new file mode 100644
index 000000000..8bb37ee3f
--- /dev/null
+++ b/contrib/puppet/manifests/classes/issue.pp
@@ -0,0 +1,14 @@
+class issue {
+ file { "/etc/issue":
+ owner => "root",
+ group => "root",
+ mode => 444,
+ source => "puppet://${puppet_server}/files/etc/issue",
+ }
+ file { "/etc/issue.net":
+ owner => "root",
+ group => "root",
+ mode => 444,
+ source => "puppet://${puppet_server}/files/etc/issue",
+ }
+}
diff --git a/contrib/puppet/manifests/classes/kern_module.pp b/contrib/puppet/manifests/classes/kern_module.pp
new file mode 100644
index 000000000..00ec0636c
--- /dev/null
+++ b/contrib/puppet/manifests/classes/kern_module.pp
@@ -0,0 +1,34 @@
+# via http://projects.puppetlabs.com/projects/puppet/wiki/Kernel_Modules_Patterns
+
+define kern_module ($ensure) {
+ $modulesfile = $operatingsystem ? { ubuntu => "/etc/modules", redhat => "/etc/rc.modules" }
+ case $operatingsystem {
+ redhat: { file { "/etc/rc.modules": ensure => file, mode => 755 } }
+ }
+ case $ensure {
+ present: {
+ exec { "insert_module_${name}":
+ command => $operatingsystem ? {
+ ubuntu => "/bin/echo '${name}' >> '${modulesfile}'",
+ redhat => "/bin/echo '/sbin/modprobe ${name}' >> '${modulesfile}' "
+ },
+ unless => "/bin/grep -qFx '${name}' '${modulesfile}'"
+ }
+ exec { "/sbin/modprobe ${name}": unless => "/bin/grep -q '^${name} ' '/proc/modules'" }
+ }
+ absent: {
+ exec { "/sbin/modprobe -r ${name}": onlyif => "/bin/grep -q '^${name} ' '/proc/modules'" }
+ exec { "remove_module_${name}":
+ command => $operatingsystem ? {
+ ubuntu => "/usr/bin/perl -ni -e 'print unless /^\\Q${name}\\E\$/' '${modulesfile}'",
+ redhat => "/usr/bin/perl -ni -e 'print unless /^\\Q/sbin/modprobe ${name}\\E\$/' '${modulesfile}'"
+ },
+ onlyif => $operatingsystem ? {
+ ubuntu => "/bin/grep -qFx '${name}' '${modulesfile}'",
+ redhat => "/bin/grep -q '^/sbin/modprobe ${name}' '${modulesfile}'"
+ }
+ }
+ }
+ default: { err ( "unknown ensure value ${ensure}" ) }
+ }
+}
diff --git a/contrib/puppet/manifests/classes/loopback.pp b/contrib/puppet/manifests/classes/loopback.pp
new file mode 100644
index 000000000..e0fa9d541
--- /dev/null
+++ b/contrib/puppet/manifests/classes/loopback.pp
@@ -0,0 +1,6 @@
+define loopback($num) {
+ exec { "mknod -m 0660 /dev/loop${num} b 7 ${num}; chown root:disk /dev/loop${num}":
+ creates => "/dev/loop${num}",
+ path => ["/usr/bin", "/usr/sbin", "/bin"]
+ }
+}
diff --git a/contrib/puppet/manifests/classes/lvm.pp b/contrib/puppet/manifests/classes/lvm.pp
new file mode 100644
index 000000000..5a407abcb
--- /dev/null
+++ b/contrib/puppet/manifests/classes/lvm.pp
@@ -0,0 +1,8 @@
+class lvm {
+ file { "/etc/lvm/lvm.conf":
+ owner => "root",
+ group => "root",
+ mode => 444,
+ source => "puppet://${puppet_server}/files/etc/lvm.conf",
+ }
+}
diff --git a/contrib/puppet/manifests/classes/lvmconf.pp b/contrib/puppet/manifests/classes/lvmconf.pp
new file mode 100644
index 000000000..4aa7ddfdc
--- /dev/null
+++ b/contrib/puppet/manifests/classes/lvmconf.pp
@@ -0,0 +1,8 @@
+class lvmconf {
+ file { "/etc/lvm/lvm.conf":
+ owner => "root", group => "root", mode => 644,
+ source => "puppet://${puppet_server}/files/etc/lvm/lvm.conf",
+ ensure => present
+ }
+}
+
diff --git a/contrib/puppet/manifests/classes/nova.pp b/contrib/puppet/manifests/classes/nova.pp
new file mode 100644
index 000000000..e942860f4
--- /dev/null
+++ b/contrib/puppet/manifests/classes/nova.pp
@@ -0,0 +1,464 @@
+import "kern_module"
+import "apt"
+import "loopback"
+
+#$head_node_ip = "undef"
+#$rabbit_ip = "undef"
+#$vpn_ip = "undef"
+#$public_interface = "undef"
+#$vlan_start = "5000"
+#$vlan_end = "6000"
+#$private_range = "10.0.0.0/16"
+#$public_range = "192.168.177.0/24"
+
+define nova_iptables($services, $ip="", $private_range="", $mgmt_ip="", $dmz_ip="") {
+ file { "/etc/init.d/nova-iptables":
+ owner => "root", mode => 755,
+ source => "puppet://${puppet_server}/files/production/nova-iptables",
+ }
+
+ file { "/etc/default/nova-iptables":
+ owner => "root", mode => 644,
+ content => template("nova-iptables.erb")
+ }
+}
+
+define nova_conf_pointer($name) {
+ file { "/etc/nova/nova-${name}.conf":
+ owner => "nova", mode => 400,
+ content => "--flagfile=/etc/nova/nova.conf"
+ }
+}
+
+class novaconf {
+ file { "/etc/nova/nova.conf":
+ owner => "nova", mode => 400,
+ content => template("production/nova-common.conf.erb", "production/nova-${cluster_name}.conf.erb")
+ }
+ nova_conf_pointer{'manage': name => 'manage'}
+}
+
+class novadata {
+ package { "rabbitmq-server": ensure => present }
+
+ file { "/etc/rabbitmq/rabbitmq.conf":
+ owner => "root", mode => 644,
+ content => "NODENAME=rabbit@localhost",
+ }
+
+ service { "rabbitmq-server":
+ ensure => running,
+ enable => true,
+ hasstatus => true,
+ require => [
+ File["/etc/rabbitmq/rabbitmq.conf"],
+ Package["rabbitmq-server"]
+ ]
+ }
+
+ package { "mysql-server": ensure => present }
+
+ file { "/etc/mysql/my.cnf":
+ owner => "root", mode => 644,
+ source => "puppet://${puppet_server}/files/production/my.cnf",
+ }
+
+ service { "mysql":
+ ensure => running,
+ enable => true,
+ hasstatus => true,
+ require => [
+ File["/etc/mysql/my.cnf"],
+ Package["mysql-server"]
+ ]
+ }
+
+ file { "/root/slap.sh":
+ owner => "root", mode => 755,
+ source => "puppet://${puppet_server}/files/production/slap.sh",
+ }
+
+ file { "/root/setup_data.sh":
+ owner => "root", mode => 755,
+ source => "puppet://${puppet_server}/files/production/setup_data.sh",
+ }
+
+ # setup compute data
+ exec { "setup_data":
+ command => "/root/setup_data.sh",
+ path => "/usr/bin:/bin",
+ unless => "test -f /root/installed",
+ require => [
+ Service["mysql"],
+ File["/root/slap.sh"],
+ File["/root/setup_data.sh"]
+ ]
+ }
+}
+
+define nscheduler($version) {
+ package { "nova-scheduler": ensure => $version, require => Exec["update-apt"] }
+ nova_conf_pointer{'scheduler': name => 'scheduler'}
+ exec { "update-rc.d -f nova-scheduler remove; update-rc.d nova-scheduler defaults 50":
+ path => "/usr/bin:/usr/sbin:/bin",
+ onlyif => "test -f /etc/init.d/nova-scheduler",
+ unless => "test -f /etc/rc2.d/S50nova-scheduler"
+ }
+ service { "nova-scheduler":
+ ensure => running,
+ hasstatus => true,
+ subscribe => [
+ Package["nova-scheduler"],
+ File["/etc/nova/nova.conf"],
+ File["/etc/nova/nova-scheduler.conf"]
+ ]
+ }
+
+}
+
+define napi($version, $api_servers, $api_base_port) {
+ file { "/etc/boto.cfg":
+ owner => "root", mode => 644,
+ source => "puppet://${puppet_server}/files/production/boto.cfg",
+ }
+
+ file { "/var/lib/nova/CA/genvpn.sh":
+ owner => "nova", mode => 755,
+ source => "puppet://${puppet_server}/files/production/genvpn.sh",
+ }
+
+ package { "python-greenlet": ensure => present }
+ package { "nova-api": ensure => $version, require => [Exec["update-apt"], Package["python-greenlet"]] }
+ nova_conf_pointer{'api': name => 'api'}
+
+ exec { "update-rc.d -f nova-api remove; update-rc.d nova-api defaults 50":
+ path => "/usr/bin:/usr/sbin:/bin",
+ onlyif => "test -f /etc/init.d/nova-api",
+ unless => "test -f /etc/rc2.d/S50nova-api"
+ }
+
+ service { "nova-netsync":
+ start => "/usr/bin/nova-netsync --pidfile=/var/run/nova/nova-netsync.pid --lockfile=/var/run/nova/nova-netsync.pid.lock start",
+ stop => "/usr/bin/nova-netsync --pidfile=/var/run/nova/nova-netsync.pid --lockfile=/var/run/nova/nova-netsync.pid.lock stop",
+ ensure => running,
+ hasstatus => false,
+ pattern => "nova-netsync",
+ require => Service["nova-api"],
+ subscribe => File["/etc/nova/nova.conf"]
+ }
+ service { "nova-api":
+ start => "monit start all -g nova_api",
+ stop => "monit stop all -g nova_api",
+ restart => "monit restart all -g nova_api",
+ # ensure => running,
+ # hasstatus => true,
+ require => Service["monit"],
+ subscribe => [
+ Package["nova-objectstore"],
+ File["/etc/boto.cfg"],
+ File["/etc/nova/nova.conf"],
+ File["/etc/nova/nova-objectstore.conf"]
+ ]
+ }
+
+ # the haproxy & monit's template use $api_servers and $api_base_port
+
+ package { "haproxy": ensure => present }
+ file { "/etc/default/haproxy":
+ owner => "root", mode => 644,
+ content => "ENABLED=1",
+ require => Package['haproxy']
+ }
+ file { "/etc/haproxy/haproxy.cfg":
+ owner => "root", mode => 644,
+ content => template("/srv/cloud/puppet/templates/haproxy.cfg.erb"),
+ require => Package['haproxy']
+ }
+ service { "haproxy":
+ ensure => true,
+ enable => true,
+ hasstatus => true,
+ subscribe => [
+ Package["haproxy"],
+ File["/etc/default/haproxy"],
+ File["/etc/haproxy/haproxy.cfg"],
+ ]
+ }
+
+ package { "socat": ensure => present }
+
+ file { "/usr/local/bin/gmetric_haproxy.sh":
+ owner => "root", mode => 755,
+ source => "puppet://${puppet_server}/files/production/ganglia/gmetric_scripts/gmetric_haproxy.sh",
+ }
+
+ cron { "gmetric_haproxy":
+ command => "/usr/local/bin/gmetric_haproxy.sh",
+ user => root,
+ minute => "*/3",
+ }
+
+ package { "monit": ensure => present }
+
+ file { "/etc/default/monit":
+ owner => "root", mode => 644,
+ content => "startup=1",
+ require => Package['monit']
+ }
+ file { "/etc/monit/monitrc":
+ owner => "root", mode => 600,
+ content => template("/srv/cloud/puppet/templates/monitrc-nova-api.erb"),
+ require => Package['monit']
+ }
+ service { "monit":
+ ensure => true,
+ pattern => "sbin/monit",
+ subscribe => [
+ Package["monit"],
+ File["/etc/default/monit"],
+ File["/etc/monit/monitrc"],
+ ]
+ }
+
+}
+
+
+define nnetwork($version) {
+ # kill the default network added by the package
+ exec { "kill-libvirt-default-net":
+ command => "virsh net-destroy default; rm /etc/libvirt/qemu/networks/autostart/default.xml",
+ path => "/usr/bin:/bin",
+ onlyif => "test -f /etc/libvirt/qemu/networks/autostart/default.xml"
+ }
+
+ # EVIL HACK: custom binary because dnsmasq 2.52 segfaulted accessing dereferenced object
+ file { "/usr/sbin/dnsmasq":
+ owner => "root", group => "root",
+ source => "puppet://${puppet_server}/files/production/dnsmasq",
+ }
+
+ package { "nova-network": ensure => $version, require => Exec["update-apt"] }
+ nova_conf_pointer{'dhcpbridge': name => 'dhcpbridge'}
+ nova_conf_pointer{'network': name => "network" }
+
+ exec { "update-rc.d -f nova-network remove; update-rc.d nova-network defaults 50":
+ path => "/usr/bin:/usr/sbin:/bin",
+ onlyif => "test -f /etc/init.d/nova-network",
+ unless => "test -f /etc/rc2.d/S50nova-network"
+ }
+ service { "nova-network":
+ ensure => running,
+ hasstatus => true,
+ subscribe => [
+ Package["nova-network"],
+ File["/etc/nova/nova.conf"],
+ File["/etc/nova/nova-network.conf"]
+ ]
+ }
+}
+
+define nobjectstore($version) {
+ package { "nova-objectstore": ensure => $version, require => Exec["update-apt"] }
+ nova_conf_pointer{'objectstore': name => 'objectstore'}
+ exec { "update-rc.d -f nova-objectstore remove; update-rc.d nova-objectstore defaults 50":
+ path => "/usr/bin:/usr/sbin:/bin",
+ onlyif => "test -f /etc/init.d/nova-objectstore",
+ unless => "test -f /etc/rc2.d/S50nova-objectstore"
+ }
+ service { "nova-objectstore":
+ ensure => running,
+ hasstatus => true,
+ subscribe => [
+ Package["nova-objectstore"],
+ File["/etc/nova/nova.conf"],
+ File["/etc/nova/nova-objectstore.conf"]
+ ]
+ }
+}
+
+define ncompute($version) {
+ include ganglia-python
+ include ganglia-compute
+
+ # kill the default network added by the package
+ exec { "kill-libvirt-default-net":
+ command => "virsh net-destroy default; rm /etc/libvirt/qemu/networks/autostart/default.xml",
+ path => "/usr/bin:/bin",
+ onlyif => "test -f /etc/libvirt/qemu/networks/autostart/default.xml"
+ }
+
+
+ # LIBVIRT has to be restarted when ebtables / gawk is installed
+ service { "libvirt-bin":
+ ensure => running,
+ pattern => "sbin/libvirtd",
+ subscribe => [
+ Package["ebtables"],
+ Kern_module["kvm_intel"]
+ ],
+ require => [
+ Package["libvirt-bin"],
+ Package["ebtables"],
+ Package["gawk"],
+ Kern_module["kvm_intel"],
+ File["/dev/kvm"]
+ ]
+ }
+
+ package { "libvirt-bin": ensure => "0.8.3-1ubuntu14~ppalucid2" }
+ package { "ebtables": ensure => present }
+ package { "gawk": ensure => present }
+
+ # ensure proper permissions on /dev/kvm
+ file { "/dev/kvm":
+ owner => "root",
+ group => "kvm",
+ mode => 660
+ }
+
+ # require hardware virt
+ kern_module { "kvm_intel":
+ ensure => present,
+ }
+
+ # increase loopback devices
+ file { "/etc/modprobe.d/loop.conf":
+ owner => "root", mode => 644,
+ content => "options loop max_loop=40"
+ }
+
+ nova_conf_pointer{'compute': name => 'compute'}
+
+ loopback{loop0: num => 0}
+ loopback{loop1: num => 1}
+ loopback{loop2: num => 2}
+ loopback{loop3: num => 3}
+ loopback{loop4: num => 4}
+ loopback{loop5: num => 5}
+ loopback{loop6: num => 6}
+ loopback{loop7: num => 7}
+ loopback{loop8: num => 8}
+ loopback{loop9: num => 9}
+ loopback{loop10: num => 10}
+ loopback{loop11: num => 11}
+ loopback{loop12: num => 12}
+ loopback{loop13: num => 13}
+ loopback{loop14: num => 14}
+ loopback{loop15: num => 15}
+ loopback{loop16: num => 16}
+ loopback{loop17: num => 17}
+ loopback{loop18: num => 18}
+ loopback{loop19: num => 19}
+ loopback{loop20: num => 20}
+ loopback{loop21: num => 21}
+ loopback{loop22: num => 22}
+ loopback{loop23: num => 23}
+ loopback{loop24: num => 24}
+ loopback{loop25: num => 25}
+ loopback{loop26: num => 26}
+ loopback{loop27: num => 27}
+ loopback{loop28: num => 28}
+ loopback{loop29: num => 29}
+ loopback{loop30: num => 30}
+ loopback{loop31: num => 31}
+ loopback{loop32: num => 32}
+ loopback{loop33: num => 33}
+ loopback{loop34: num => 34}
+ loopback{loop35: num => 35}
+ loopback{loop36: num => 36}
+ loopback{loop37: num => 37}
+ loopback{loop38: num => 38}
+ loopback{loop39: num => 39}
+
+ package { "python-libvirt": ensure => "0.8.3-1ubuntu14~ppalucid2" }
+
+ package { "nova-compute":
+ ensure => "$version",
+ require => Package["python-libvirt"]
+ }
+
+ #file { "/usr/share/nova/libvirt.qemu.xml.template":
+ # owner => "nova", mode => 400,
+ # source => "puppet://${puppet_server}/files/production/libvirt.qemu.xml.template",
+ #}
+
+ # fix runlevels: using enable => true adds it as 20, which is too early
+ exec { "update-rc.d -f nova-compute remove":
+ path => "/usr/bin:/usr/sbin:/bin",
+ onlyif => "test -f /etc/rc2.d/S??nova-compute"
+ }
+ service { "nova-compute":
+ ensure => running,
+ hasstatus => true,
+ subscribe => [
+ Package["nova-compute"],
+ File["/etc/nova/nova.conf"],
+ File["/etc/nova/nova-compute.conf"],
+ #File["/usr/share/nova/libvirt.qemu.xml.template"],
+ Service["libvirt-bin"],
+ Kern_module["kvm_intel"]
+ ]
+ }
+}
+
+define nvolume($version) {
+
+ package { "nova-volume": ensure => $version, require => Exec["update-apt"] }
+
+ nova_conf_pointer{'volume': name => 'volume'}
+
+ # fix runlevels: using enable => true adds it as 20, which is too early
+ exec { "update-rc.d -f nova-volume remove":
+ path => "/usr/bin:/usr/sbin:/bin",
+ onlyif => "test -f /etc/rc2.d/S??nova-volume"
+ }
+
+ file { "/etc/default/iscsitarget":
+ owner => "root", mode => 644,
+ content => "ISCSITARGET_ENABLE=true"
+ }
+
+ package { "iscsitarget": ensure => present }
+
+ file { "/dev/iscsi": ensure => directory } # FIXME(vish): owner / mode?
+ file { "/usr/sbin/nova-iscsi-dev.sh":
+ owner => "root", mode => 755,
+ source => "puppet://${puppet_server}/files/production/nova-iscsi-dev.sh"
+ }
+ file { "/etc/udev/rules.d/55-openiscsi.rules":
+ owner => "root", mode => 644,
+ content => 'KERNEL=="sd*", BUS=="scsi", PROGRAM="/usr/sbin/nova-iscsi-dev.sh %b",SYMLINK+="iscsi/%c%n"'
+ }
+
+ service { "iscsitarget":
+ ensure => running,
+ enable => true,
+ hasstatus => true,
+ require => [
+ File["/etc/default/iscsitarget"],
+ Package["iscsitarget"]
+ ]
+ }
+
+ service { "nova-volume":
+ ensure => running,
+ hasstatus => true,
+ subscribe => [
+ Package["nova-volume"],
+ File["/etc/nova/nova.conf"],
+ File["/etc/nova/nova-volume.conf"]
+ ]
+ }
+}
+
+class novaspool {
+ # This isn't in release yet
+ #cron { logspool:
+ # command => "/usr/bin/nova-logspool /var/log/nova.log /var/lib/nova/spool",
+ # user => "nova"
+ #}
+ #cron { spoolsentry:
+ # command => "/usr/bin/nova-spoolsentry ${sentry_url} ${sentry_key} /var/lib/nova/spool",
+ # user => "nova"
+ #}
+}
diff --git a/contrib/puppet/manifests/classes/swift.pp b/contrib/puppet/manifests/classes/swift.pp
new file mode 100644
index 000000000..64ffb6fa3
--- /dev/null
+++ b/contrib/puppet/manifests/classes/swift.pp
@@ -0,0 +1,7 @@
+class swift {
+ package { "memcached": ensure => present }
+ service { "memcached": require => Package['memcached'] }
+
+ package { "swift-proxy": ensure => present }
+}
+
diff --git a/contrib/puppet/manifests/site.pp b/contrib/puppet/manifests/site.pp
new file mode 100644
index 000000000..ca07a34ad
--- /dev/null
+++ b/contrib/puppet/manifests/site.pp
@@ -0,0 +1,120 @@
+# site.pp
+
+import "templates"
+import "classes/*"
+
+node novabase inherits default {
+# $puppet_server = "192.168.0.10"
+ $cluster_name = "openstack001"
+ $ganglia_udp_send_channel = "openstack001.example.com"
+ $syslog = "192.168.0.10"
+
+ # THIS STUFF ISN'T IN RELEASE YET
+ #$sentry_url = "http://192.168.0.19/sentry/store/"
+ #$sentry_key = "TODO:SENTRYPASS"
+
+ $local_network = "192.168.0.0/16"
+ $vpn_ip = "192.168.0.2"
+ $public_interface = "eth0"
+ include novanode
+# include nova-common
+ include opsmetrics
+
+# non-nova stuff such as nova-dash inherit from novanode
+# novaspool needs a better home
+# include novaspool
+}
+
+# Builder
+node "nova000.example.com" inherits novabase {
+ $syslog = "server"
+ include ntp
+ include syslog-server
+}
+
+# Non-Nova nodes
+
+node
+ "blog.example.com",
+ "wiki.example.com"
+inherits novabase {
+ include ganglia-python
+ include ganglia-apache
+ include ganglia-mysql
+}
+
+
+node "nova001.example.com"
+inherits novabase {
+ include novabase
+
+ nova_iptables { nova:
+ services => [
+ "ganglia",
+ "mysql",
+ "rabbitmq",
+ "ldap",
+ "api",
+ "objectstore",
+ "nrpe",
+ ],
+ ip => "192.168.0.10",
+ }
+
+ nobjectstore { nova: version => "0.9.0" }
+ nscheduler { nova: version => "0.9.0" }
+ napi { nova:
+ version => "0.9.0",
+ api_servers => 10,
+ api_base_port => 8000
+ }
+}
+
+node "nova002.example.com"
+inherits novabase {
+ include novaconf
+
+ nova_iptables { nova:
+ services => [
+ "ganglia",
+ "dnsmasq",
+ "nrpe"
+ ],
+ ip => "192.168.4.2",
+ private_range => "192.168.0.0/16",
+ }
+
+ nnetwork { nova: version => "0.9.0" }
+}
+
+node
+ "nova003.example.com",
+ "nova004.example.com",
+ "nova005.example.com",
+ "nova006.example.com",
+ "nova007.example.com",
+ "nova008.example.com",
+ "nova009.example.com",
+ "nova010.example.com",
+ "nova011.example.com",
+ "nova012.example.com",
+ "nova013.example.com",
+ "nova014.example.com",
+ "nova015.example.com",
+ "nova016.example.com",
+ "nova017.example.com",
+ "nova018.example.com",
+ "nova019.example.com",
+inherits novabase {
+ include novaconf
+ ncompute { nova: version => "0.9.0" }
+ nvolume { nova: version => "0.9.0" }
+}
+
+#node
+# "nova020.example.com"
+# "nova021.example.com"
+#inherits novanode {
+# include novaconf
+ #ncompute { nova: version => "0.9.0" }
+#}
diff --git a/contrib/puppet/manifests/templates.pp b/contrib/puppet/manifests/templates.pp
new file mode 100644
index 000000000..90e433013
--- /dev/null
+++ b/contrib/puppet/manifests/templates.pp
@@ -0,0 +1,21 @@
+# templates.pp
+
+import "classes/*"
+
+class baseclass {
+# include dns-client # FIXME: missing resolv.conf.erb??
+ include issue
+}
+
+node default {
+ $nova_site = "undef"
+ $nova_ns1 = "undef"
+ $nova_ns2 = "undef"
+# include baseclass
+}
+
+# novanode handles the system-level requirements for Nova/Swift nodes
+class novanode {
+ include baseclass
+ include lvmconf
+}
diff --git a/contrib/puppet/puppet.conf b/contrib/puppet/puppet.conf
new file mode 100644
index 000000000..92af920e3
--- /dev/null
+++ b/contrib/puppet/puppet.conf
@@ -0,0 +1,11 @@
+[main]
+logdir=/var/log/puppet
+vardir=/var/lib/puppet
+ssldir=/var/lib/puppet/ssl
+rundir=/var/run/puppet
+factpath=$vardir/lib/facter
+pluginsync=false
+
+[puppetmasterd]
+templatedir=/var/lib/nova/contrib/puppet/templates
+autosign=true
diff --git a/contrib/puppet/templates/haproxy.cfg.erb b/contrib/puppet/templates/haproxy.cfg.erb
new file mode 100644
index 000000000..bd9991de7
--- /dev/null
+++ b/contrib/puppet/templates/haproxy.cfg.erb
@@ -0,0 +1,39 @@
+# this config needs haproxy-1.1.28 or haproxy-1.2.1
+
+global
+ log 127.0.0.1 local0
+ log 127.0.0.1 local1 notice
+ #log loghost local0 info
+ maxconn 4096
+ #chroot /usr/share/haproxy
+ stats socket /var/run/haproxy.sock
+ user haproxy
+ group haproxy
+ daemon
+ #debug
+ #quiet
+
+defaults
+ log global
+ mode http
+ option httplog
+ option dontlognull
+ retries 3
+ option redispatch
+ stats enable
+ stats uri /haproxy
+ maxconn 2000
+ contimeout 5000
+ clitimeout 50000
+ srvtimeout 50000
+
+
+listen nova-api 0.0.0.0:8773
+ option httpchk GET / HTTP/1.0\r\nHost:\ example.com
+ option forwardfor
+ reqidel ^X-Forwarded-For:.*
+ balance roundrobin
+<% api_servers.to_i.times do |offset| %><% port = api_base_port.to_i + offset -%>
+ server api_<%= port %> 127.0.0.1:<%= port %> maxconn 1 check
+<% end -%>
+ option httpclose # disable keep-alive
diff --git a/contrib/puppet/templates/monitrc-nova-api.erb b/contrib/puppet/templates/monitrc-nova-api.erb
new file mode 100644
index 000000000..fe2626327
--- /dev/null
+++ b/contrib/puppet/templates/monitrc-nova-api.erb
@@ -0,0 +1,138 @@
+###############################################################################
+## Monit control file
+###############################################################################
+##
+## Comments begin with a '#' and extend through the end of the line. Keywords
+## are case insensitive. All path's MUST BE FULLY QUALIFIED, starting with '/'.
+##
+## Below you will find examples of some frequently used statements. For
+## information about the control file, a complete list of statements and
+## options please have a look in the monit manual.
+##
+##
+###############################################################################
+## Global section
+###############################################################################
+##
+## Start monit in the background (run as a daemon):
+#
+set daemon 60 # check services at 1-minute intervals
+ with start delay 30 # optional: delay the first check by half a minute
+ # (by default check immediately after monit start)
+
+
+## Set syslog logging with the 'daemon' facility. If the FACILITY option is
+## omitted, monit will use 'user' facility by default. If you want to log to
+## a stand alone log file instead, specify the path to a log file
+#
+set logfile syslog facility log_daemon
+#
+#
+### Set the location of monit id file which saves the unique id specific for
+### given monit. The id is generated and stored on first monit start.
+### By default the file is placed in $HOME/.monit.id.
+#
+# set idfile /var/.monit.id
+#
+### Set the location of monit state file which saves the monitoring state
+### on each cycle. By default the file is placed in $HOME/.monit.state. If
+### state file is stored on persistent filesystem, monit will recover the
+### monitoring state across reboots. If it is on temporary filesystem, the
+### state will be lost on reboot.
+#
+# set statefile /var/.monit.state
+#
+## Set the list of mail servers for alert delivery. Multiple servers may be
+## specified using comma separator. By default monit uses port 25 - this
+## is possible to override with the PORT option.
+#
+# set mailserver mail.bar.baz, # primary mailserver
+# backup.bar.baz port 10025, # backup mailserver on port 10025
+# localhost # fallback relay
+#
+#
+## By default monit will drop alert events if no mail servers are available.
+## If you want to keep the alerts for a later delivery retry, you can use the
+## EVENTQUEUE statement. The base directory where undelivered alerts will be
+## stored is specified by the BASEDIR option. You can limit the maximal queue
+## size using the SLOTS option (if omitted, the queue is limited by space
+## available in the back end filesystem).
+#
+# set eventqueue
+# basedir /var/monit # set the base directory where events will be stored
+#       slots 100            # optionally limit the queue size
+#
+#
+## Send status and events to M/Monit (Monit central management: for more
+## information about M/Monit see http://www.tildeslash.com/mmonit).
+#
+# set mmonit http://monit:monit@192.168.1.10:8080/collector
+#
+#
+## Monit by default uses the following alert mail format:
+##
+## --8<--
+## From: monit@$HOST # sender
+## Subject: monit alert -- $EVENT $SERVICE # subject
+##
+## $EVENT Service $SERVICE #
+## #
+## Date: $DATE #
+## Action: $ACTION #
+## Host: $HOST # body
+## Description: $DESCRIPTION #
+## #
+## Your faithful employee, #
+## monit #
+## --8<--
+##
+## You can override this message format or parts of it, such as subject
+## or sender using the MAIL-FORMAT statement. Macros such as $DATE, etc.
+## are expanded at runtime. For example, to override the sender:
+#
+# set mail-format { from: monit@foo.bar }
+#
+#
+## You can set alert recipients here who will receive alerts if/when a
+## service defined in this file has errors. Alerts may be restricted on
+## events by using a filter as in the second example below.
+#
+# set alert sysadm@foo.bar # receive all alerts
+# set alert manager@foo.bar only on { timeout } # receive just service-
+# # timeout alert
+#
+#
+## Monit has an embedded web server which can be used to view status of
+## services monitored, the current configuration, actual services parameters
+## and manage services from a web interface.
+#
+ set httpd port 2812 and
+ use address localhost # only accept connection from localhost
+ allow localhost # allow localhost to connect to the server and
+# allow admin:monit # require user 'admin' with password 'monit'
+# allow @monit # allow users of group 'monit' to connect (rw)
+# allow @users readonly # allow users of group 'users' to connect readonly
+#
+#
+###############################################################################
+## Services
+###############################################################################
+
+<% api_servers.to_i.times do |offset| %><% port = api_base_port.to_i + offset %>
+
+check process nova_api_<%= port %> with pidfile /var/run/nova/nova-api-<%= port %>.pid
+ group nova_api
+ start program = "/usr/bin/nova-api --flagfile=/etc/nova/nova.conf --pidfile=/var/run/nova/nova-api-<%= port %>.pid --api_listen_port=<%= port %> --lockfile=/var/run/nova/nova-api-<%= port %>.pid.lock start"
+ as uid nova
+ stop program = "/usr/bin/nova-api --flagfile=/etc/nova/nova.conf --pidfile=/var/run/nova/nova-api-<%= port %>.pid --api_listen_port=<%= port %> --lockfile=/var/run/nova/nova-api-<%= port %>.pid.lock stop"
+ as uid nova
+ if failed port <%= port %> protocol http
+ with timeout 15 seconds
+ for 4 cycles
+ then restart
+ if totalmem > 300 Mb then restart
+ if cpu is greater than 60% for 2 cycles then alert
+ if cpu > 80% for 3 cycles then restart
+ if 3 restarts within 5 cycles then timeout
+
+<% end %>
diff --git a/contrib/puppet/templates/nova-iptables.erb b/contrib/puppet/templates/nova-iptables.erb
new file mode 100644
index 000000000..2fc066305
--- /dev/null
+++ b/contrib/puppet/templates/nova-iptables.erb
@@ -0,0 +1,10 @@
+<% services.each do |service| -%>
+<%= service.upcase %>=1
+<% end -%>
+<% if ip && ip != "" %>IP="<%=ip%>"<% end %>
+<% if private_range && private_range != "" %>PRIVATE_RANGE="<%=private_range%>"<% end %>
+<% if mgmt_ip && mgmt_ip != "" %>MGMT_IP="<%=mgmt_ip%>"<% end %>
+<% if dmz_ip && dmz_ip != "" %>DMZ_IP="<%=dmz_ip%>"<% end %>
+
+# warning: this file is auto-generated by puppet
+
diff --git a/contrib/puppet/templates/production/nova-common.conf.erb b/contrib/puppet/templates/production/nova-common.conf.erb
new file mode 100644
index 000000000..00c110781
--- /dev/null
+++ b/contrib/puppet/templates/production/nova-common.conf.erb
@@ -0,0 +1,56 @@
+# global
+--dmz_net=192.168.0.0
+--dmz_mask=255.255.0.0
+--dmz_cidr=192.168.0.0/16
+--ldap_user_dn=cn=Administrators,dc=example,dc=com
+--ldap_user_unit=Users
+--ldap_user_subtree=ou=Users,dc=example,dc=com
+--ldap_project_subtree=ou=Groups,dc=example,dc=com
+--role_project_subtree=ou=Groups,dc=example,dc=com
+--ldap_cloudadmin=cn=NovaAdmins,ou=Groups,dc=example,dc=com
+--ldap_itsec=cn=NovaSecurity,ou=Groups,dc=example,dc=com
+--ldap_sysadmin=cn=Administrators,ou=Groups,dc=example,dc=com
+--ldap_netadmin=cn=Administrators,ou=Groups,dc=example,dc=com
+--ldap_developer=cn=developers,ou=Groups,dc=example,dc=com
+--verbose
+--daemonize
+--syslog
+--networks_path=/var/lib/nova/networks
+--instances_path=/var/lib/nova/instances
+--buckets_path=/var/lib/nova/objectstore/buckets
+--images_path=/var/lib/nova/objectstore/images
+--scheduler_driver=nova.scheduler.simple.SimpleScheduler
+--libvirt_xml_template=/usr/share/nova/libvirt.qemu.xml.template
+--credentials_template=/usr/share/nova/novarc.template
+--boot_script_template=/usr/share/nova/bootscript.template
+--vpn_client_template=/usr/share/nova/client.ovpn.template
+--max_cores=40
+--max_gigabytes=2000
+--ca_path=/var/lib/nova/CA
+--keys_path=/var/lib/nova/keys
+--vpn_start=11000
+--volume_group=vgdata
+--volume_manager=nova.volume.manager.ISCSIManager
+--volume_driver=nova.volume.driver.ISCSIDriver
+--default_kernel=aki-DEFAULT
+--default_ramdisk=ari-DEFAULT
+--dhcpbridge=/usr/bin/nova-dhcpbridge
+--vpn_image_id=ami-cloudpipe
+--dhcpbridge_flagfile=/etc/nova/nova.conf
+--credential_cert_subject=/C=US/ST=Texas/L=Bexar/O=NovaDev/OU=NOVA/CN=%s-%s
+--auth_driver=nova.auth.ldapdriver.LdapDriver
+--quota_cores=17
+--quota_floating_ips=5
+--quota_instances=6
+--quota_volumes=10
+--quota_gigabytes=100
+--use_nova_chains=True
+--input_chain=services
+--FAKE_subdomain=ec2
+--use_project_ca=True
+--fixed_ip_disassociate_timeout=300
+--api_max_requests=1
+--api_listen_ip=127.0.0.1
+--user_cert_subject=/C=US/ST=Texas/L=Bexar/O=NovaDev/OU=Nova/CN=%s-%s-%s
+--project_cert_subject=/C=US/ST=Texas/L=Bexar/O=NovaDev/OU=Nova/CN=project-ca-%s-%s
+--vpn_cert_subject=/C=US/ST=Texas/L=Bexar/O=NovaDev/OU=Nova/CN=project-vpn-%s-%s
diff --git a/contrib/puppet/templates/production/nova-nova.conf.erb b/contrib/puppet/templates/production/nova-nova.conf.erb
new file mode 100644
index 000000000..8683fefde
--- /dev/null
+++ b/contrib/puppet/templates/production/nova-nova.conf.erb
@@ -0,0 +1,21 @@
+--fixed_range=192.168.0.0/16
+--iscsi_ip_prefix=192.168.4
+--floating_range=10.0.0.0/24
+--rabbit_host=192.168.0.10
+--s3_host=192.168.0.10
+--cc_host=192.168.0.10
+--cc_dmz=192.168.24.10
+--s3_dmz=192.168.24.10
+--ec2_url=http://192.168.0.1:8773/services/Cloud
+--vpn_ip=192.168.0.2
+--ldap_url=ldap://192.168.0.10
+--sql_connection=mysql://nova:TODO-MYPASS@192.168.0.10/nova
+--other_sql_connection=mysql://nova:TODO-MYPASS@192.168.0.10/nova
+--routing_source_ip=192.168.0.2
+--bridge_dev=eth1
+--public_interface=eth0
+--vlan_start=3100
+--num_networks=700
+--rabbit_userid=TODO:RABBIT
+--rabbit_password=TODO:CHANGEME
+--ldap_password=TODO:CHANGEME
diff --git a/doc/ext/nova_todo.py b/doc/ext/nova_todo.py
index 7a06b1bf9..efc0c3edd 100644
--- a/doc/ext/nova_todo.py
+++ b/doc/ext/nova_todo.py
@@ -3,10 +3,12 @@
from sphinx.ext.todo import *
from docutils.parsers.rst import directives
+import re
def _(s):
return s
+
def process_todo_nodes(app, doctree, fromdocname):
if not app.config['todo_include_todos']:
for node in doctree.traverse(todo_node):
@@ -19,20 +21,20 @@ def process_todo_nodes(app, doctree, fromdocname):
if not hasattr(env, 'todo_all_todos'):
env.todo_all_todos = []
- my_todo_list = nodes.bullet_list("", nodes.Text('',''));
# remove the item that was added in the constructor, since I'm tired of
# reading through docutils for the proper way to construct an empty list
- my_todo_list.remove(my_todo_list[0])
+ lists = []
+ for i in xrange(5):
+ lists.append(nodes.bullet_list("", nodes.Text('','')));
+ lists[i].remove(lists[i][0])
+ lists[i].set_class('todo_list')
- my_todo_list.set_class('todo_list')
for node in doctree.traverse(todolist):
if not app.config['todo_include_todos']:
node.replace_self([])
continue
- content = []
-
for todo_info in env.todo_all_todos:
para = nodes.paragraph()
filename = env.doc2path(todo_info['docname'], base=None)
@@ -54,20 +56,33 @@ def process_todo_nodes(app, doctree, fromdocname):
newnode.append(innernode)
para += newnode
- para.set_class("link")
+ para.set_class('todo_link')
todo_entry = todo_info['todo']
env.resolve_references(todo_entry, todo_info['docname'], app.builder)
- item = nodes.list_item("", para)
- todo_entry[1].set_class("details")
- item.append(todo_entry[1])
+ item = nodes.list_item('', para)
+ todo_entry[1].set_class('details')
+
+ comment = todo_entry[1]
+
+ m = re.match(r"^P(\d)", comment.astext())
+ priority = 5
+ if m:
+ priority = int(m.group(1))
+ if (priority < 0): priority = 1
+ if (priority > 5): priority = 5
+
+ item.set_class('todo_p' + str(priority))
+ todo_entry.set_class('todo_p' + str(priority))
+
+ item.append(comment)
- my_todo_list.insert(0, item)
+ lists[priority-1].insert(0, item)
- node.replace_self(my_todo_list)
+ node.replace_self(lists)
def setup(app):
app.add_config_value('todo_include_todos', False, False)
diff --git a/doc/source/adminguide/distros/others.rst b/doc/source/adminguide/distros/others.rst
new file mode 100644
index 000000000..ec14a9abb
--- /dev/null
+++ b/doc/source/adminguide/distros/others.rst
@@ -0,0 +1,88 @@
+Installation on other distros (like Debian, Fedora or CentOS )
+==============================================================
+
+Feel free to add additional notes for additional distributions.
+
+Nova installation on CentOS 5.5
+-------------------------------
+
+These are notes for installing OpenStack Compute on CentOS 5.5 and will be updated but are NOT final. Please test for accuracy and edit as you see fit.
+
+The principal bottleneck for running nova on CentOS is Python 2.6. Nova is written in Python 2.6, while CentOS 5.5 comes with Python 2.4. We cannot update Python system-wide because some core utilities (like yum) are dependent on Python 2.4. Also, very few Python 2.6 modules are available in the centos/epel repos.
+
+Pre-reqs
+--------
+
+Add euca2ools and EPEL repo first.::
+
+ cat >/etc/yum.repos.d/euca2ools.repo << EUCA_REPO_CONF_EOF
+ [eucalyptus]
+ name=euca2ools
+ baseurl=http://www.eucalyptussoftware.com/downloads/repo/euca2ools/1.3.1/yum/centos/
+ enabled=1
+ gpgcheck=0
+
+ EUCA_REPO_CONF_EOF
+
+::
+
+ rpm -Uvh 'http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-4.noarch.rpm'
+
+Now install python2.6, kvm and few other libraries through yum::
+
+ yum -y install dnsmasq vblade kpartx kvm gawk iptables ebtables bzr screen euca2ools curl rabbitmq-server gcc gcc-c++ autoconf automake swig openldap openldap-servers nginx python26 python26-devel python26-distribute git openssl-devel python26-tools mysql-server qemu kmod-kvm libxml2 libxslt libxslt-devel mysql-devel
+
+Then download the latest aoetools and build (and install) it. Check for the latest version on SourceForge; the exact URL will change if there's a new release::
+
+ wget -c http://sourceforge.net/projects/aoetools/files/aoetools/32/aoetools-32.tar.gz/download
+ tar -zxvf aoetools-32.tar.gz
+ cd aoetools-32
+ make
+ make install
+
+Add the udev rules for aoetools::
+
+ cat > /etc/udev/rules.d/60-aoe.rules << AOE_RULES_EOF
+ SUBSYSTEM=="aoe", KERNEL=="discover", NAME="etherd/%k", GROUP="disk", MODE="0220"
+ SUBSYSTEM=="aoe", KERNEL=="err", NAME="etherd/%k", GROUP="disk", MODE="0440"
+ SUBSYSTEM=="aoe", KERNEL=="interfaces", NAME="etherd/%k", GROUP="disk", MODE="0220"
+ SUBSYSTEM=="aoe", KERNEL=="revalidate", NAME="etherd/%k", GROUP="disk", MODE="0220"
+ # aoe block devices
+ KERNEL=="etherd*", NAME="%k", GROUP="disk"
+ AOE_RULES_EOF
+
+Load the kernel modules::
+
+ modprobe aoe
+
+::
+
+ modprobe kvm
+
+Now, install the python modules using easy_install-2.6; this ensures the installations are done against python 2.6
+
+
+easy_install-2.6 twisted sqlalchemy mox greenlet carrot daemon eventlet tornado IPy routes lxml MySQL-python
+python-gflags needs to be downloaded and installed manually; use these commands (check the exact URL for newer releases):
+
+::
+
+ wget -c "http://python-gflags.googlecode.com/files/python-gflags-1.4.tar.gz"
+ tar -zxvf python-gflags-1.4.tar.gz
+ cd python-gflags-1.4
+ python2.6 setup.py install
+ cd ..
+
+Do the same for the python2.6-libxml2 module, noticing the --with-python and --prefix flags. --with-python ensures we are building it against python2.6 (otherwise it will build against python2.4, which is the default)::
+
+ wget -c "ftp://xmlsoft.org/libxml2/libxml2-2.7.3.tar.gz"
+ tar -zxvf libxml2-2.7.3.tar.gz
+ cd libxml2-2.7.3
+ ./configure --with-python=/usr/bin/python26 --prefix=/usr
+ make all
+ make install
+ cd python
+ python2.6 setup.py install
+ cd ..
+
+Once you've done this, continue at Step 3 here: :doc:`../single.node.install`
diff --git a/doc/source/adminguide/distros/ubuntu.10.04.rst b/doc/source/adminguide/distros/ubuntu.10.04.rst
new file mode 100644
index 000000000..ce368fab8
--- /dev/null
+++ b/doc/source/adminguide/distros/ubuntu.10.04.rst
@@ -0,0 +1,41 @@
+Installing on Ubuntu 10.04 (Lucid)
+==================================
+
+Step 1: Install dependencies
+----------------------------
+Grab the latest code from launchpad:
+
+::
+
+ bzr clone lp:nova
+
+Here's a script you can use to install (and then run) Nova on Ubuntu or Debian (when using Debian, edit nova.sh to have USE_PPA=0):
+
+.. todo:: give a link to a stable releases page
+
+Step 2: Install dependencies
+----------------------------
+
+Nova requires rabbitmq for messaging and optionally you can use redis for storing state, so install these first.
+
+*Note:* You must have sudo installed to run these commands as shown here.
+
+::
+
+ sudo apt-get install rabbitmq-server redis-server
+
+
+You'll see messages starting with "Reading package lists... Done" and you must confirm by typing Y that you want to continue.
+
+If you're running on Ubuntu 10.04, you'll need to install Twisted and python-gflags which is included in the OpenStack PPA.
+
+::
+
+ sudo apt-get install python-twisted
+
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 95C71FE2
+ sudo sh -c 'echo "deb http://ppa.launchpad.net/openstack/openstack-ppa/ubuntu lucid main" > /etc/apt/sources.list.d/openstackppa.list'
+ sudo apt-get update && sudo apt-get install python-gflags
+
+
+Once you've done this, continue at Step 3 here: :doc:`../single.node.install`
diff --git a/doc/source/adminguide/distros/ubuntu.10.10.rst b/doc/source/adminguide/distros/ubuntu.10.10.rst
new file mode 100644
index 000000000..a3fa2def1
--- /dev/null
+++ b/doc/source/adminguide/distros/ubuntu.10.10.rst
@@ -0,0 +1,41 @@
+Installing on Ubuntu 10.10 (Maverick)
+=====================================
+Single Machine Installation (Ubuntu 10.10)
+
+While we wouldn't expect you to put OpenStack Compute into production on a non-LTS version of Ubuntu, these instructions are up-to-date with the latest version of Ubuntu.
+
+Make sure you are running Ubuntu 10.10 so that the packages will be available. This install requires more than 70 MB of free disk space.
+
+These instructions are based on Soren Hansen's blog entry, Openstack on Maverick. A script is in progress as well.
+
+Step 1: Install required prerequisites
+--------------------------------------
+Nova requires rabbitmq for messaging and redis for storing state (for now), so we'll install these first.::
+
+ sudo apt-get install rabbitmq-server redis-server
+
+You'll see messages starting with "Reading package lists... Done" and you must confirm by typing Y that you want to continue.
+
+Step 2: Install Nova packages available in Maverick Meerkat
+-----------------------------------------------------------
+Type or copy/paste in the following line to get the packages that you use to run OpenStack Compute.::
+
+ sudo apt-get install python-nova
+ sudo apt-get install nova-api nova-objectstore nova-compute nova-scheduler nova-network euca2ools unzip
+
+You'll see messages starting with "Reading package lists... Done" and you must confirm by typing Y that you want to continue. This operation may take a while as many dependent packages will be installed. Note: there is a dependency problem with python-nova which can be worked around by installing python-nova first.
+
+When the installation is complete, you'll see the following lines confirming:::
+
+ Adding system user `nova' (UID 106) ...
+ Adding new user `nova' (UID 106) with group `nogroup' ...
+ Not creating home directory `/var/lib/nova'.
+ Setting up nova-scheduler (0.9.1~bzr331-0ubuntu2) ...
+ * Starting nova scheduler nova-scheduler
+ WARNING:root:Starting scheduler node
+ ...done.
+ Processing triggers for libc-bin ...
+ ldconfig deferred processing now taking place
+ Processing triggers for python-support ...
+
+Once you've done this, continue at Step 3 here: :doc:`../single.node.install`
diff --git a/doc/source/adminguide/index.rst b/doc/source/adminguide/index.rst
index 9a6a70d45..51228b319 100644
--- a/doc/source/adminguide/index.rst
+++ b/doc/source/adminguide/index.rst
@@ -75,6 +75,8 @@ Networking
:maxdepth: 1
multi.node.install
+ network.vlan.rst
+ network.flat.rst
Advanced Topics
diff --git a/doc/source/adminguide/managing.networks.rst b/doc/source/adminguide/managing.networks.rst
index 3d6b9b7d7..c8df471e8 100644
--- a/doc/source/adminguide/managing.networks.rst
+++ b/doc/source/adminguide/managing.networks.rst
@@ -16,74 +16,43 @@
License for the specific language governing permissions and limitations
under the License.
-OpenStack Network Overview
-==========================
+Networking Overview
+===================
+In Nova, users organize their cloud resources in projects. A Nova project consists of a number of VM instances created by a user. For each VM instance, Nova assigns to it a private IP address. (Currently, Nova only supports Linux bridge networking that allows the virtual interfaces to connect to the outside network through the physical interface. Other virtual network technologies, such as Open vSwitch, could be supported in the future.) The Network Controller provides virtual networks to enable compute servers to interact with each other and with the public network.
-Introduction
-------------
+..
+ (perhaps some of this should be moved elsewhere)
+ Introduction
+ ------------
-Nova consists of seven main components, with the Cloud Controller component representing the global state and interacting with all other components. API Server acts as the Web services front end for the cloud controller. Compute Controller provides compute server resources, and the Object Store component provides storage services. Auth Manager provides authentication and authorization services. Volume Controller provides fast and permanent block-level storage for the comput servers. Network Controller provides virtual networks to enable compute servers to interact with each other and with the public network. Scheduler selects the most suitable compute controller to host an instance.
+ Nova consists of seven main components, with the Cloud Controller component representing the global state and interacting with all other components. API Server acts as the Web services front end for the cloud controller. Compute Controller provides compute server resources, and the Object Store component provides storage services. Auth Manager provides authentication and authorization services. Volume Controller provides fast and permanent block-level storage for the compute servers. Network Controller provides virtual networks to enable compute servers to interact with each other and with the public network. Scheduler selects the most suitable compute controller to host an instance.
-.. todo:: Insert Figure 1 image from "An OpenStack Network Overview" contributed by Citrix
+ .. todo:: Insert Figure 1 image from "An OpenStack Network Overview" contributed by Citrix
-Nova is built on a shared-nothing, messaging-based architecture. All of the major components, that is Compute Controller, Volume Controller, Network Controller, and Object Store can be run on multiple servers. Cloud Controller communicates with Object Store via HTTP (Hyper Text Transfer Protocol), but it communicates with Scheduler, Network Controller, and Volume Controller via AMQP (Advanced Message Queue Protocol). To avoid blocking each component while waiting for a response, Nova uses asynchronous calls, with a call-back that gets triggered when a response is received.
+ Nova is built on a shared-nothing, messaging-based architecture. All of the major components, that is Compute Controller, Volume Controller, Network Controller, and Object Store can be run on multiple servers. Cloud Controller communicates with Object Store via HTTP (Hyper Text Transfer Protocol), but it communicates with Scheduler, Network Controller, and Volume Controller via AMQP (Advanced Message Queue Protocol). To avoid blocking each component while waiting for a response, Nova uses asynchronous calls, with a call-back that gets triggered when a response is received.
-To achieve the shared-nothing property with multiple copies of the same component, Nova keeps all the cloud system state in a distributed data store. Updates to system state are written into this store, using atomic transactions when required. Requests for system state are read out of this store. In limited cases, the read results are cached within controllers for short periods of time (for example, the current list of system users.)
+ To achieve the shared-nothing property with multiple copies of the same component, Nova keeps all the cloud system state in a distributed data store. Updates to system state are written into this store, using atomic transactions when required. Requests for system state are read out of this store. In limited cases, the read results are cached within controllers for short periods of time (for example, the current list of system users.)
-.. note:: The database schema is available on the `OpenStack Wiki <http://wiki.openstack.org/NovaDatabaseSchema>_`.
+ .. note:: The database schema is available on the `OpenStack Wiki <http://wiki.openstack.org/NovaDatabaseSchema>_`.
Nova Network Strategies
-----------------------
-In Nova, users organize their cloud resources in projects. A Nova project consists of a number of VM instances created by a user. For each VM instance, Nova assigns to it a private IP address. (Currently, Nova only supports Linux bridge networking that allows the virtual interfaces to connect to the outside network through the physical interface. Other virtual network technologies, such as Open vSwitch, could be supported in the future.)
-
Currently, Nova supports three kinds of networks, implemented in three "Network Manager" types respectively: Flat Network Manager, Flat DHCP Network Manager, and VLAN Network Manager. The three kinds of networks can c-exist in a cloud system. However, the scheduler for selecting the type of network for a given project is not yet implemented. Here is a brief description of each of the different network strategies, with a focus on the VLAN Manager in a separate section.
-Flat Network
-++++++++++++
-
-IP addresses for VM instances are grabbed from a subnet specified by the network administrator, and injected into the image on launch. All instances of the system are attached to the same Linux networking bridge, configured manually by the network administrator both on the network controller hosting the network and on the computer controllers hosting the instances.
-
-Flat Network with DHCP
-++++++++++++++++++++++
-
-IP addresses for VM instances are grabbed from a subnet specified by the network administrator. Similar to the flat network, a single Linux networking bridge is created and configured manually by the network administrator and used for all instances. A DHCP server is started to pass out IP addresses to VM instances from the specified subnet.
-
-VLAN Network
-++++++++++++
-
-Each project gets its own VLAN, Linux networking bridge, and subnet. The subnets are specified by the network administrator, and are assigned dynamically to a project when required. A DHCP Server is started for each VLAN to pass out IP addresses to VM instances from the subnet assigned to the project. All instances belonging to one project are bridged into the same VLAN for that project. The Linux networking bridges and VLANs are created by Nova when required, described in more detail in Nova VLAN Network Management Implementation.
-
-Nova VLAN Networks
-------------------
-
-Because the flat network and flat DhCP network are simple to understand and yet do not scale well enough for real-world cloud systems, this section focuses on the VLAN network implementation by the VLAN Network Manager.
-
-In the VLAN network mode, all the VM instances of a project are connected together in a VLAN with the specified private subnet. Each running VM instance is assigned an IP address within the given private subnet.
-
-.. todo:: Insert Figure 2 from "An OpenStack Network Overview" contributed by Citrix
-
-While network traffic between VM instances belonging to the same VLAN is always open, Nova can enforce isolation of network traffic between different projects by enforcing one VLAN per project.
-
-In addition, the network administrator can specify a pool of public IP addresses that users may allocate and then assign to VMs, either at boot or dynamically at run-time. This capability is similar to Amazon's 'elastic IPs'. A public IP address may be associated with a running instances, allowing the VM instance to be accessed from the public network. The public IP addresses are accessible from the network host and NATed to the private IP address of the project.
-
-.. todo: Describe how a public IP address could be associated with a project (a VLAN)
-
-Nova VLAN Network Management Implementation
--------------------------------------------
-
-This section describes the current (November 2010) implementation of the network structure of Nova.
-
-The network assignment to a project, and IP address assignment to a VM instance, are triggered when a user starts to run a VM instance. When running a VM instance, a user needs to specify a project for the instances, and the security groups (described in Security Groups) when the instance wants to join. If this is the first instance to be created for the project, then Nova (the cloud controller) needs to find a network controller to be the network host for the project; it then sets up a private network by finding an unused VLAN id, an unused subnet, and then the controller assigns them to the project, it also assigns a name to the project's Linux bridge, and allocating a private IP within the project's subnet for the new instance.
+Read more about Nova network strategies here:
-If the instance the user wants to start is not the project's first, a subnet and a VLAN must have already been assigned to the project; therefore the system needs only to find an available IP address within the subnet and assign it to the new starting instance. If there is no private IP available within the subnet, an exception will be raised to the cloud controller, and the VM creation cannot proceed.
+.. toctree::
+ :maxdepth: 1
+ network.flat.rst
+ network.vlan.rst
-.. todo: insert the name of the Linux bridge, is it always named bridge?
+Network Management Commands
+---------------------------
-Managing Networks
-=================
+Admins and Network Administrators can use the 'nova-manage' command to manage network resources:
VPN Management
~~~~~~~~~~~~~~
diff --git a/doc/source/adminguide/multi.node.install.rst b/doc/source/adminguide/multi.node.install.rst
index d2afb6212..fa0652bc8 100644
--- a/doc/source/adminguide/multi.node.install.rst
+++ b/doc/source/adminguide/multi.node.install.rst
@@ -15,8 +15,8 @@
License for the specific language governing permissions and limitations
under the License.
-Installing Nova Development Snapshot on Multiple Servers
-========================================================
+Installing Nova on Multiple Servers
+===================================
When you move beyond evaluating the technology and into building an actual
production environemnt, you will need to know how to configure your datacenter
@@ -48,16 +48,23 @@ Step 1 Use apt-get to get the latest code
-----------------------------------------
1. Setup Nova PPA with https://launchpad.net/~nova-core/+archive/ppa.
+
+::
+
+ sudo apt-get install python-software-properties
+ sudo add-apt-repository ppa:nova-core/ppa
+
2. Run update.
::
- update
+ sudo apt-get update
3. Install nova-pkgs (dependencies should be automatically installed).
::
+ sudo apt-get install python-greenlet
sudo apt-get install nova-common nova-doc python-nova nova-api nova-network nova-objectstore nova-scheduler
It is highly likely that there will be errors when the nova services come up since they are not yet configured. Don't worry, you're only at step 1!
@@ -103,18 +110,49 @@ Note: CC_ADDR=<the external IP address of your cloud controller>
--FAKE_subdomain=ec2 # workaround for ec2/euca api
+5. Create a nova group
-5. nova-objectstore specific flags < no specific config needed >
+::
+
+ sudo addgroup nova
+
+6. nova-objectstore specific flags < no specific config needed >
Config files should be have their owner set to root:nova, and mode set to 0640, since they contain your MySQL server's root password.
+::
+
+ cd /etc/nova
+ chown -R root:nova .
+
Step 3 Setup the sql db
-----------------------
-1. First you 'preseed' (using vishy's directions here). Run this as root.
+1. First you 'preseed' (using vishy's :doc:`../quickstart`). Run this as root.
+
+::
+
+ sudo apt-get install bzr git-core
+ sudo bash
+ export MYSQL_PASS=nova
+
+
+::
+
+ cat <<MYSQL_PRESEED | debconf-set-selections
+ mysql-server-5.1 mysql-server/root_password password $MYSQL_PASS
+ mysql-server-5.1 mysql-server/root_password_again password $MYSQL_PASS
+ mysql-server-5.1 mysql-server/start_on_boot boolean true
+ MYSQL_PRESEED
+
2. Install mysql
-3. Configure mysql so that external users can access it, and setup nova db.
+
+::
+
+ sudo apt-get install -y mysql-server
+
4. Edit /etc/mysql/my.cnf and set this line: bind-address=0.0.0.0 and then sighup or restart mysql
+
5. create nova's db
::
@@ -130,6 +168,19 @@ Step 3 Setup the sql db
GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;
SET PASSWORD FOR 'root'@'%' = PASSWORD('nova');
+7. branch and install Nova
+
+::
+
+ sudo -i
+ cd ~
+ export USE_MYSQL=1
+ export MYSQL_PASS=nova
+ git clone https://github.com/vishvananda/novascript.git
+ cd novascript
+ ./nova.sh branch
+ ./nova.sh install
+ ./nova.sh run
Step 4 Setup Nova environment
-----------------------------
diff --git a/doc/source/adminguide/network.flat.rst b/doc/source/adminguide/network.flat.rst
new file mode 100644
index 000000000..1b8661a40
--- /dev/null
+++ b/doc/source/adminguide/network.flat.rst
@@ -0,0 +1,60 @@
+..
+ Copyright 2010 United States Government as represented by the
+ Administrator of the National Aeronautics and Space Administration.
+ All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+
+Flat Network Mode (Original and Flat)
+=====================================
+
+Flat network mode removes most of the complexity of VLAN mode by simply
+bridging all instance interfaces onto a single network.
+
+There are two variations of flat mode that differ mostly in how IP addresses
+are given to instances.
+
+
+Original Flat Mode
+------------------
+IP addresses for VM instances are grabbed from a subnet specified by the network administrator, and injected into the image on launch. All instances of the system are attached to the same Linux networking bridge, configured manually by the network administrator both on the network controller hosting the network and on the compute controllers hosting the instances. To recap:
+
+* Each compute host creates a single bridge for all instances to use to attach to the external network.
+* The networking configuration is injected into the instance before it is booted or it is obtained by a guest agent installed in the instance.
+
+Note that the configuration injection currently only works on linux-style systems that keep networking
+configuration in /etc/network/interfaces.
+
+
+Flat DHCP Mode
+--------------
+IP addresses for VM instances are grabbed from a subnet specified by the network administrator. Similar to the flat network, a single Linux networking bridge is created and configured manually by the network administrator and used for all instances. A DHCP server is started to pass out IP addresses to VM instances from the specified subnet. To recap:
+
+* Like flat mode, all instances are attached to a single bridge on the compute node.
+* In addition a DHCP server is running to configure instances.
+
+Implementation
+--------------
+
+The network nodes do not act as a default gateway in flat mode. Instances
+are given public IP addresses.
+
+Compute nodes have iptables/ebtables entries created per project and
+instance to protect against IP/MAC address spoofing and ARP poisoning.
+
+
+Examples
+--------
+
+.. todo:: add flat network mode configuration examples
diff --git a/doc/source/adminguide/network.vlan.rst b/doc/source/adminguide/network.vlan.rst
new file mode 100644
index 000000000..a7cccc098
--- /dev/null
+++ b/doc/source/adminguide/network.vlan.rst
@@ -0,0 +1,179 @@
+..
+ Copyright 2010 United States Government as represented by the
+ Administrator of the National Aeronautics and Space Administration.
+ All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+
+VLAN Network Mode
+=================
+VLAN Network Mode is the default mode for Nova. It provides a private network
+segment for each project's instances that can be accessed via a dedicated
+VPN connection from the Internet.
+
+In this mode, each project gets its own VLAN, Linux networking bridge, and subnet. The subnets are specified by the network administrator, and are assigned dynamically to a project when required. A DHCP Server is started for each VLAN to pass out IP addresses to VM instances from the subnet assigned to the project. All instances belonging to one project are bridged into the same VLAN for that project. The Linux networking bridges and VLANs are created by Nova when required, described in more detail in Nova VLAN Network Management Implementation.
+
+..
+ (this text revised above)
+ Because the flat network and flat DHCP network are simple to understand and yet do not scale well enough for real-world cloud systems, this section focuses on the VLAN network implementation by the VLAN Network Manager.
+
+
+ In the VLAN network mode, all the VM instances of a project are connected together in a VLAN with the specified private subnet. Each running VM instance is assigned an IP address within the given private subnet.
+
+.. todo:: Insert Figure 2 from "An OpenStack Network Overview" contributed by Citrix
+
+While network traffic between VM instances belonging to the same VLAN is always open, Nova can enforce isolation of network traffic between different projects by enforcing one VLAN per project.
+
+In addition, the network administrator can specify a pool of public IP addresses that users may allocate and then assign to VMs, either at boot or dynamically at run-time. This capability is similar to Amazon's 'elastic IPs'. A public IP address may be associated with a running instance, allowing the VM instance to be accessed from the public network. The public IP addresses are accessible from the network host and NATed to the private IP address of the project.
+
+.. todo:: Describe how a public IP address could be associated with a project (a VLAN)
+
+This is the default networking mode and supports the most features. For multiple machine installation, it requires a switch that supports host-managed vlan tagging. In this mode, nova will create a vlan and bridge for each project. The project gets a range of private ips that are only accessible from inside the vlan. In order for a user to access the instances in their project, a special vpn instance (code named :ref:`cloudpipe <cloudpipe>`) needs to be created. Nova generates a certificate and key for the user to access the vpn and starts the vpn automatically. More information on cloudpipe can be found :ref:`here <cloudpipe>`.
+
+The following diagram illustrates the communication that occurs between the vlan (the dashed box) and the public internet (represented by the two clouds):
+
+.. image:: /images/cloudpipe.png
+ :width: 100%
+
+Goals
+-----
+
+* each project is in a protected network segment
+
+ * RFC-1918 IP space
+ * public IP via NAT
+ * no default inbound Internet access without public NAT
+ * limited (project-admin controllable) outbound Internet access
+ * limited (project-admin controllable) access to other project segments
+ * all connectivity to instance and cloud API is via VPN into the project segment
+
+* common DMZ segment for support services (only visible from project segment)
+
+ * metadata
+ * dashboard
+
+
+Limitations
+-----------
+
+* Projects / cluster limited to available VLANs in switching infrastructure
+* Requires VPN for access to project segment
+
+
+Implementation
+--------------
+Currently Nova segregates project VLANs using 802.1q VLAN tagging in the
+switching layer. Compute hosts create VLAN-specific interfaces and bridges
+as required.
+
+The network nodes act as default gateway for project networks and contain
+all of the routing and firewall rules implementing security groups. The
+network node also handles DHCP to provide instance IPs for each project.
+
+VPN access is provided by running a small instance called CloudPipe
+on the IP immediately following the gateway IP for each project. The
+network node maps a dedicated public IP/port to the CloudPipe instance.
+
+Compute nodes have per-VLAN interfaces and bridges created as required.
+These do NOT have IP addresses in the host to protect host access.
+Compute nodes have iptables/ebtables entries created per project and
+instance to protect against IP/MAC address spoofing and ARP poisoning.
+
+The network assignment to a project, and IP address assignment to a VM instance, are triggered when a user starts to run a VM instance. When running a VM instance, a user needs to specify a project for the instances, and the security groups (described in Security Groups) that the instance wants to join. If this is the first instance to be created for the project, then Nova (the cloud controller) needs to find a network controller to be the network host for the project; it then sets up a private network by finding an unused VLAN id, an unused subnet, and then the controller assigns them to the project, it also assigns a name to the project's Linux bridge, and allocating a private IP within the project's subnet for the new instance.
+
+If the instance the user wants to start is not the project's first, a subnet and a VLAN must have already been assigned to the project; therefore the system needs only to find an available IP address within the subnet and assign it to the new starting instance. If there is no private IP available within the subnet, an exception will be raised to the cloud controller, and the VM creation cannot proceed.
+
+.. todo:: insert the name of the Linux bridge, is it always named bridge?
+
+External Infrastructure
+-----------------------
+
+Nova assumes the following is available:
+
+* DNS
+* NTP
+* Internet connectivity
+
+
+Example
+-------
+
+This example network configuration demonstrates most of the capabilities
+of VLAN Mode. It splits administrative access to the nodes onto a dedicated
+management network and uses dedicated network nodes to handle all
+routing and gateway functions.
+
+It uses a 10GB network for instance traffic and a 1GB network for management.
+
+
+Hardware
+~~~~~~~~
+
+* All nodes have a minimum of two NICs for management and production.
+
+ * management is 1GB
+ * production is 10GB
+ * add additional NICs for bonding or HA/performance
+
+* network nodes should have an additional NIC dedicated to public Internet traffic
+* switch needs to support enough simultaneous VLANs for number of projects
+* production network configured as 802.1q trunk on switch
+
+
+Operation
+~~~~~~~~~
+
+The network node controls the project network configuration:
+
+* assigns each project a VLAN and private IP range
+* starts dnsmasq on project VLAN to serve private IP range
+* configures iptables on network node for default project access
+* launches CloudPipe instance and configures iptables access
+
+When starting an instance the network node:
+
+* sets up a VLAN interface and bridge on each host as required when an
+ instance is started on that host
+* assigns private IP to instance
+* generates MAC address for instance
+* update dnsmasq with IP/MAC for instance
+
+When starting an instance the compute node:
+
+* sets up a VLAN interface and bridge on each host as required when an
+ instance is started on that host
+
+
+Setup
+~~~~~
+
+* Assign VLANs in the switch:
+
+ * public Internet segment
+ * production network
+ * management network
+ * cluster DMZ
+
+* Assign a contiguous range of VLANs to Nova for project use.
+* Configure management NIC ports as management VLAN access ports.
+* Configure management VLAN with Internet access as required
+* Configure production NIC ports as 802.1q trunk ports.
+* Configure Nova (need to add specifics here)
+
+ * public IPs
+ * instance IPs
+ * project network size
+ * DMZ network
+
+.. todo:: need specific Nova configuration added
diff --git a/doc/source/adminguide/single.node.install.rst b/doc/source/adminguide/single.node.install.rst
index 9ecb6d49a..27597962a 100644
--- a/doc/source/adminguide/single.node.install.rst
+++ b/doc/source/adminguide/single.node.install.rst
@@ -1,12 +1,344 @@
-Single Node Installation
-========================
-
-.. todo:: need extended notes on running a single machine
+Installing Nova on a Single Host
+================================
Nova can be run on a single machine, and it is recommended that new users practice managing this type of installation before graduating to multi node systems.
-The fastest way to get a test cloud running is through our :doc:`../quickstart`.
+The fastest way to get a test cloud running is through our :doc:`../quickstart`. But for more detail on installing the system read this doc.
+
+
+Step 1 and 2: Get the latest Nova code system software
+------------------------------------------------------
+
+Depending on your system, the method for accomplishing this varies.
+
+.. toctree::
+ :maxdepth: 1
+
+ distros/ubuntu.10.04
+ distros/ubuntu.10.10
+ distros/others
+
+
+Step 3: Build and install Nova services
+---------------------------------------
+
+Switch to the base nova source directory.
+
+Then type or copy/paste in the following line to compile the Python code for OpenStack Compute.
+
+::
+
+ sudo python setup.py build
+ sudo python setup.py install
+
+
+When the installation is complete, you'll see the following lines:
+
+::
+
+ Installing nova-network script to /usr/local/bin
+ Installing nova-volume script to /usr/local/bin
+ Installing nova-objectstore script to /usr/local/bin
+ Installing nova-manage script to /usr/local/bin
+ Installing nova-scheduler script to /usr/local/bin
+ Installing nova-dhcpbridge script to /usr/local/bin
+ Installing nova-compute script to /usr/local/bin
+ Installing nova-instancemonitor script to /usr/local/bin
+ Installing nova-api script to /usr/local/bin
+ Installing nova-import-canonical-imagestore script to /usr/local/bin
+
+ Installed /usr/local/lib/python2.6/dist-packages/nova-2010.1-py2.6.egg
+ Processing dependencies for nova==2010.1
+ Finished processing dependencies for nova==2010.1
+
+
+Step 4: Create a Nova administrator
+-----------------------------------
+Type or copy/paste in the following line to create a user named "anne."::
+
+ sudo nova-manage user admin anne
+
+You see an access key and a secret key export, such as these made-up ones:::
+
+ export EC2_ACCESS_KEY=4e6498a2-blah-blah-blah-17d1333t97fd
+ export EC2_SECRET_KEY=0a520304-blah-blah-blah-340sp34k05bbe9a7
+
+
+Step 5: Create a project with the user you created
+--------------------------------------------------
+Type or copy/paste in the following line to create a project named IRT (for Ice Road Truckers, of course) with the newly-created user named anne.
+
+::
+
+ sudo nova-manage project create IRT anne
+
+::
+
+ Generating RSA private key, 1024 bit long modulus
+ .....++++++
+ ..++++++
+ e is 65537 (0x10001)
+ Using configuration from ./openssl.cnf
+ Check that the request matches the signature
+ Signature ok
+ The Subject's Distinguished Name is as follows
+ countryName :PRINTABLE:'US'
+ stateOrProvinceName :PRINTABLE:'California'
+ localityName :PRINTABLE:'MountainView'
+ organizationName :PRINTABLE:'AnsoLabs'
+ organizationalUnitName:PRINTABLE:'NovaDev'
+ commonName :PRINTABLE:'anne-2010-10-12T21:12:35Z'
+ Certificate is to be certified until Oct 12 21:12:35 2011 GMT (365 days)
+
+ Write out database with 1 new entries
+ Data Base Updated
+
+
+Step 6: Unzip the nova.zip
+--------------------------
+You should have a nova.zip file in your current working directory. Unzip it with this command:
-Install Dependencies
+::
+
+ unzip nova.zip
+
+
+You'll see these files extract.
+
+::
+
+ Archive: nova.zip
+ extracting: novarc
+ extracting: pk.pem
+ extracting: cert.pem
+ extracting: nova-vpn.conf
+ extracting: cacert.pem
+
+
+Step 7: Source the rc file
+--------------------------
+Type or copy/paste the following to source the novarc file in your current working directory.
+
+::
+
+ . novarc
+
+
+Step 8: Pat yourself on the back :)
+-----------------------------------
+Congratulations, your cloud is up and running, you’ve created an admin user, retrieved the user's credentials and put them in your environment.
+
+Now you need an image.
+
+
+Step 9: Get an image
--------------------
+To make things easier, we've provided a small image on the Rackspace CDN. Use this command to get it on your server.
+
+::
+
+ wget http://c2477062.cdn.cloudfiles.rackspacecloud.com/images.tgz
+
+
+::
+
+ --2010-10-12 21:40:55-- http://c2477062.cdn.cloudfiles.rackspacecloud.com/images.tgz
+ Resolving cblah2.cdn.cloudfiles.rackspacecloud.com... 208.111.196.6, 208.111.196.7
+ Connecting to cblah2.cdn.cloudfiles.rackspacecloud.com|208.111.196.6|:80... connected.
+ HTTP request sent, awaiting response... 200 OK
+ Length: 58520278 (56M) [application/x-gzip]
+ Saving to: `images.tgz'
+
+ 100%[======================================>] 58,520,278 14.1M/s in 3.9s
+
+ 2010-10-12 21:40:59 (14.1 MB/s) - `images.tgz' saved [58520278/58520278]
+
+
+
+Step 10: Decompress the image file
+----------------------------------
+Use this command to extract the image files:::
+
+ tar xvzf images.tgz
+
+You get a directory listing like so:::
+
+ images
+ |-- aki-lucid
+ | |-- image
+ | `-- info.json
+ |-- ami-tiny
+ | |-- image
+ | `-- info.json
+ `-- ari-lucid
+ |-- image
+ `-- info.json
+
+Step 11: Send commands to upload sample image to the cloud
+----------------------------------------------------------
+
+Type or copy/paste the following commands to create a manifest for the kernel.::
+
+ euca-bundle-image -i images/aki-lucid/image -p kernel --kernel true
+
+You should see this in response:::
+
+ Checking image
+ Tarring image
+ Encrypting image
+ Splitting image...
+ Part: kernel.part.0
+ Generating manifest /tmp/kernel.manifest.xml
+
+Type or copy/paste the following commands to create a manifest for the ramdisk.::
+
+ euca-bundle-image -i images/ari-lucid/image -p ramdisk --ramdisk true
+
+You should see this in response:::
+
+ Checking image
+ Tarring image
+ Encrypting image
+ Splitting image...
+ Part: ramdisk.part.0
+ Generating manifest /tmp/ramdisk.manifest.xml
+
+Type or copy/paste the following commands to upload the kernel bundle.::
+
+ euca-upload-bundle -m /tmp/kernel.manifest.xml -b mybucket
+
+You should see this in response:::
+
+ Checking bucket: mybucket
+ Creating bucket: mybucket
+ Uploading manifest file
+ Uploading part: kernel.part.0
+ Uploaded image as mybucket/kernel.manifest.xml
+
+Type or copy/paste the following commands to upload the ramdisk bundle.::
+
+ euca-upload-bundle -m /tmp/ramdisk.manifest.xml -b mybucket
+
+You should see this in response:::
+
+ Checking bucket: mybucket
+ Uploading manifest file
+ Uploading part: ramdisk.part.0
+ Uploaded image as mybucket/ramdisk.manifest.xml
+
+Type or copy/paste the following commands to register the kernel and get its ID.::
+
+ euca-register mybucket/kernel.manifest.xml
+
+You should see this in response:::
+
+ IMAGE ami-fcbj2non
+
+Type or copy/paste the following commands to register the ramdisk and get its ID.::
+
+ euca-register mybucket/ramdisk.manifest.xml
+
+You should see this in response:::
+
+ IMAGE ami-orukptrc
+
+Type or copy/paste the following commands to create a manifest for the machine image associated with the ramdisk and kernel IDs that you got from the previous commands.::
+
+ euca-bundle-image -i images/ami-tiny/image -p machine --kernel ami-fcbj2non --ramdisk ami-orukptrc
+
+You should see this in response:::
+
+ Checking image
+ Tarring image
+ Encrypting image
+ Splitting image...
+ Part: machine.part.0
+ Part: machine.part.1
+ Part: machine.part.2
+ Part: machine.part.3
+ Part: machine.part.4
+ Generating manifest /tmp/machine.manifest.xml
+
+Type or copy/paste the following commands to upload the machine image bundle.::
+
+ euca-upload-bundle -m /tmp/machine.manifest.xml -b mybucket
+
+You should see this in response:::
+
+ Checking bucket: mybucket
+ Uploading manifest file
+ Uploading part: machine.part.0
+ Uploading part: machine.part.1
+ Uploading part: machine.part.2
+ Uploading part: machine.part.3
+ Uploading part: machine.part.4
+ Uploaded image as mybucket/machine.manifest.xml
+
+Type or copy/paste the following commands to register the machine image and get its ID.::
+
+ euca-register mybucket/machine.manifest.xml
+
+You should see this in response:::
+
+ IMAGE ami-g06qbntt
+
+Type or copy/paste the following commands to register a SSH keypair for use in starting and accessing the instances.::
+
+ euca-add-keypair mykey > mykey.priv
+ chmod 600 mykey.priv
+
+Type or copy/paste the following commands to run an instance using the keypair and IDs that we previously created.::
+
+ euca-run-instances ami-g06qbntt --kernel ami-fcbj2non --ramdisk ami-orukptrc -k mykey
+
+You should see this in response:::
+
+ RESERVATION r-0at28z12 IRT
+ INSTANCE i-1b0bh8n ami-g06qbntt 10.0.0.3 10.0.0.3 scheduling mykey (IRT, None) m1.small 2010-10-18 19:02:10.443599
+
+Type or copy/paste the following commands to watch as the scheduler launches, and completes booting your instance.::
+
+ euca-describe-instances
+
+You should see this in response:::
+
+ RESERVATION r-0at28z12 IRT
+ INSTANCE i-1b0bh8n ami-g06qbntt 10.0.0.3 10.0.0.3 launching mykey (IRT, cloud02) m1.small 2010-10-18 19:02:10.443599
+
+Type or copy/paste the following commands to see when loading is completed and the instance is running.::
+
+ euca-describe-instances
+
+You should see this in response:::
+
+ RESERVATION r-0at28z12 IRT
+ INSTANCE i-1b0bh8n ami-g06qbntt 10.0.0.3 10.0.0.3 running mykey (IRT, cloud02) 0 m1.small 2010-10-18 19:02:10.443599
+
+Type or copy/paste the following commands to check that the virtual machine is running.::
+
+ virsh list
+
+You should see this in response:::
+
+ Id Name State
+ ----------------------------------
+ 1 2842445831 running
+
+Type or copy/paste the following commands to ssh to the instance using your private key.::
+
+ ssh -i mykey.priv root@10.0.0.3
+
+
+Troubleshooting Installation
+----------------------------
+
+If you see an "error loading the config file './openssl.cnf'" message, you can fix it by copying the openssl.cnf file to the location where Nova expects it, rebooting, and then trying the command again.
+
+::
+
+ cp /etc/ssl/openssl.cnf ~
+ sudo reboot
+
+
+
diff --git a/doc/source/community.rst b/doc/source/community.rst
index 61e2536c2..bfb93414c 100644
--- a/doc/source/community.rst
+++ b/doc/source/community.rst
@@ -61,7 +61,8 @@ Nova on Launchpad
Launchpad is a code hosting service that hosts the Nova source code. From
Launchpad you can report bugs, ask questions, and register blueprints (feature requests).
-`Launchpad Nova Page <http://launchpad.net/nova>`_
+* `Learn about how to use bzr with launchpad <http://wiki.openstack.org/LifeWithBzrAndLaunchpad>`_
+* `Launchpad Nova Page <http://launchpad.net/nova>`_
OpenStack Blog
--------------
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 2f2d97c44..ef447ca81 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -24,9 +24,14 @@ sys.path.insert(0, os.path.abspath('./'))
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-# extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'ext.nova_todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig','sphinx.ext.graphviz', 'ext.nova_autodoc']
+
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'ext.nova_todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig','sphinx.ext.graphviz']
+# autodoc generation is a bit aggressive and a nuisance when doing heavy text edit cycles.
+# execute "export SPHINX_DEBUG=1" in your terminal to disable
+if not os.getenv('SPHINX_DEBUG'):
+ extensions += ['ext.nova_autodoc']
+
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
diff --git a/doc/source/nova.concepts.rst b/doc/source/nova.concepts.rst
index ce251dd14..ddf0f1b82 100644
--- a/doc/source/nova.concepts.rst
+++ b/doc/source/nova.concepts.rst
@@ -31,7 +31,7 @@ run on your host operating system, and exposes functionality over a web API.
This document does not attempt to explain fundamental concepts of cloud
computing, IaaS, virtualization, or other related technologies. Instead, it
-focues on describing how Nova's implementation of those concepts is achieved.
+focuses on describing how Nova's implementation of those concepts is achieved.
This page outlines concepts that you will need to understand as a user or
administrator of an OpenStack installation. Each section links to more more
@@ -121,7 +121,7 @@ This is similar to the flat mode, in that all instances are attached to the same
VLAN DHCP Mode
~~~~~~~~~~~~~~
-This is the default networking mode and supports the most features. For multiple machine installation, it requires a switch that supports host-managed vlan tagging. In this mode, nova will create a vlan and bridge for each project. The project gets a range of private ips that are only accessible from inside the vlan. In order for a user to access the instances in their project, a special vpn instance (code named :ref:`cloudpipe <cloudpipe>`) needs to be created. Nova generates a certificate and key for the userto access the vpn and starts the vpn automatically. More information on cloudpipe can be found :ref:`here <cloudpipe>`.
+This is the default networking mode and supports the most features. For multiple machine installation, it requires a switch that supports host-managed vlan tagging. In this mode, nova will create a vlan and bridge for each project. The project gets a range of private ips that are only accessible from inside the vlan. In order for a user to access the instances in their project, a special vpn instance (code named :ref:`cloudpipe <cloudpipe>`) needs to be created. Nova generates a certificate and key for the user to access the vpn and starts the vpn automatically. More information on cloudpipe can be found :ref:`here <cloudpipe>`.
The following diagram illustrates how the communication that occurs between the vlan (the dashed box) and the public internet (represented by the two clouds)
@@ -168,8 +168,7 @@ Concept: Plugins
Concept: IPC/RPC
----------------
-Rabbit!
-
+Nova utilizes the RabbitMQ implementation of the AMQP messaging standard for performing communication between the various nova services. This message queuing service is used for both local and remote communication because Nova is designed so that there is no requirement that any of the services exist on the same physical machine. RabbitMQ in particular is very robust and provides the efficiency and reliability that Nova needs. More information about RabbitMQ can be found at http://www.rabbitmq.com/.
Concept: Fakes
--------------
diff --git a/doc/source/quickstart.rst b/doc/source/quickstart.rst
index acf303f91..ae2b64d8a 100644
--- a/doc/source/quickstart.rst
+++ b/doc/source/quickstart.rst
@@ -18,8 +18,8 @@
Nova Quickstart
===============
-.. todo::
-
+.. todo::
+ P1 (this is one example of how to use priority syntax)
* Document the assumptions about pluggable interfaces (sqlite3 instead of
mysql, etc) (todd)
* Document env vars that can change things (USE_MYSQL, HOST_IP) (todd)
@@ -56,11 +56,9 @@ By tweaking the environment that nova.sh run in, you can build slightly
different configurations (though for more complex setups you should see
:doc:`/adminguide/getting.started` and :doc:`/adminguide/multi.node.install`).
-HOST_IP
-~~~~~~~
-
-**Default**: address of first interface from the ifconfig command
-**Values**: 127.0.0.1, or any other valid address
+* HOST_IP
+ * Default: address of first interface from the ifconfig command
+ * Values: 127.0.0.1, or any other valid address
TEST
~~~~
@@ -166,3 +164,15 @@ Then you can destroy the screen:
If things get particularly messed up, you might need to do some more intense
cleanup. Be careful, the following command will manually destroy all runnning
virsh instances and attempt to delete all vlans and bridges.
+
+::
+
+ ./nova.sh scrub
+
+You can edit files in the install directory or do a bzr pull to pick up new versions. You only need to do
+
+::
+
+ ./nova.sh run
+
+to run nova after the first install. The database should be cleaned up on each run. \ No newline at end of file
diff --git a/doc/source/service.architecture.rst b/doc/source/service.architecture.rst
index b621dcfa5..28a32bec6 100644
--- a/doc/source/service.architecture.rst
+++ b/doc/source/service.architecture.rst
@@ -17,7 +17,7 @@ Nova’s Cloud Fabric is composed of the following major components:
API Server
--------------------------------------------------
-At the heart of the cloud framework is an API Server. This API Server makes command and control [#f80]_ of the hypervisor, storage, and networking programmatically available to users in realization of the definition of cloud computing.
+At the heart of the cloud framework is an API Server. This API Server makes command and control of the hypervisor, storage, and networking programmatically available to users in realization of the definition of cloud computing.
The API endpoints are basic http web services which handle authentication, authorization, and basic command and control functions using various API interfaces under the Amazon, Rackspace, and related models. This enables API compatibility with multiple existing tool sets created for interaction with offerings from other vendors. This broad compatibility prevents vendor lock-in.
@@ -48,7 +48,7 @@ The Network Controller manages the networking resources on host machines. The A
Volume Workers
--------------------------------------------------
-Volume Workers interact with iSCSI storage to manage LVM-based [#f89]_ instance volumes. Specific functions include:
+Volume Workers interact with iSCSI storage to manage LVM-based instance volumes. Specific functions include:
* Create Volumes
* Delete Volumes
@@ -57,4 +57,4 @@ Volume Workers interact with iSCSI storage to manage LVM-based [#f89]_ instance
Volumes may easily be transferred between instances, but may be attached to only a single instance at a time.
-.. todo:: image store description
+.. todo:: P2: image store description
diff --git a/nova/api/__init__.py b/nova/api/__init__.py
index 1dabd3d21..7e75445a8 100644
--- a/nova/api/__init__.py
+++ b/nova/api/__init__.py
@@ -84,7 +84,6 @@ class API(wsgi.Router):
mapper.connect("/cloudpipe/{path_info:.*}", controller=cloudpipe.API())
super(API, self).__init__(mapper)
- @utils.fix_wsgify_docstr
@webob.dec.wsgify
def osapi_versions(self, req):
"""Respond to a request for all OpenStack API versions."""
@@ -96,7 +95,6 @@ class API(wsgi.Router):
"attributes": dict(version=["status", "id"])}}
return wsgi.Serializer(req.environ, metadata).to_content_type(response)
- @utils.fix_wsgify_docstr
@webob.dec.wsgify
def ec2api_versions(self, req):
"""Respond to a request for all EC2 versions."""
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index fbe4caa48..e2eaa7c5c 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -679,7 +679,7 @@ class CloudController(object):
context.project_id)
for floating_ip_ref in iterator:
address = floating_ip_ref['address']
- instance_id = None
+ ec2_id = None
if (floating_ip_ref['fixed_ip']
and floating_ip_ref['fixed_ip']['instance']):
internal_id = floating_ip_ref['fixed_ip']['instance']['ec2_id']
@@ -717,8 +717,8 @@ class CloudController(object):
"args": {"floating_address": floating_ip_ref['address']}})
return {'releaseResponse': ["Address released."]}
- def associate_address(self, context, ec2_id, public_ip, **kwargs):
- internal_id = ec2_id_to_internal_id(ec2_id)
+ def associate_address(self, context, instance_id, public_ip, **kwargs):
+ internal_id = ec2_id_to_internal_id(instance_id)
instance_ref = db.instance_get_by_internal_id(context, internal_id)
fixed_address = db.instance_get_fixed_address(context,
instance_ref['id'])
diff --git a/nova/compute/disk.py b/nova/compute/disk.py
index 0b8568d33..4338d39f0 100644
--- a/nova/compute/disk.py
+++ b/nova/compute/disk.py
@@ -165,6 +165,11 @@ def inject_data(image, key=None, net=None, partition=None, execute=None):
@defer.inlineCallbacks
def _inject_key_into_fs(key, fs, execute=None):
+ """Add the given public ssh key to root's authorized_keys.
+
+ key is an ssh key string.
+ fs is the path to the base of the filesystem into which to inject the key.
+ """
sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh')
yield execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter
yield execute('sudo chown root %s' % sshdir)
@@ -175,6 +180,13 @@ def _inject_key_into_fs(key, fs, execute=None):
@defer.inlineCallbacks
def _inject_net_into_fs(net, fs, execute=None):
- netfile = os.path.join(os.path.join(os.path.join(
- fs, 'etc'), 'network'), 'interfaces')
+ """Inject /etc/network/interfaces into the filesystem rooted at fs.
+
+ net is the contents of /etc/network/interfaces.
+ """
+ netdir = os.path.join(os.path.join(fs, 'etc'), 'network')
+ yield execute('sudo mkdir -p %s' % netdir) # existing dir doesn't matter
+ yield execute('sudo chown root:root %s' % netdir)
+ yield execute('sudo chmod 755 %s' % netdir)
+ netfile = os.path.join(netdir, 'interfaces')
yield execute('sudo tee %s' % netfile, net)
diff --git a/nova/service.py b/nova/service.py
index 0eb3a2762..9454d4049 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -19,7 +19,8 @@
"""
A service is a very thin wrapper around a Manager object. It exposes the
manager's public methods to other components of the system via rpc. It will
-report state periodically to the database and is responsible for initiating any periodic tasts that need to be executed on a given host.
+report state periodically to the database and is responsible for initiating
+any periodic tasks that need to be executed on a given host.
This module contains Service, a generic baseclass for all workers.
"""
diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py
index 2d61d2675..2c6d9959b 100644
--- a/nova/tests/cloud_unittest.py
+++ b/nova/tests/cloud_unittest.py
@@ -91,6 +91,37 @@ class CloudTestCase(test.TrialTestCase):
# NOTE(vish): create depends on pool, so just call helper directly
return cloud._gen_key(self.context, self.context.user.id, name)
+ def test_describe_addresses(self):
+ """Makes sure describe addresses runs without raising an exception"""
+ address = "10.10.10.10"
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'host': FLAGS.host})
+ self.cloud.allocate_address(self.context)
+ self.cloud.describe_addresses(self.context)
+ self.cloud.release_address(self.context,
+ public_ip=address)
+ db.floating_ip_destroy(self.context, address)
+
+ def test_associate_disassociate_address(self):
+ """Verifies associate runs cleanly without raising an exception"""
+ address = "10.10.10.10"
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'host': FLAGS.host})
+ self.cloud.allocate_address(self.context)
+ inst = db.instance_create(self.context, {})
+ ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id'])
+ self.cloud.associate_address(self.context,
+ instance_id=ec2_id,
+ public_ip=address)
+ self.cloud.disassociate_address(self.context,
+ public_ip=address)
+ self.cloud.release_address(self.context,
+ public_ip=address)
+ db.instance_destroy(self.context, inst['id'])
+ db.floating_ip_destroy(self.context, address)
+
def test_console_output(self):
image_id = FLAGS.default_image
instance_type = FLAGS.default_instance_type
diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py
index b7caed4fd..6f4705719 100644
--- a/nova/tests/network_unittest.py
+++ b/nova/tests/network_unittest.py
@@ -41,7 +41,6 @@ class NetworkTestCase(test.TrialTestCase):
# flags in the corresponding section in nova-dhcpbridge
self.flags(connection_type='fake',
fake_network=True,
- auth_driver='nova.auth.ldapdriver.FakeLdapDriver',
network_size=16,
num_networks=5)
logging.getLogger().setLevel(logging.DEBUG)
@@ -127,6 +126,7 @@ class NetworkTestCase(test.TrialTestCase):
self.network.deallocate_floating_ip(self.context, float_addr)
self.network.deallocate_fixed_ip(self.context, fix_addr)
release_ip(fix_addr)
+ db.floating_ip_destroy(context.get_admin_context(), float_addr)
def test_allocate_deallocate_fixed_ip(self):
"""Makes sure that we can allocate and deallocate a fixed ip"""
diff --git a/nova/utils.py b/nova/utils.py
index d7ebe5b4c..2970b93bb 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -233,10 +233,3 @@ def utf8(value):
return value.encode("utf-8")
assert isinstance(value, str)
return value
-
-def fix_wsgify_docstr(wsgified_func):
- """A decorator to re-assign docstrings that webob.dec.wsgify clobbers."""
- @functools.wraps(wsgified_func.func)
- def _f(*args, **kwargs):
- wsgified_func(*args, **kwargs)
- return _f
diff --git a/run_tests.py b/run_tests.py
index 9a2f40dc9..101ed1a0b 100644
--- a/run_tests.py
+++ b/run_tests.py
@@ -65,7 +65,6 @@ from nova.tests.twistd_unittest import *
from nova.tests.validator_unittest import *
from nova.tests.virt_unittest import *
from nova.tests.volume_unittest import *
-from nova.tests.virt_unittest import *
FLAGS = flags.FLAGS
diff --git a/setup.py b/setup.py
index a333fbf64..9525fde09 100644
--- a/setup.py
+++ b/setup.py
@@ -55,4 +55,5 @@ setup(name='nova',
'bin/nova-network',
'bin/nova-objectstore',
'bin/nova-scheduler',
- 'bin/nova-volume'])
+ 'bin/nova-volume',
+ 'tools/nova-debug'])
diff --git a/tools/nova-debug b/tools/nova-debug
new file mode 100755
index 000000000..3ff68ca35
--- /dev/null
+++ b/tools/nova-debug
@@ -0,0 +1,92 @@
+#!/usr/bin/env bash
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Usage: nova-debug <instance_id> [command] [device]
+#   command: "all" (default) | "mount" | "launch" | "umount"
+#     mount  - destroy the instance and mount its root fs at ./t
+#     launch - enable serial console + root login, boot a debug copy
+#     umount - unmount the fs, detach loop device, relaunch instance
+#     all    - mount, launch for debugging, then umount and relaunch
+#   device: loop device to reuse (otherwise allocated via losetup below)
+# Must be run as root on the compute host holding the instance files.
+INSTANCES_PATH=${INSTANCES_PATH:-/var/lib/nova/instances}
+if [ -z "$1" ]; then echo "specify an instance id to debug"; exit; fi
+
+if [ -n "$3" ]; then DEVICE=$3; fi
+
+CMD="all"
+if [ -n "$2" ]; then CMD=$2; fi
+
+# NOTE(review): variables are unquoted throughout — assumes instance ids
+# and INSTANCES_PATH contain no whitespace or glob characters.
+cd $INSTANCES_PATH/$1
+
+# Mount phase: runs for "all", "mount", and any unrecognized command.
+if [ $CMD != "umount" ] && [ $CMD != "launch" ]; then
+# destroy the instance
+virsh destroy $1
+
+# mount the filesystem
+mkdir t
+# Attach the instance's "disk" image to a free loop device; --show -f
+# prints the allocated device path (e.g. /dev/loop0).
+DEVICE=`losetup --show -f disk`
+echo $DEVICE
+# Map the image's partitions to /dev/mapper/<loop>pN entries.
+kpartx -a $DEVICE
+# ${DEVICE:4} drops the leading "/dev"; the resulting double slash in the
+# mapper path is harmless. Assumes the root fs is on partition 1.
+mount /dev/mapper/${DEVICE:4}p1 t
+
+fi
+# Debug-launch phase: runs for "all" and "launch".
+if [ $CMD != "mount" ] && [ $CMD != "umount" ]; then
+
+# make serial console listen on ttyS0
+# NOTE(review): writes an upstart job, so this assumes an upstart-based
+# guest image (e.g. Ubuntu of this era) — confirm for other distros.
+cat >t/etc/init/ttyS0.conf <<TTY_EOF
+# ttyS0 - getty
+#
+# This service maintains a getty on ttyS0 from the point the system is
+# started until it is shut down again.
+
+start on stopped rc RUNLEVEL=[2345]
+stop on runlevel [!2345]
+
+respawn
+exec /sbin/getty -L 115200 ttyS0 xterm
+TTY_EOF
+
+echo
+# set debug root password
+# passwd -u unlocks the account; the interactive passwd sets a password.
+chroot t passwd -u root
+# TODO(vish): automate this with expect
+chroot t passwd root
+
+# Build debug.xml: flatten libvirt.xml to one line so the sed expression
+# below can match the whole <serial>...</serial> element, then swap the
+# file-backed serial device for a pty so "virsh console" is interactive.
+tr -d '\n' < libvirt.xml > debug.xml
+sed -i "s/<serial type=\"file\">.*<\/serial>/<serial type=\"pty\"><source path=\"\/dev\/pts\/1\"\/><target port=\"0\"\/><\/serial>/g" debug.xml
+
+# Unmount before booting the guest so the fs isn't mounted twice.
+umount t
+
+virsh create debug.xml
+# NOTE(review): assumes the domain name in debug.xml equals the instance
+# id passed as $1 — verify against the libvirt.xml template.
+virsh console $1
+virsh destroy $1
+
+# Remount to undo the debug tweaks made above.
+mount /dev/mapper/${DEVICE:4}p1 t
+
+# clear debug root password
+chroot t passwd -l root
+
+# remove the serial console conf
+rm -f t/etc/init/ttyS0.conf
+
+fi
+# Cleanup phase: runs for "all" and "umount".
+if [ $CMD != "mount" ] && [ $CMD != "launch" ]; then
+
+# unmount the filesystem
+umount t
+# Remove the partition mappings and release the loop device.
+kpartx -d $DEVICE
+losetup -d $DEVICE
+rmdir t
+
+# recreate the instance
+virsh create libvirt.xml
+fi