author      Ed Leafe <ed@leafe.com>    2011-01-19 10:41:30 -0500
committer   Ed Leafe <ed@leafe.com>    2011-01-19 10:41:30 -0500
commit      ea812e66a4d2d84af8c63281c35c4109be1f67db (patch)
tree        6e34fd2cead93cd689c3d60f615a3aba30d46704
parent      c4ecab260bed2118b119c7e415c2682e689fca3b (diff)
parent      7d7fbf5dfd8a8e10f584df5d27d3479c4b2b4d3a (diff)
download    nova-ea812e66a4d2d84af8c63281c35c4109be1f67db.tar.gz
            nova-ea812e66a4d2d84af8c63281c35c4109be1f67db.tar.xz
            nova-ea812e66a4d2d84af8c63281c35c4109be1f67db.zip
merged trunk changes
-rw-r--r--  .mailmap                                                |   2
-rw-r--r--  Authors                                                 |   2
-rw-r--r--  MANIFEST.in                                             |   1
-rwxr-xr-x  bin/nova-manage                                         |  19
-rw-r--r--  nova/api/ec2/cloud.py                                   |   6
-rw-r--r-- [-rwxr-xr-x]  nova/db/migration.py (renamed from tools/iscsidev.sh) |  33
-rw-r--r--  nova/db/sqlalchemy/__init__.py                          |  29
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/README                  |   4
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/__init__.py             |   0
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/manage.py               |   4
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/migrate.cfg             |  20
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/001_austin.py  | 547
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py   | 209
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/__init__.py    |   0
-rw-r--r--  nova/db/sqlalchemy/migration.py                         |  72
-rw-r--r--  nova/db/sqlalchemy/models.py                            |  45
-rw-r--r--  nova/network/manager.py                                 |   2
-rw-r--r--  nova/service.py                                         |  14
-rw-r--r--  nova/tests/__init__.py                                  |   5
-rw-r--r--  nova/tests/fake_flags.py                                |   1
-rw-r--r--  nova/tests/glance/__init__.py                           |  20
-rw-r--r--  nova/tests/glance/stubs.py                              |  37
-rw-r--r--  nova/tests/test_virt.py                                 |  63
-rw-r--r--  nova/tests/test_xenapi.py                               | 106
-rw-r--r--  nova/tests/xenapi/stubs.py                              |  24
-rw-r--r--  nova/virt/fake.py                                       |  48
-rw-r--r--  nova/virt/libvirt.xml.template                          |  10
-rw-r--r--  nova/virt/libvirt_conn.py                               |  22
-rw-r--r--  nova/virt/xenapi/fake.py                                |  87
-rw-r--r--  nova/virt/xenapi/vm_utils.py                            | 255
-rw-r--r--  nova/virt/xenapi/vmops.py                               |   3
-rw-r--r--  nova/virt/xenapi_conn.py                                |   3
-rw-r--r--  nova/volume/driver.py                                   |   5
-rw-r--r--  plugins/xenserver/xenapi/etc/xapi.d/plugins/glance      |  33
-rw-r--r--  run_tests.py                                            |   2
-rw-r--r--  tools/pip-requires                                      |   2
36 files changed, 1519 insertions, 216 deletions
diff --git a/.mailmap b/.mailmap
index 2af2d7cd9..d13219ab0 100644
--- a/.mailmap
+++ b/.mailmap
@@ -16,6 +16,8 @@
<jmckenty@gmail.com> <jmckenty@joshua-mckentys-macbook-pro.local>
<jmckenty@gmail.com> <joshua.mckenty@nasa.gov>
<justin@fathomdb.com> <justinsb@justinsb-desktop>
+<masumotok@nttdata.co.jp> <root@openstack2-api>
+<masumotok@nttdata.co.jp> Masumoto<masumotok@nttdata.co.jp>
<mordred@inaugust.com> <mordred@hudson>
<paul@openstack.org> <pvoccio@castor.local>
<paul@openstack.org> <paul.voccio@rackspace.com>
diff --git a/Authors b/Authors
index bcb2cd0fb..82e07a6b5 100644
--- a/Authors
+++ b/Authors
@@ -26,6 +26,7 @@ Josh Durgin <joshd@hq.newdream.net>
Josh Kearney <josh.kearney@rackspace.com>
Joshua McKenty <jmckenty@gmail.com>
Justin Santa Barbara <justin@fathomdb.com>
+Kei Masumoto <masumotok@nttdata.co.jp>
Ken Pepple <ken.pepple@gmail.com>
Koji Iida <iida.koji@lab.ntt.co.jp>
Lorin Hochstein <lorin@isi.edu>
@@ -34,6 +35,7 @@ Michael Gundlach <michael.gundlach@rackspace.com>
Monsyne Dragon <mdragon@rackspace.com>
Monty Taylor <mordred@inaugust.com>
MORITA Kazutaka <morita.kazutaka@gmail.com>
+Muneyuki Noguchi <noguchimn@nttdata.co.jp>
Nachi Ueno <ueno.nachi@lab.ntt.co.jp> <openstack@lab.ntt.co.jp> <nati.ueno@gmail.com> <nova@u4>
Paul Voccio <paul@openstack.org>
Rick Clark <rick@openstack.org>
diff --git a/MANIFEST.in b/MANIFEST.in
index 199ce30b6..07e4dd516 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -12,6 +12,7 @@ include nova/cloudpipe/bootscript.sh
include nova/cloudpipe/client.ovpn.template
include nova/compute/fakevirtinstance.xml
include nova/compute/interfaces.template
+include nova/db/sqlalchemy/migrate_repo/migrate.cfg
include nova/virt/interfaces.template
include nova/virt/libvirt*.xml.template
include nova/tests/CA/
diff --git a/bin/nova-manage b/bin/nova-manage
index b5842b595..d0901ddfc 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -82,6 +82,7 @@ from nova import quota
from nova import utils
from nova.auth import manager
from nova.cloudpipe import pipelib
+from nova.db import migration
logging.basicConfig()
@@ -519,6 +520,21 @@ class LogCommands(object):
print re.sub('#012', "\n", "\n".join(lines))
+class DbCommands(object):
+ """Class for managing the database."""
+
+ def __init__(self):
+ pass
+
+ def sync(self, version=None):
+ """Sync the database up to the most recent version."""
+ return migration.db_sync(version)
+
+ def version(self):
+ """Print the current database version."""
+ print migration.db_version()
+
+
CATEGORIES = [
('user', UserCommands),
('project', ProjectCommands),
@@ -528,7 +544,8 @@ CATEGORIES = [
('floating', FloatingIpCommands),
('network', NetworkCommands),
('service', ServiceCommands),
- ('log', LogCommands)]
+ ('log', LogCommands),
+ ('db', DbCommands)]
def lazy_match(name, key_value_tuples):
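The new db category plugs into nova-manage's existing CATEGORIES dispatch, so schema management reduces to two subcommands. A minimal usage sketch, assuming sql_connection is configured; the Python calls are the same entry points the commands wrap:

    # Shell usage (category and action names as registered above):
    #   nova-manage db sync       # upgrade the schema to the newest migration
    #   nova-manage db version    # print the current schema version
    # Equivalent Python (2.x, matching the codebase):
    from nova.db import migration
    migration.db_sync()             # or migration.db_sync(version=1)
    print migration.db_version()
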
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index cf3a5d2b3..05976afb9 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -730,7 +730,7 @@ class CloudController(object):
ec2_id = None
if (floating_ip_ref['fixed_ip']
and floating_ip_ref['fixed_ip']['instance']):
- instance_id = floating_ip_ref['fixed_ip']['instance']['ec2_id']
+ instance_id = floating_ip_ref['fixed_ip']['instance']['id']
ec2_id = id_to_ec2_id(instance_id)
address_rv = {'public_ip': address,
'instance_id': ec2_id}
@@ -841,8 +841,8 @@ class CloudController(object):
if image_location is None and 'name' in kwargs:
image_location = kwargs['name']
image_id = self.image_service.register(context, image_location)
- msg = _("Registered image %(image_location)s with id %(image_id)s")
- % locals()
+ msg = _("Registered image %(image_location)s with"
+ " id %(image_id)s") % locals()
LOG.audit(msg, context=context)
return {'imageId': image_id}
diff --git a/tools/iscsidev.sh b/nova/db/migration.py
index 6f5b572df..e54b90cd8 100755..100644
--- a/tools/iscsidev.sh
+++ b/nova/db/migration.py
@@ -1,4 +1,4 @@
-#!/bin/sh
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
@@ -15,27 +15,24 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+"""Database setup and migration commands."""
-# NOTE(vish): This script helps udev create common names for discovered iscsi
-# volumes under /dev/iscsi. To use it, create /dev/iscsi and add
-# a file to /etc/udev/rules.d like so:
-# mkdir /dev/iscsi
-# echo 'KERNEL=="sd*", BUS=="scsi", PROGRAM="/path/to/iscsidev.sh
-# %b",SYMLINK+="iscsi/%c%n"' > /etc/udev/rules.d/55-openiscsi.rules
+from nova import flags
+from nova import utils
-BUS=${1}
-HOST=${BUS%%:*}
+FLAGS = flags.FLAGS
+flags.DECLARE('db_backend', 'nova.db.api')
-if [ ! -e /sys/class/iscsi_host ]; then
- exit 1
-fi
-file="/sys/class/iscsi_host/host${HOST}/device/session*/iscsi_session*/session*/targetname"
+IMPL = utils.LazyPluggable(FLAGS['db_backend'],
+ sqlalchemy='nova.db.sqlalchemy.migration')
-target_name=$(cat ${file})
-if [ -z "${target_name}" ]; then
- exit 1
-fi
+def db_sync(version=None):
+ """Migrate the database to `version` or the most recent version."""
+ return IMPL.db_sync(version=version)
-echo "${target_name##*:}"
+
+def db_version():
+ """Display the current database version."""
+ return IMPL.db_version()
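The rewritten nova/db/migration.py resolves its backend through utils.LazyPluggable keyed on the db_backend flag, deferring the sqlalchemy import until the first call so that flag parsing completes first. A minimal sketch of that lazy-delegation idiom, illustrative only and not nova's exact utils.LazyPluggable:

    class LazyBackend(object):
        """Delegate attribute access to a module imported on first use."""

        def __init__(self, module_name):
            self._module_name = module_name
            self._module = None

        def __getattr__(self, name):
            if self._module is None:
                self._module = __import__(self._module_name,
                                          fromlist=['module'])
            return getattr(self._module, name)

    IMPL = LazyBackend('nova.db.sqlalchemy.migration')
    IMPL.db_version()  # first access imports the module, then delegates
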
diff --git a/nova/db/sqlalchemy/__init__.py b/nova/db/sqlalchemy/__init__.py
index 61f128f8a..747015af5 100644
--- a/nova/db/sqlalchemy/__init__.py
+++ b/nova/db/sqlalchemy/__init__.py
@@ -15,32 +15,3 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
-"""
-SQLAlchemy database backend
-"""
-import time
-
-from sqlalchemy.exc import OperationalError
-
-from nova import flags
-from nova import log as logging
-from nova.db.sqlalchemy import models
-
-
-FLAGS = flags.FLAGS
-LOG = logging.getLogger('nova.db.sqlalchemy')
-
-
-for i in xrange(FLAGS.sql_max_retries):
- if i > 0:
- time.sleep(FLAGS.sql_retry_interval)
-
- try:
- models.register_models()
- break
- except OperationalError:
- fconn = FLAGS.sql_connection
- fint = FLAGS.sql_retry_interval
- LOG.exception(_("Data store %(fconn)s is unreachable."
- " Trying again in %(fint)d seconds.") % locals())
diff --git a/nova/db/sqlalchemy/migrate_repo/README b/nova/db/sqlalchemy/migrate_repo/README
new file mode 100644
index 000000000..6218f8cac
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/README
@@ -0,0 +1,4 @@
+This is a database migration repository.
+
+More information at
+http://code.google.com/p/sqlalchemy-migrate/
diff --git a/nova/db/sqlalchemy/migrate_repo/__init__.py b/nova/db/sqlalchemy/migrate_repo/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/__init__.py
diff --git a/nova/db/sqlalchemy/migrate_repo/manage.py b/nova/db/sqlalchemy/migrate_repo/manage.py
new file mode 100644
index 000000000..09e340f44
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/manage.py
@@ -0,0 +1,4 @@
+#!/usr/bin/env python
+from migrate.versioning.shell import main
+if __name__ == '__main__':
+ main(debug='False', repository='.')
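manage.py is the stock sqlalchemy-migrate shell bound to this repository (repository='.'). A hedged usage sketch; the subcommand names and argument order come from sqlalchemy-migrate and may differ across its releases:

    #   python manage.py version                            # newest script in the repo
    #   python manage.py db_version sqlite:///nova.sqlite   # version stamped in the DB
    #   python manage.py upgrade sqlite:///nova.sqlite      # apply pending scripts
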
diff --git a/nova/db/sqlalchemy/migrate_repo/migrate.cfg b/nova/db/sqlalchemy/migrate_repo/migrate.cfg
new file mode 100644
index 000000000..2c75fb763
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/migrate.cfg
@@ -0,0 +1,20 @@
+[db_settings]
+# Used to identify which repository this database is versioned under.
+# You can use the name of your project.
+repository_id=nova
+
+# The name of the database table used to track the schema version.
+# This name shouldn't already be used by your project.
+# If this is changed once a database is under version control, you'll need to
+# change the table name in each database too.
+version_table=migrate_version
+
+# When committing a change script, Migrate will attempt to generate the
+# sql for all supported databases; normally, if one of them fails - probably
+# because you don't have that database installed - it is ignored and the
+# commit continues, perhaps ending successfully.
+# Databases in this list MUST compile successfully during a commit, or the
+# entire commit will fail. List the databases your application will actually
+# be using to ensure your updates to that database work properly.
+# This must be a list; example: ['postgres','sqlite']
+required_dbs=[]
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py
new file mode 100644
index 000000000..a312a7190
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py
@@ -0,0 +1,547 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+## Table code mostly autogenerated by genmodel.py
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+
+auth_tokens = Table('auth_tokens', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('token_hash',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ primary_key=True,
+ nullable=False),
+ Column('user_id', Integer()),
+ Column('server_manageent_url',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('storage_url',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('cdn_management_url',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ )
+
+
+export_devices = Table('export_devices', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('shelf_id', Integer()),
+ Column('blade_id', Integer()),
+ Column('volume_id',
+ Integer(),
+ ForeignKey('volumes.id'),
+ nullable=True),
+ )
+
+
+fixed_ips = Table('fixed_ips', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('address',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('network_id',
+ Integer(),
+ ForeignKey('networks.id'),
+ nullable=True),
+ Column('instance_id',
+ Integer(),
+ ForeignKey('instances.id'),
+ nullable=True),
+ Column('allocated', Boolean(create_constraint=True, name=None)),
+ Column('leased', Boolean(create_constraint=True, name=None)),
+ Column('reserved', Boolean(create_constraint=True, name=None)),
+ )
+
+
+floating_ips = Table('floating_ips', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('address',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('fixed_ip_id',
+ Integer(),
+ ForeignKey('fixed_ips.id'),
+ nullable=True),
+ Column('project_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('host',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ )
+
+
+instances = Table('instances', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('internal_id', Integer()),
+ Column('admin_pass',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('user_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('project_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('image_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('kernel_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('ramdisk_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('launch_index', Integer()),
+ Column('key_name',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('key_data',
+ Text(length=None, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('state', Integer()),
+ Column('state_description',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('memory_mb', Integer()),
+ Column('vcpus', Integer()),
+ Column('local_gb', Integer()),
+ Column('hostname',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('host',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('instance_type',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('user_data',
+ Text(length=None, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('reservation_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('mac_address',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('scheduled_at', DateTime(timezone=False)),
+ Column('launched_at', DateTime(timezone=False)),
+ Column('terminated_at', DateTime(timezone=False)),
+ Column('display_name',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('display_description',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ )
+
+
+iscsi_targets = Table('iscsi_targets', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('target_num', Integer()),
+ Column('host',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('volume_id',
+ Integer(),
+ ForeignKey('volumes.id'),
+ nullable=True),
+ )
+
+
+key_pairs = Table('key_pairs', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('name',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('user_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('fingerprint',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('public_key',
+ Text(length=None, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ )
+
+
+networks = Table('networks', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('injected', Boolean(create_constraint=True, name=None)),
+ Column('cidr',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('netmask',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('bridge',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('gateway',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('broadcast',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('dns',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('vlan', Integer()),
+ Column('vpn_public_address',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('vpn_public_port', Integer()),
+ Column('vpn_private_address',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('dhcp_start',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('project_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('host',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ )
+
+
+projects = Table('projects', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ primary_key=True,
+ nullable=False),
+ Column('name',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('description',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('project_manager',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ ForeignKey('users.id')),
+ )
+
+
+quotas = Table('quotas', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('project_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('instances', Integer()),
+ Column('cores', Integer()),
+ Column('volumes', Integer()),
+ Column('gigabytes', Integer()),
+ Column('floating_ips', Integer()),
+ )
+
+
+security_groups = Table('security_groups', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('name',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('description',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('user_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('project_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ )
+
+
+security_group_inst_assoc = Table('security_group_instance_association', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('security_group_id',
+ Integer(),
+ ForeignKey('security_groups.id')),
+ Column('instance_id', Integer(), ForeignKey('instances.id')),
+ )
+
+
+security_group_rules = Table('security_group_rules', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('parent_group_id',
+ Integer(),
+ ForeignKey('security_groups.id')),
+ Column('protocol',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('from_port', Integer()),
+ Column('to_port', Integer()),
+ Column('cidr',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('group_id',
+ Integer(),
+ ForeignKey('security_groups.id')),
+ )
+
+
+services = Table('services', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('host',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('binary',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('topic',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('report_count', Integer(), nullable=False),
+ Column('disabled', Boolean(create_constraint=True, name=None)),
+ )
+
+
+users = Table('users', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ primary_key=True,
+ nullable=False),
+ Column('name',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('access_key',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('secret_key',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('is_admin', Boolean(create_constraint=True, name=None)),
+ )
+
+
+user_project_association = Table('user_project_association', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('user_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ ForeignKey('users.id'),
+ primary_key=True,
+ nullable=False),
+ Column('project_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ ForeignKey('projects.id'),
+ primary_key=True,
+ nullable=False),
+ )
+
+
+user_project_role_association = Table('user_project_role_association', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('user_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ primary_key=True,
+ nullable=False),
+ Column('project_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ primary_key=True,
+ nullable=False),
+ Column('role',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ primary_key=True,
+ nullable=False),
+ ForeignKeyConstraint(['user_id',
+ 'project_id'],
+ ['user_project_association.user_id',
+ 'user_project_association.project_id']),
+ )
+
+
+user_role_association = Table('user_role_association', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('user_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ ForeignKey('users.id'),
+ primary_key=True,
+ nullable=False),
+ Column('role',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ primary_key=True,
+ nullable=False),
+ )
+
+
+volumes = Table('volumes', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('ec2_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('user_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('project_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('host',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('size', Integer()),
+ Column('availability_zone',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('instance_id',
+ Integer(),
+ ForeignKey('instances.id'),
+ nullable=True),
+ Column('mountpoint',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('attach_time',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('status',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('attach_status',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('scheduled_at', DateTime(timezone=False)),
+ Column('launched_at', DateTime(timezone=False)),
+ Column('terminated_at', DateTime(timezone=False)),
+ Column('display_name',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('display_description',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ )
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ for table in (auth_tokens, export_devices, fixed_ips, floating_ips,
+ instances, iscsi_targets, key_pairs, networks,
+ projects, quotas, security_groups, security_group_inst_assoc,
+ security_group_rules, services, users,
+ user_project_association, user_project_role_association,
+ user_role_association, volumes):
+ try:
+ table.create()
+ except Exception:
+ logging.info(repr(table))
+ logging.exception('Exception while creating table')
+ raise
+
+
+def downgrade(migrate_engine):
+ # Operations to reverse the above upgrade go here.
+ for table in (auth_tokens, export_devices, fixed_ips, floating_ips,
+ instances, iscsi_targets, key_pairs, networks,
+ projects, quotas, security_groups, security_group_inst_assoc,
+ security_group_rules, services, users,
+ user_project_association, user_project_role_association,
+ user_role_association, volumes):
+ table.drop()
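001_austin.py sets the pattern every later script in this repository follows: module-level Table definitions against a shared MetaData, and upgrade()/downgrade() functions that bind migrate_engine rather than creating their own. A minimal sketch of a hypothetical follow-on script (the 003_example.py name and widgets table are invented for illustration):

    # nova/db/sqlalchemy/migrate_repo/versions/003_example.py (hypothetical)
    from sqlalchemy import Column, Integer, MetaData, String, Table

    meta = MetaData()

    widgets = Table('widgets', meta,
        Column('id', Integer(), primary_key=True, nullable=False),
        Column('name', String(length=255)))


    def upgrade(migrate_engine):
        meta.bind = migrate_engine
        widgets.create()


    def downgrade(migrate_engine):
        meta.bind = migrate_engine
        widgets.drop()
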
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py
new file mode 100644
index 000000000..bd3a3e6f8
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py
@@ -0,0 +1,209 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of instances or services.
+instances = Table('instances', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+
+services = Table('services', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+
+networks = Table('networks', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+
+#
+# New Tables
+#
+certificates = Table('certificates', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('user_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('project_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('file_name',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ )
+
+
+consoles = Table('consoles', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('instance_name',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('instance_id', Integer()),
+ Column('password',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('port', Integer(), nullable=True),
+ Column('pool_id',
+ Integer(),
+ ForeignKey('console_pools.id')),
+ )
+
+
+console_pools = Table('console_pools', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('address',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('username',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('password',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('console_type',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('public_hostname',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('host',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('compute_host',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ )
+
+
+instance_actions = Table('instance_actions', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('instance_id',
+ Integer(),
+ ForeignKey('instances.id')),
+ Column('action',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('error',
+ Text(length=None, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ )
+
+
+#
+# Tables to alter
+#
+auth_tokens = Table('auth_tokens', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('token_hash',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ primary_key=True,
+ nullable=False),
+ Column('user_id', Integer()),
+ Column('server_manageent_url',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('storage_url',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('cdn_management_url',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ )
+
+
+instances_availability_zone = Column(
+ 'availability_zone',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False))
+
+
+instances_locked = Column('locked',
+ Boolean(create_constraint=True, name=None))
+
+
+networks_cidr_v6 = Column(
+ 'cidr_v6',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False))
+
+networks_ra_server = Column(
+ 'ra_server',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False))
+
+
+services_availability_zone = Column(
+ 'availability_zone',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False))
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+ for table in (certificates, consoles, console_pools, instance_actions):
+ try:
+ table.create()
+ except Exception:
+ logging.info(repr(table))
+ logging.exception('Exception while creating table')
+ raise
+
+ auth_tokens.c.user_id.alter(type=String(length=255,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False))
+
+ instances.create_column(instances_availability_zone)
+ instances.create_column(instances_locked)
+ networks.create_column(networks_cidr_v6)
+ networks.create_column(networks_ra_server)
+ services.create_column(services_availability_zone)
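Note that 002_bexar.py ships no downgrade(). A hypothetical reverse of its upgrade, relying on the drop_column extension sqlalchemy-migrate patches onto Table, would look roughly like this (a sketch, not code from the commit):

    def downgrade(migrate_engine):
        meta.bind = migrate_engine
        services.drop_column(services_availability_zone)
        networks.drop_column(networks_ra_server)
        networks.drop_column(networks_cidr_v6)
        instances.drop_column(instances_locked)
        instances.drop_column(instances_availability_zone)
        # consoles holds a ForeignKey into console_pools, so drop it first
        for table in (instance_actions, consoles, console_pools, certificates):
            table.drop()
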
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/__init__.py b/nova/db/sqlalchemy/migrate_repo/versions/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/__init__.py
diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py
new file mode 100644
index 000000000..33d14827b
--- /dev/null
+++ b/nova/db/sqlalchemy/migration.py
@@ -0,0 +1,72 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from nova import flags
+
+import sqlalchemy
+from migrate.versioning import api as versioning_api
+from migrate.versioning import exceptions as versioning_exceptions
+
+FLAGS = flags.FLAGS
+
+
+def db_sync(version=None):
+ db_version()
+ repo_path = _find_migrate_repo()
+ return versioning_api.upgrade(FLAGS.sql_connection, repo_path, version)
+
+
+def db_version():
+ repo_path = _find_migrate_repo()
+ try:
+ return versioning_api.db_version(FLAGS.sql_connection, repo_path)
+ except versioning_exceptions.DatabaseNotControlledError:
+ # If we aren't version controlled we may already have the database
+ # in the state from before we started version control, check for that
+ # and set up version_control appropriately
+ meta = sqlalchemy.MetaData()
+ engine = sqlalchemy.create_engine(FLAGS.sql_connection, echo=False)
+ meta.reflect(bind=engine)
+ try:
+ for table in ('auth_tokens', 'export_devices', 'fixed_ips',
+ 'floating_ips', 'instances', 'iscsi_targets',
+ 'key_pairs', 'networks', 'projects', 'quotas',
+ 'security_group_rules',
+ 'security_group_instance_association', 'services',
+ 'users', 'user_project_association',
+ 'user_project_role_association', 'volumes'):
+ assert table in meta.tables
+ return db_version_control(1)
+ except AssertionError:
+ return db_version_control(0)
+
+
+def db_version_control(version=None):
+ repo_path = _find_migrate_repo()
+ versioning_api.version_control(FLAGS.sql_connection, repo_path, version)
+ return version
+
+
+def _find_migrate_repo():
+ """Get the path for the migrate repository."""
+ path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
+ 'migrate_repo')
+ assert os.path.exists(path)
+ return path
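db_version() distinguishes three states: an already version-controlled database (return the stamp), a pre-existing Austin-era schema (stamp it as version 1 so only later migrations run), and anything else (stamp version 0 so db_sync replays everything). A rough sketch of the observable effect, assuming FLAGS.sql_connection points at a real database:

    from nova.db.sqlalchemy import migration

    # On an un-versioned database this reflects the schema, writes 0 or 1
    # into the version table (migrate.cfg names it migrate_version), and
    # returns that number; afterwards it just reads the stamp.
    version = migration.db_version()
    print 'schema is at version %s' % version
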
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index bf5e48b04..c54ebe3ba 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -100,51 +100,6 @@ class NovaBase(object):
return local.iteritems()
-# TODO(vish): Store images in the database instead of file system
-#class Image(BASE, NovaBase):
-# """Represents an image in the datastore"""
-# __tablename__ = 'images'
-# id = Column(Integer, primary_key=True)
-# ec2_id = Column(String(12), unique=True)
-# user_id = Column(String(255))
-# project_id = Column(String(255))
-# image_type = Column(String(255))
-# public = Column(Boolean, default=False)
-# state = Column(String(255))
-# location = Column(String(255))
-# arch = Column(String(255))
-# default_kernel_id = Column(String(255))
-# default_ramdisk_id = Column(String(255))
-#
-# @validates('image_type')
-# def validate_image_type(self, key, image_type):
-# assert(image_type in ['machine', 'kernel', 'ramdisk', 'raw'])
-#
-# @validates('state')
-# def validate_state(self, key, state):
-# assert(state in ['available', 'pending', 'disabled'])
-#
-# @validates('default_kernel_id')
-# def validate_kernel_id(self, key, val):
-# if val != 'machine':
-# assert(val is None)
-#
-# @validates('default_ramdisk_id')
-# def validate_ramdisk_id(self, key, val):
-# if val != 'machine':
-# assert(val is None)
-#
-#
-# TODO(vish): To make this into its own table, we need a good place to
-# create the host entries. In config somwhere? Or the first
-# time any object sets host? This only becomes particularly
-# important if we need to store per-host data.
-#class Host(BASE, NovaBase):
-# """Represents a host where services are running"""
-# __tablename__ = 'hosts'
-# id = Column(String(255), primary_key=True)
-
-
class Service(BASE, NovaBase):
"""Represents a running service on a host."""
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 965f4bb46..37dd4f9e8 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -92,7 +92,7 @@ flags.DEFINE_bool('update_dhcp_on_disassociate', False,
flags.DEFINE_integer('fixed_ip_disassociate_timeout', 600,
'Seconds after which a deallocated ip is disassociated')
-flags.DEFINE_bool('use_ipv6', True,
+flags.DEFINE_bool('use_ipv6', False,
'use the ipv6')
flags.DEFINE_string('network_host', socket.gethostname(),
'Network host to use for ip allocation in flat modes')
diff --git a/nova/service.py b/nova/service.py
index 98485bcac..705bed7fc 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -209,20 +209,6 @@ class Service(object):
self.model_disconnected = True
logging.exception(_("model server went away"))
- try:
- # NOTE(vish): This is late-loaded to make sure that the
- # database is not created before flags have
- # been loaded.
- from nova.db.sqlalchemy import models
- models.register_models()
- except OperationalError:
- fl_conn = FLAGS.sql_connection
- fl_intv = FLAGS.sql_retry_interval
- logging.exception(_("Data store %(fl_conn)s is"
- " unreachable. Trying again in %(fl_intv)d"
- " seconds.") % locals())
- time.sleep(FLAGS.sql_retry_interval)
-
def serve(*services):
FLAGS(sys.argv)
diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py
index 8dc87d0e2..592d5bea9 100644
--- a/nova/tests/__init__.py
+++ b/nova/tests/__init__.py
@@ -34,3 +34,8 @@
# The code below enables nosetests to work with i18n _() blocks
import __builtin__
setattr(__builtin__, '_', lambda x: x)
+
+
+def setup():
+ from nova.db import migration
+ migration.db_sync()
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index 7376a11dd..1097488ec 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -40,3 +40,4 @@ FLAGS.blades_per_shelf = 4
FLAGS.iscsi_num_targets = 8
FLAGS.verbose = True
FLAGS.sql_connection = 'sqlite:///nova.sqlite'
+FLAGS.use_ipv6 = True
diff --git a/nova/tests/glance/__init__.py b/nova/tests/glance/__init__.py
new file mode 100644
index 000000000..ef9fa05a7
--- /dev/null
+++ b/nova/tests/glance/__init__.py
@@ -0,0 +1,20 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`glance` -- Stubs for Glance
+=================================
+"""
diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py
new file mode 100644
index 000000000..f182b857a
--- /dev/null
+++ b/nova/tests/glance/stubs.py
@@ -0,0 +1,37 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import StringIO
+
+import glance.client
+
+
+def stubout_glance_client(stubs, cls):
+ """Stubs out glance.client.Client"""
+ stubs.Set(glance.client, 'Client',
+ lambda *args, **kwargs: cls(*args, **kwargs))
+
+
+class FakeGlance(object):
+ def __init__(self, host, port=None, use_ssl=False):
+ pass
+
+ def get_image(self, image):
+ meta = {
+ 'size': 0,
+ }
+ image_file = StringIO.StringIO('')
+ return meta, image_file
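stubout_glance_client swaps the glance.client.Client constructor for an arbitrary callable, so ordinary client code transparently receives a FakeGlance. A hedged usage sketch; 'localhost' and 'ami-fake' are placeholder values, and stubs is assumed to be the stubout.StubOutForTesting instance a TestCase carries:

    import glance.client
    from nova.tests.glance import stubs as glance_stubs


    def example(stubs):
        glance_stubs.stubout_glance_client(stubs, glance_stubs.FakeGlance)
        client = glance.client.Client('localhost')  # actually a FakeGlance
        meta, image_file = client.get_image('ami-fake')
        return meta['size'], image_file.read()
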
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index afdc89ba2..f6800e3d9 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -122,10 +122,10 @@ class LibvirtConnTestCase(test.TestCase):
if rescue:
check = (lambda t: t.find('./os/kernel').text.split('/')[1],
- 'rescue-kernel')
+ 'kernel.rescue')
check_list.append(check)
check = (lambda t: t.find('./os/initrd').text.split('/')[1],
- 'rescue-ramdisk')
+ 'ramdisk.rescue')
check_list.append(check)
else:
if expect_kernel:
@@ -161,13 +161,16 @@ class LibvirtConnTestCase(test.TestCase):
if rescue:
common_checks += [
(lambda t: t.findall('./devices/disk/source')[0].get(
- 'file').split('/')[1], 'rescue-disk'),
+ 'file').split('/')[1], 'disk.rescue'),
(lambda t: t.findall('./devices/disk/source')[1].get(
'file').split('/')[1], 'disk')]
else:
common_checks += [(lambda t: t.findall(
'./devices/disk/source')[0].get('file').split('/')[1],
'disk')]
+ common_checks += [(lambda t: t.findall(
+ './devices/disk/source')[1].get('file').split('/')[1],
+ 'disk.local')]
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
@@ -225,12 +228,6 @@ class IptablesFirewallTestCase(test.TestCase):
self.manager.delete_user(self.user)
super(IptablesFirewallTestCase, self).tearDown()
- def _p(self, *args, **kwargs):
- if 'iptables-restore' in args:
- print ' '.join(args), kwargs['stdin']
- if 'iptables-save' in args:
- return
-
in_rules = [
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*filter',
@@ -252,11 +249,21 @@ class IptablesFirewallTestCase(test.TestCase):
'# Completed on Mon Dec 6 11:54:13 2010',
]
+ in6_rules = [
+ '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
+ '*filter',
+ ':INPUT ACCEPT [349155:75810423]',
+ ':FORWARD ACCEPT [0:0]',
+ ':OUTPUT ACCEPT [349256:75777230]',
+ 'COMMIT',
+ '# Completed on Tue Jan 18 23:47:56 2011'
+ ]
+
def test_static_filters(self):
- self.fw.execute = self._p
instance_ref = db.instance_create(self.context,
{'user_id': 'fake',
- 'project_id': 'fake'})
+ 'project_id': 'fake',
+ 'mac_address': '56:12:12:12:12:12'})
ip = '10.11.12.13'
network_ref = db.project_get_network(self.context,
@@ -301,18 +308,31 @@ class IptablesFirewallTestCase(test.TestCase):
secgroup['id'])
instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
- self.fw.add_instance(instance_ref)
-
- out_rules = self.fw.modify_rules(self.in_rules)
+# self.fw.add_instance(instance_ref)
+ def fake_iptables_execute(cmd, process_input=None):
+ if cmd == 'sudo ip6tables-save -t filter':
+ return '\n'.join(self.in6_rules), None
+ if cmd == 'sudo iptables-save -t filter':
+ return '\n'.join(self.in_rules), None
+ if cmd == 'sudo iptables-restore':
+ self.out_rules = process_input.split('\n')
+ return '', ''
+ if cmd == 'sudo ip6tables-restore':
+ self.out6_rules = process_input.split('\n')
+ return '', ''
+ self.fw.execute = fake_iptables_execute
+
+ self.fw.prepare_instance_filter(instance_ref)
+ self.fw.apply_instance_filter(instance_ref)
in_rules = filter(lambda l: not l.startswith('#'), self.in_rules)
for rule in in_rules:
if not 'nova' in rule:
- self.assertTrue(rule in out_rules,
+ self.assertTrue(rule in self.out_rules,
'Rule went missing: %s' % rule)
instance_chain = None
- for rule in out_rules:
+ for rule in self.out_rules:
# This is pretty crude, but it'll do for now
if '-d 10.11.12.13 -j' in rule:
instance_chain = rule.split(' ')[-1]
@@ -320,7 +340,7 @@ class IptablesFirewallTestCase(test.TestCase):
self.assertTrue(instance_chain, "The instance chain wasn't added")
security_group_chain = None
- for rule in out_rules:
+ for rule in self.out_rules:
# This is pretty crude, but it'll do for now
if '-A %s -j' % instance_chain in rule:
security_group_chain = rule.split(' ')[-1]
@@ -329,16 +349,16 @@ class IptablesFirewallTestCase(test.TestCase):
"The security group chain wasn't added")
self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -j ACCEPT' % \
- security_group_chain in out_rules,
+ security_group_chain in self.out_rules,
"ICMP acceptance rule wasn't added")
- self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -m icmp --icmp-type'
- ' 8 -j ACCEPT' % security_group_chain in out_rules,
+ self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -m icmp --icmp-type '
+ '8 -j ACCEPT' % security_group_chain in self.out_rules,
"ICMP Echo Request acceptance rule wasn't added")
self.assertTrue('-A %s -p tcp -s 192.168.10.0/24 -m multiport '
'--dports 80:81 -j ACCEPT' % security_group_chain \
- in out_rules,
+ in self.out_rules,
"TCP port 80/81 acceptance rule wasn't added")
@@ -473,5 +493,6 @@ class NWFilterTestCase(test.TestCase):
self.fw.setup_basic_filtering(instance)
self.fw.prepare_instance_filter(instance)
+ self.fw.apply_instance_filter(instance)
_ensure_all_called()
self.teardown_security_group()
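The inline fake_iptables_execute above is a command-dispatch test double: canned stdout for the *-save commands, capture of process_input for the *-restore ones. Factored into a reusable helper, the pattern looks roughly like this (a sketch, not code from the commit):

    def make_fake_execute(canned_output, captured_input):
        """canned_output maps cmd -> (stdout, stderr); captured_input is a
        dict the fake fills with any process_input it receives, split into
        lines for later assertions."""
        def fake_execute(cmd, process_input=None):
            if process_input is not None:
                captured_input[cmd] = process_input.split('\n')
            return canned_output.get(cmd, ('', ''))
        return fake_execute
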
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 261ee0fde..9f5b266f3 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -34,6 +34,7 @@ from nova.virt.xenapi import volume_utils
from nova.virt.xenapi.vmops import SimpleDH
from nova.tests.db import fakes as db_fakes
from nova.tests.xenapi import stubs
+from nova.tests.glance import stubs as glance_stubs
FLAGS = flags.FLAGS
@@ -108,18 +109,16 @@ class XenAPIVolumeTestCase(test.TestCase):
conn = xenapi_conn.get_connection(False)
volume = self._create_volume()
instance = db.instance_create(self.values)
- xenapi_fake.create_vm(instance.name, 'Running')
+ vm = xenapi_fake.create_vm(instance.name, 'Running')
result = conn.attach_volume(instance.name, volume['id'], '/dev/sdc')
def check():
# check that the VM has a VBD attached to it
- # Get XenAPI reference for the VM
- vms = xenapi_fake.get_all('VM')
# Get XenAPI record for VBD
vbds = xenapi_fake.get_all('VBD')
vbd = xenapi_fake.get_record('VBD', vbds[0])
vm_ref = vbd['VM']
- self.assertEqual(vm_ref, vms[0])
+ self.assertEqual(vm_ref, vm)
check()
@@ -157,9 +156,14 @@ class XenAPIVMTestCase(test.TestCase):
FLAGS.xenapi_connection_url = 'test_url'
FLAGS.xenapi_connection_password = 'test_pass'
xenapi_fake.reset()
+ xenapi_fake.create_local_srs()
db_fakes.stub_out_db_instance_api(self.stubs)
xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ stubs.stubout_get_this_vm_uuid(self.stubs)
+ stubs.stubout_stream_disk(self.stubs)
+ glance_stubs.stubout_glance_client(self.stubs,
+ glance_stubs.FakeGlance)
self.conn = xenapi_conn.get_connection(False)
def test_list_instances_0(self):
@@ -207,40 +211,70 @@ class XenAPIVMTestCase(test.TestCase):
check()
- def test_spawn(self):
- instance = self._create_instance()
+ def check_vm_record(self, conn):
+ instances = conn.list_instances()
+ self.assertEquals(instances, [1])
+
+ # Get Nova record for VM
+ vm_info = conn.get_info(1)
+
+ # Get XenAPI record for VM
+ vms = [rec for ref, rec
+ in xenapi_fake.get_all_records('VM').iteritems()
+ if not rec['is_control_domain']]
+ vm = vms[0]
+
+ # Check that m1.large above turned into the right thing.
+ instance_type = instance_types.INSTANCE_TYPES['m1.large']
+ mem_kib = long(instance_type['memory_mb']) << 10
+ mem_bytes = str(mem_kib << 10)
+ vcpus = instance_type['vcpus']
+ self.assertEquals(vm_info['max_mem'], mem_kib)
+ self.assertEquals(vm_info['mem'], mem_kib)
+ self.assertEquals(vm['memory_static_max'], mem_bytes)
+ self.assertEquals(vm['memory_dynamic_max'], mem_bytes)
+ self.assertEquals(vm['memory_dynamic_min'], mem_bytes)
+ self.assertEquals(vm['VCPUs_max'], str(vcpus))
+ self.assertEquals(vm['VCPUs_at_startup'], str(vcpus))
+
+ # Check that the VM is running according to Nova
+ self.assertEquals(vm_info['state'], power_state.RUNNING)
+
+ # Check that the VM is running according to XenAPI.
+ self.assertEquals(vm['power_state'], 'Running')
+
+ def _test_spawn(self, image_id, kernel_id, ramdisk_id):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ values = {'name': 1,
+ 'id': 1,
+ 'project_id': self.project.id,
+ 'user_id': self.user.id,
+ 'image_id': image_id,
+ 'kernel_id': kernel_id,
+ 'ramdisk_id': ramdisk_id,
+ 'instance_type': 'm1.large',
+ 'mac_address': 'aa:bb:cc:dd:ee:ff',
+ }
+ conn = xenapi_conn.get_connection(False)
+ instance = db.instance_create(values)
+ conn.spawn(instance)
+ self.check_vm_record(conn)
- def check():
- instances = self.conn.list_instances()
- self.assertEquals(instances, [1])
-
- # Get Nova record for VM
- vm_info = self.conn.get_info(1)
-
- # Get XenAPI record for VM
- vms = xenapi_fake.get_all('VM')
- vm = xenapi_fake.get_record('VM', vms[0])
-
- # Check that m1.large above turned into the right thing.
- instance_type = instance_types.INSTANCE_TYPES['m1.large']
- mem_kib = long(instance_type['memory_mb']) << 10
- mem_bytes = str(mem_kib << 10)
- vcpus = instance_type['vcpus']
- self.assertEquals(vm_info['max_mem'], mem_kib)
- self.assertEquals(vm_info['mem'], mem_kib)
- self.assertEquals(vm['memory_static_max'], mem_bytes)
- self.assertEquals(vm['memory_dynamic_max'], mem_bytes)
- self.assertEquals(vm['memory_dynamic_min'], mem_bytes)
- self.assertEquals(vm['VCPUs_max'], str(vcpus))
- self.assertEquals(vm['VCPUs_at_startup'], str(vcpus))
-
- # Check that the VM is running according to Nova
- self.assertEquals(vm_info['state'], power_state.RUNNING)
-
- # Check that the VM is running according to XenAPI.
- self.assertEquals(vm['power_state'], 'Running')
+ def test_spawn_raw_objectstore(self):
+ FLAGS.xenapi_image_service = 'objectstore'
+ self._test_spawn(1, None, None)
- check()
+ def test_spawn_objectstore(self):
+ FLAGS.xenapi_image_service = 'objectstore'
+ self._test_spawn(1, 2, 3)
+
+ def test_spawn_raw_glance(self):
+ FLAGS.xenapi_image_service = 'glance'
+ self._test_spawn(1, None, None)
+
+ def test_spawn_glance(self):
+ FLAGS.xenapi_image_service = 'glance'
+ self._test_spawn(1, 2, 3)
def tearDown(self):
super(XenAPIVMTestCase, self).tearDown()
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 292bd9ba9..624995ada 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -115,6 +115,21 @@ def stub_out_get_target(stubs):
stubs.Set(volume_utils, '_get_target', fake_get_target)
+def stubout_get_this_vm_uuid(stubs):
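+    # Report the uuid of the fake control domain (created by fake.reset())
+    # as "this" VM, since tests do not run inside a real XenServer domain.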
+ def f():
+ vms = [rec['uuid'] for ref, rec
+ in fake.get_all_records('VM').iteritems()
+ if rec['is_control_domain']]
+ return vms[0]
+ stubs.Set(vm_utils, 'get_this_vm_uuid', f)
+
+
+def stubout_stream_disk(stubs):
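+    # _stream_disk writes to a real block device under /dev, which is
+    # impossible in unit tests, so replace it with a no-op.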
+ def f(_1, _2, _3, _4):
+ pass
+ stubs.Set(vm_utils, '_stream_disk', f)
+
+
class FakeSessionForVMTests(fake.SessionBase):
""" Stubs out a XenAPISession for VM tests """
def __init__(self, uri):
@@ -124,7 +139,10 @@ class FakeSessionForVMTests(fake.SessionBase):
return self.xenapi.network.get_all_records()
def host_call_plugin(self, _1, _2, _3, _4, _5):
- return ''
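+        # Fake a successful plugin call: create a VDI in the first SR and
+        # return its uuid in the <string> wrapper the caller expects.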
+ sr_ref = fake.get_all('SR')[0]
+ vdi_ref = fake.create_vdi('', False, sr_ref, False)
+ vdi_rec = fake.get_record('VDI', vdi_ref)
+ return '<string>%s</string>' % vdi_rec['uuid']
def VM_start(self, _1, ref, _2, _3):
vm = fake.get_record('VM', ref)
@@ -159,10 +177,6 @@ class FakeSessionForVolumeTests(fake.SessionBase):
def __init__(self, uri):
super(FakeSessionForVolumeTests, self).__init__(uri)
- def VBD_plug(self, _1, ref):
- rec = fake.get_record('VBD', ref)
- rec['currently-attached'] = True
-
def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
_6, _7, _8, _9, _10, _11):
valid_vdi = False
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index a57a8f43b..f8b3c7807 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -310,6 +310,54 @@ class FakeConnection(object):
'username': 'fakeuser',
'password': 'fakepassword'}
+ def refresh_security_group_rules(self, security_group_id):
+ """This method is called after a change to security groups.
+
+ All security groups and their associated rules live in the datastore,
+        and calling this method should apply the updated rules to instances
+        that belong to the specified security group.
+
+ An error should be raised if the operation cannot complete.
+
+ """
+ return True
+
+ def refresh_security_group_members(self, security_group_id):
+ """This method is called when a security group is added to an instance.
+
+ This message is sent to the virtualization drivers on hosts that are
+ running an instance that belongs to a security group that has a rule
+ that references the security group identified by `security_group_id`.
+        It is the responsibility of this method to make sure any rules
+ that authorize traffic flow with members of the security group are
+ updated and any new members can communicate, and any removed members
+ cannot.
+
+ Scenario:
+ * we are running on host 'H0' and we have an instance 'i-0'.
+ * instance 'i-0' is a member of security group 'speaks-b'
+ * group 'speaks-b' has an ingress rule that authorizes group 'b'
+ * another host 'H1' runs an instance 'i-1'
+ * instance 'i-1' is a member of security group 'b'
+
+        When 'i-1' launches or terminates we will receive the message
+ to update members of group 'b', at which time we will make
+ any changes needed to the rules for instance 'i-0' to allow
+ or deny traffic coming from 'i-1', depending on if it is being
+ added or removed from the group.
+
+ In this scenario, 'i-1' could just as easily have been running on our
+        host 'H0' and this method would still have been called. The point is
+        that this method isn't called only on the host where instances of
+        that group are running (as is the case with
+        :method:`refresh_security_group_rules`), but on any host where
+        references are made authorizing those instances.
+
+ An error should be raised if the operation cannot complete.
+
+ """
+ return True
+
class FakeInstance(object):
diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template
index de06a1eb0..8139c3620 100644
--- a/nova/virt/libvirt.xml.template
+++ b/nova/virt/libvirt.xml.template
@@ -18,10 +18,10 @@
#set $disk_prefix = 'vd'
#set $disk_bus = 'virtio'
<type>hvm</type>
- #end if
+ #end if
#if $getVar('rescue', False)
- <kernel>${basepath}/rescue-kernel</kernel>
- <initrd>${basepath}/rescue-ramdisk</initrd>
+ <kernel>${basepath}/kernel.rescue</kernel>
+ <initrd>${basepath}/ramdisk.rescue</initrd>
#else
#if $getVar('kernel', None)
<kernel>${kernel}</kernel>
@@ -47,7 +47,7 @@
#if $getVar('rescue', False)
<disk type='file'>
<driver type='${driver_type}'/>
- <source file='${basepath}/rescue-disk'/>
+ <source file='${basepath}/disk.rescue'/>
<target dev='${disk_prefix}a' bus='${disk_bus}'/>
</disk>
<disk type='file'>
@@ -64,7 +64,7 @@
#if $getVar('local', False)
<disk type='file'>
<driver type='${driver_type}'/>
- <source file='${basepath}/local'/>
+ <source file='${basepath}/disk.local'/>
<target dev='${disk_prefix}b' bus='${disk_bus}'/>
</disk>
#end if
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index f245392ac..0af9bf42d 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -351,7 +351,7 @@ class LibvirtConnection(object):
rescue_images = {'image_id': FLAGS.rescue_image_id,
'kernel_id': FLAGS.rescue_kernel_id,
'ramdisk_id': FLAGS.rescue_ramdisk_id}
- self._create_image(instance, xml, 'rescue-', rescue_images)
+ self._create_image(instance, xml, '.rescue', rescue_images)
self._conn.createXML(xml, 0)
timer = utils.LoopingCall(f=None)
@@ -533,23 +533,23 @@ class LibvirtConnection(object):
utils.execute('truncate %s -s %dG' % (target, local_gb))
# TODO(vish): should we format disk by default?
- def _create_image(self, inst, libvirt_xml, prefix='', disk_images=None):
+ def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None):
# syntactic nicety
- def basepath(fname='', prefix=prefix):
+ def basepath(fname='', suffix=suffix):
return os.path.join(FLAGS.instances_path,
inst['name'],
- prefix + fname)
+ fname + suffix)
# ensure directories exist and are writable
- utils.execute('mkdir -p %s' % basepath(prefix=''))
- utils.execute('chmod 0777 %s' % basepath(prefix=''))
+ utils.execute('mkdir -p %s' % basepath(suffix=''))
+ utils.execute('chmod 0777 %s' % basepath(suffix=''))
LOG.info(_('instance %s: Creating image'), inst['name'])
f = open(basepath('libvirt.xml'), 'w')
f.write(libvirt_xml)
f.close()
- # NOTE(vish): No need add the prefix to console.log
+        # NOTE(vish): No need to add the suffix to console.log
os.close(os.open(basepath('console.log', ''),
os.O_CREAT | os.O_WRONLY, 0660))
@@ -578,7 +578,7 @@ class LibvirtConnection(object):
root_fname = disk_images['image_id']
size = FLAGS.minimum_root_size
- if inst['instance_type'] == 'm1.tiny' or prefix == 'rescue-':
+ if inst['instance_type'] == 'm1.tiny' or suffix == '.rescue':
size = None
root_fname += "_sm"
@@ -594,7 +594,7 @@ class LibvirtConnection(object):
if type_data['local_gb']:
self._cache_image(fn=self._create_local,
- target=basepath('local'),
+ target=basepath('disk.local'),
fname="local_%s" % type_data['local_gb'],
cow=FLAGS.use_cow_images,
local_gb=type_data['local_gb'])
@@ -1123,6 +1123,10 @@ class NWFilterFirewall(FirewallDriver):
return
+ def apply_instance_filter(self, instance):
+ """No-op. Everything is done in prepare_instance_filter"""
+ pass
+
def refresh_security_group_rules(self, security_group_id):
return self._define_filter(
self.security_group_to_nwfilter_xml(security_group_id))
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index bcc129123..e8352771c 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -78,6 +78,7 @@ def reset():
for c in _CLASSES:
_db_content[c] = {}
create_host('fake')
+ create_vm('fake', 'Running', is_a_template=False, is_control_domain=True)
def create_host(name_label):
@@ -138,14 +139,21 @@ def create_vdi(name_label, read_only, sr_ref, sharable):
def create_vbd(vm_ref, vdi_ref):
- vbd_rec = {'VM': vm_ref, 'VDI': vdi_ref}
+ vbd_rec = {
+ 'VM': vm_ref,
+ 'VDI': vdi_ref,
+ 'currently_attached': False,
+ }
vbd_ref = _create_object('VBD', vbd_rec)
after_VBD_create(vbd_ref, vbd_rec)
return vbd_ref
def after_VBD_create(vbd_ref, vbd_rec):
- """Create backref from VM to VBD when VBD is created"""
+ """Create read-only fields and backref from VM to VBD when VBD is
+ created."""
+ vbd_rec['currently_attached'] = False
+ vbd_rec['device'] = ''
vm_ref = vbd_rec['VM']
vm_rec = _db_content['VM'][vm_ref]
vm_rec['VBDs'] = [vbd_ref]
@@ -154,9 +162,10 @@ def after_VBD_create(vbd_ref, vbd_rec):
vbd_rec['vm_name_label'] = vm_name_label
-def create_pbd(config, sr_ref, attached):
+def create_pbd(config, host_ref, sr_ref, attached):
return _create_object('PBD', {
'device-config': config,
+ 'host': host_ref,
'SR': sr_ref,
'currently-attached': attached,
})
@@ -169,6 +178,33 @@ def create_task(name_label):
})
+def create_local_srs():
+ """Create an SR that looks like the one created on the local disk by
+    default by the XenServer installer. Do this once per host."""
+ for host_ref in _db_content['host'].keys():
+ _create_local_sr(host_ref)
+
+
+def _create_local_sr(host_ref):
+ sr_ref = _create_object('SR', {
+ 'name_label': 'Local storage',
+ 'type': 'lvm',
+ 'content_type': 'user',
+ 'shared': False,
+ 'physical_size': str(1 << 30),
+ 'physical_utilisation': str(0),
+ 'virtual_allocation': str(0),
+ 'other_config': {
+ 'i18n-original-value-name_label': 'Local storage',
+ 'i18n-key': 'local-storage',
+ },
+ 'VDIs': []
+ })
+ pbd_ref = create_pbd('', host_ref, sr_ref, True)
+ _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
+ return sr_ref
+
+
def _create_object(table, obj):
ref = str(uuid.uuid4())
obj['uuid'] = str(uuid.uuid4())
@@ -181,9 +217,10 @@ def _create_sr(table, obj):
# Forces fake to support iscsi only
if sr_type != 'iscsi':
raise Failure(['SR_UNKNOWN_DRIVER', sr_type])
+ host_ref = _db_content['host'].keys()[0]
sr_ref = _create_object(table, obj[2])
vdi_ref = create_vdi('', False, sr_ref, False)
- pbd_ref = create_pbd('', sr_ref, True)
+ pbd_ref = create_pbd('', host_ref, sr_ref, True)
_db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
_db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
_db_content['VDI'][vdi_ref]['SR'] = sr_ref
@@ -235,6 +272,20 @@ class SessionBase(object):
def __init__(self, uri):
self._session = None
+ def VBD_plug(self, _1, ref):
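+        # Mirror real XenAPI semantics: plugging an already-attached VBD
+        # is an error.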
+ rec = get_record('VBD', ref)
+ if rec['currently_attached']:
+ raise Failure(['DEVICE_ALREADY_ATTACHED', ref])
+ rec['currently_attached'] = True
+ rec['device'] = rec['userdevice']
+
+ def VBD_unplug(self, _1, ref):
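+        # Likewise, unplugging a VBD that is not attached is an error.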
+ rec = get_record('VBD', ref)
+ if not rec['currently_attached']:
+ raise Failure(['DEVICE_ALREADY_DETACHED', ref])
+ rec['currently_attached'] = False
+ rec['device'] = ''
+
def xenapi_request(self, methodname, params):
if methodname.startswith('login'):
self._login(methodname, params)
@@ -292,6 +343,8 @@ class SessionBase(object):
return lambda *params: self._getter(name, params)
elif self._is_create(name):
return lambda *params: self._create(name, params)
+ elif self._is_destroy(name):
+ return lambda *params: self._destroy(name, params)
else:
return None
@@ -302,10 +355,16 @@ class SessionBase(object):
bits[1].startswith(getter and 'get_' or 'set_'))
def _is_create(self, name):
+ return self._is_method(name, 'create')
+
+ def _is_destroy(self, name):
+ return self._is_method(name, 'destroy')
+
+ def _is_method(self, name, meth):
bits = name.split('.')
return (len(bits) == 2 and
bits[0] in _CLASSES and
- bits[1] == 'create')
+ bits[1] == meth)
def _getter(self, name, params):
self._check_session(params)
@@ -373,10 +432,9 @@ class SessionBase(object):
_create_sr(cls, params) or _create_object(cls, params[1])
# Call hook to provide any fixups needed (ex. creating backrefs)
- try:
- globals()["after_%s_create" % cls](ref, params[1])
- except KeyError:
- pass
+ after_hook = 'after_%s_create' % cls
+ if after_hook in globals():
+ globals()[after_hook](ref, params[1])
obj = get_record(cls, ref)
@@ -386,6 +444,15 @@ class SessionBase(object):
return ref
+ def _destroy(self, name, params):
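+        # Generic destructor for any fake class: validate that the ref
+        # exists, then drop the record from the in-memory table.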
+ self._check_session(params)
+ self._check_arg_count(params, 2)
+ table, _ = name.split('.')
+ ref = params[1]
+ if ref not in _db_content[table]:
+ raise Failure(['HANDLE_INVALID', table, ref])
+ del _db_content[table][ref]
+
def _async(self, name, params):
task_ref = create_task(name)
task = _db_content['task'][task_ref]
@@ -423,7 +490,7 @@ class SessionBase(object):
try:
return result[0]
except IndexError:
- return None
+ raise Failure(['UUID_INVALID', v, result, recs, k])
return result
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index c87aee234..ebc903d7b 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -19,11 +19,14 @@ Helper methods for operations related to the management of VM records and
their attributes like VDIs, VIFs, as well as their lookup functions.
"""
+import os
import pickle
+import re
import urllib
from xml.dom import minidom
from eventlet import event
+import glance.client
from nova import exception
from nova import flags
from nova import log as logging
@@ -47,17 +50,23 @@ XENAPI_POWER_STATE = {
'Crashed': power_state.CRASHED}
+SECTOR_SIZE = 512
+MBR_SIZE_SECTORS = 63
+MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE
+KERNEL_DIR = '/boot/guest'
+
+
class ImageType:
- """
- Enumeration class for distinguishing different image types
- 0 - kernel/ramdisk image (goes on dom0's filesystem)
- 1 - disk image (local SR, partitioned by objectstore plugin)
- 2 - raw disk image (local SR, NOT partitioned by plugin)
- """
+ """
+ Enumeration class for distinguishing different image types
+ 0 - kernel/ramdisk image (goes on dom0's filesystem)
+ 1 - disk image (local SR, partitioned by objectstore plugin)
+ 2 - raw disk image (local SR, NOT partitioned by plugin)
+ """
- KERNEL_RAMDISK = 0
- DISK = 1
- DISK_RAW = 2
+ KERNEL_RAMDISK = 0
+ DISK = 1
+ DISK_RAW = 2
class VMHelper(HelperBase):
@@ -209,6 +218,25 @@ class VMHelper(HelperBase):
return vif_ref
@classmethod
+ def create_vdi(cls, session, sr_ref, name_label, virtual_size, read_only):
+ """Create a VDI record and returns its reference."""
+ vdi_ref = session.get_xenapi().VDI.create(
+ {'name_label': name_label,
+ 'name_description': '',
+ 'SR': sr_ref,
+ 'virtual_size': str(virtual_size),
+ 'type': 'User',
+ 'sharable': False,
+ 'read_only': read_only,
+ 'xenstore_data': {},
+ 'other_config': {},
+ 'sm_config': {},
+ 'tags': []})
+ LOG.debug(_('Created VDI %s (%s, %s, %s) on %s.'), vdi_ref,
+ name_label, virtual_size, read_only, sr_ref)
+ return vdi_ref
+
+ @classmethod
def create_snapshot(cls, session, instance_id, vm_ref, label):
""" Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
Snapshot VHD
@@ -259,7 +287,64 @@ class VMHelper(HelperBase):
def fetch_image(cls, session, instance_id, image, user, project, type):
"""
type is interpreted as an ImageType instance
+ Related flags:
+ xenapi_image_service = ['glance', 'objectstore']
+            glance_host = 'host for glance services'
+ glance_port = 'port for glance services'
"""
+ access = AuthManager().get_access_key(user, project)
+
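+        # Dispatch on FLAGS.xenapi_image_service; the glance path returns
+        # a VDI uuid, or a dom0 filename for kernel/ramdisk images.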
+ if FLAGS.xenapi_image_service == 'glance':
+ return cls._fetch_image_glance(session, instance_id, image,
+ access, type)
+ else:
+ return cls._fetch_image_objectstore(session, instance_id, image,
+ access, user.secret, type)
+
+ @classmethod
+ def _fetch_image_glance(cls, session, instance_id, image, access, type):
+ sr = find_sr(session)
+ if sr is None:
+ raise exception.NotFound('Cannot find SR to write VDI to')
+
+ c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
+
+ meta, image_file = c.get_image(image)
+ virtual_size = int(meta['size'])
+ vdi_size = virtual_size
+        LOG.debug(_("Size for image %s: %d"), image, virtual_size)
+ if type == ImageType.DISK:
+ # Make room for MBR.
+ vdi_size += MBR_SIZE_BYTES
+
+ vdi = cls.create_vdi(session, sr, _('Glance image %s') % image,
+ vdi_size, False)
+
+ with_vdi_attached_here(session, vdi, False,
+ lambda dev:
+ _stream_disk(dev, type,
+ virtual_size, image_file))
+        if type == ImageType.KERNEL_RAMDISK:
+            # We need to invoke a plugin to copy the VDI's
+            # content into the proper path.
+ LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi)
+ fn = "copy_kernel_vdi"
+ args = {}
+ args['vdi-ref'] = vdi
+            # Let the plugin copy the correct number of bytes.
+ args['image-size'] = str(vdi_size)
+ task = session.async_call_plugin('glance', fn, args)
+ filename = session.wait_for_task(instance_id, task)
+            # Remove the VDI as it is no longer needed.
+ session.get_xenapi().VDI.destroy(vdi)
+ LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi)
+ return filename
+ else:
+ return session.get_xenapi().VDI.get_uuid(vdi)
+
+ @classmethod
+ def _fetch_image_objectstore(cls, session, instance_id, image, access,
+ secret, type):
url = images.image_url(image)
-        access = AuthManager().get_access_key(user, project)
LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
@@ -267,7 +352,7 @@ class VMHelper(HelperBase):
args = {}
args['src_url'] = url
args['username'] = access
- args['password'] = user.secret
+ args['password'] = secret
args['add_partition'] = 'false'
args['raw'] = 'false'
if type != ImageType.KERNEL_RAMDISK:
@@ -279,14 +364,21 @@ class VMHelper(HelperBase):
return uuid
@classmethod
- def lookup_image(cls, session, vdi_ref):
+ def lookup_image(cls, session, instance_id, vdi_ref):
+ if FLAGS.xenapi_image_service == 'glance':
+ return cls._lookup_image_glance(session, vdi_ref)
+ else:
+ return cls._lookup_image_objectstore(session, instance_id, vdi_ref)
+
+ @classmethod
+ def _lookup_image_objectstore(cls, session, instance_id, vdi_ref):
LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
fn = "is_vdi_pv"
args = {}
args['vdi-ref'] = vdi_ref
- #TODO: Call proper function in plugin
task = session.async_call_plugin('objectstore', fn, args)
- pv_str = session.wait_for_task(task)
+ pv_str = session.wait_for_task(instance_id, task)
+ pv = None
if pv_str.lower() == 'true':
pv = True
elif pv_str.lower() == 'false':
@@ -295,6 +387,23 @@ class VMHelper(HelperBase):
return pv
@classmethod
+ def _lookup_image_glance(cls, session, vdi_ref):
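+        # Attach the VDI locally and let pygrub decide whether it holds a
+        # bootable PV (Xen) kernel; otherwise the image boots as HVM.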
+ LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
+
+ def is_vdi_pv(dev):
+ LOG.debug(_("Running pygrub against %s"), dev)
+ output = os.popen('pygrub -qn /dev/%s' % dev)
+ for line in output.readlines():
+ #try to find kernel string
+ m = re.search('(?<=kernel:)/.*(?:>)', line)
+ if m and m.group(0).find('xen') != -1:
+ LOG.debug(_("Found Xen kernel %s") % m.group(0))
+ return True
+ LOG.debug(_("No Xen kernel found. Booting HVM."))
+ return False
+ return with_vdi_attached_here(session, vdi_ref, True, is_vdi_pv)
+
+ @classmethod
def lookup(cls, session, i):
"""Look the instance i up, and returns it if available"""
vms = session.get_xenapi().VM.get_by_name_label(i)
@@ -468,3 +577,123 @@ def get_vdi_for_vm_safely(session, vm_ref):
vdi_ref = vdi_refs[0]
vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
return vdi_ref, vdi_rec
+
+
+def find_sr(session):
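+    # Find the default local-storage SR, identified by the
+    # 'i18n-key': 'local-storage' marker set by the XenServer installer,
+    # and make sure one of its PBDs belongs to the host this session
+    # points at.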
+ host = session.get_xenapi_host()
+ srs = session.get_xenapi().SR.get_all()
+ for sr in srs:
+ sr_rec = session.get_xenapi().SR.get_record(sr)
+ if not ('i18n-key' in sr_rec['other_config'] and
+ sr_rec['other_config']['i18n-key'] == 'local-storage'):
+ continue
+ for pbd in sr_rec['PBDs']:
+ pbd_rec = session.get_xenapi().PBD.get_record(pbd)
+ if pbd_rec['host'] == host:
+ return sr
+ return None
+
+
+def with_vdi_attached_here(session, vdi, read_only, f):
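+    # Attach the VDI to the VM running this service via a temporary VBD,
+    # call f with the resulting device name, and always unplug and destroy
+    # the VBD afterwards, even if f raises.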
+ this_vm_ref = get_this_vm_ref(session)
+ vbd_rec = {}
+ vbd_rec['VM'] = this_vm_ref
+ vbd_rec['VDI'] = vdi
+ vbd_rec['userdevice'] = 'autodetect'
+ vbd_rec['bootable'] = False
+ vbd_rec['mode'] = read_only and 'RO' or 'RW'
+ vbd_rec['type'] = 'disk'
+ vbd_rec['unpluggable'] = True
+ vbd_rec['empty'] = False
+ vbd_rec['other_config'] = {}
+ vbd_rec['qos_algorithm_type'] = ''
+ vbd_rec['qos_algorithm_params'] = {}
+ vbd_rec['qos_supported_algorithms'] = []
+ LOG.debug(_('Creating VBD for VDI %s ... '), vdi)
+ vbd = session.get_xenapi().VBD.create(vbd_rec)
+ LOG.debug(_('Creating VBD for VDI %s done.'), vdi)
+ try:
+ LOG.debug(_('Plugging VBD %s ... '), vbd)
+ session.get_xenapi().VBD.plug(vbd)
+ LOG.debug(_('Plugging VBD %s done.'), vbd)
+ return f(session.get_xenapi().VBD.get_device(vbd))
+ finally:
+ LOG.debug(_('Destroying VBD for VDI %s ... '), vdi)
+ vbd_unplug_with_retry(session, vbd)
+ ignore_failure(session.get_xenapi().VBD.destroy, vbd)
+ LOG.debug(_('Destroying VBD for VDI %s done.'), vdi)
+
+
+def vbd_unplug_with_retry(session, vbd):
+ """Call VBD.unplug on the given VBD, with a retry if we get
+    DEVICE_DETACH_REJECTED. For reasons we don't fully understand, the
+    device can still be reported as in use even when every process using
+    it should be dead."""
+ while True:
+ try:
+ session.get_xenapi().VBD.unplug(vbd)
+ LOG.debug(_('VBD.unplug successful first time.'))
+ return
+ except VMHelper.XenAPI.Failure, e:
+ if (len(e.details) > 0 and
+ e.details[0] == 'DEVICE_DETACH_REJECTED'):
+ LOG.debug(_('VBD.unplug rejected: retrying...'))
+ time.sleep(1)
+ elif (len(e.details) > 0 and
+ e.details[0] == 'DEVICE_ALREADY_DETACHED'):
+ LOG.debug(_('VBD.unplug successful eventually.'))
+ return
+ else:
+ LOG.error(_('Ignoring XenAPI.Failure in VBD.unplug: %s'),
+ e)
+ return
+
+
+def ignore_failure(func, *args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except VMHelper.XenAPI.Failure, e:
+ LOG.error(_('Ignoring XenAPI.Failure %s'), e)
+ return None
+
+
+def get_this_vm_uuid():
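+    # Xen exposes the uuid of the domain we are running in through sysfs.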
+ with file('/sys/hypervisor/uuid') as f:
+ return f.readline().strip()
+
+
+def get_this_vm_ref(session):
+ return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid())
+
+
+def _stream_disk(dev, type, virtual_size, image_file):
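+    # ImageType.DISK images get a partition table first and are written
+    # after the MBR; all other types are streamed from byte 0.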
+ offset = 0
+ if type == ImageType.DISK:
+ offset = MBR_SIZE_BYTES
+ _write_partition(virtual_size, dev)
+
+ with open('/dev/%s' % dev, 'wb') as f:
+ f.seek(offset)
+ for chunk in image_file:
+ f.write(chunk)
+
+
+def _write_partition(virtual_size, dev):
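+    # Lay out an msdos label with a single primary partition: the first
+    # 63 sectors are reserved for the MBR, and the partition spans exactly
+    # virtual_size bytes after it.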
+ dest = '/dev/%s' % dev
+ mbr_last = MBR_SIZE_SECTORS - 1
+ primary_first = MBR_SIZE_SECTORS
+ primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1
+
+ LOG.debug(_('Writing partition table %d %d to %s...'),
+ primary_first, primary_last, dest)
+
+ def execute(cmd, process_input=None, check_exit_code=True):
+ return utils.execute(cmd=cmd,
+ process_input=process_input,
+ check_exit_code=check_exit_code)
+
+ execute('parted --script %s mklabel msdos' % dest)
+ execute('parted --script %s mkpart primary %ds %ds' %
+ (dest, primary_first, primary_last))
+
+ LOG.debug(_('Writing partition table %s done.'), dest)
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 635ade89f..1fdf79ad5 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -85,7 +85,8 @@ class VMOps(object):
#Have a look at the VDI and see if it has a PV kernel
pv_kernel = False
if not instance.kernel_id:
- pv_kernel = VMHelper.lookup_image(self._session, vdi_ref)
+ pv_kernel = VMHelper.lookup_image(self._session, instance.id,
+ vdi_ref)
kernel = None
if instance.kernel_id:
kernel = VMHelper.fetch_image(self._session, instance.id,
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 6707c9cf3..8abd82336 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -89,6 +89,9 @@ flags.DEFINE_float('xenapi_task_poll_interval',
'The interval used for polling of remote tasks '
'(Async.VM.start, etc). Used only if '
'connection_type=xenapi.')
+flags.DEFINE_string('xenapi_image_service',
+ 'glance',
+ 'Where to get VM images: glance or objectstore.')
flags.DEFINE_float('xenapi_vhd_coalesce_poll_interval',
5.0,
'The interval used for polling of coalescing vhds.'
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index 71fe18a40..5fefa10cf 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -47,7 +47,7 @@ flags.DEFINE_integer('iscsi_num_targets',
'Number of iscsi target ids per host')
flags.DEFINE_string('iscsi_target_prefix', 'iqn.2010-10.org.openstack:',
'prefix for iscsi volumes')
-flags.DEFINE_string('iscsi_ip_prefix', '127.0',
+flags.DEFINE_string('iscsi_ip_prefix', '$my_ip',
'discover volumes on the ip that starts with this prefix')
flags.DEFINE_string('rbd_pool', 'rbd',
'the rbd pool in which volumes are stored')
@@ -285,7 +285,8 @@ class ISCSIDriver(VolumeDriver):
self._execute("sudo iscsiadm -m node -T %s -p %s --op update "
"-n node.startup -v automatic" %
(iscsi_name, iscsi_portal))
- return "/dev/iscsi/%s" % volume['name']
+ return "/dev/disk/by-path/ip-%s-iscsi-%s-lun-0" % (iscsi_portal,
+ iscsi_name)
def undiscover_volume(self, volume):
"""Undiscover volume on a remote host."""
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
index cc34a1ec9..aadacce57 100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
@@ -18,7 +18,7 @@
# under the License.
#
-# XenAPI plugin for putting images into glance
+# XenAPI plugin for managing glance images
#
import base64
@@ -40,8 +40,36 @@ from pluginlib_nova import *
configure_logging('glance')
CHUNK_SIZE = 8192
+KERNEL_DIR = '/boot/guest'
FILE_SR_PATH = '/var/run/sr-mount'
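+
+# copy_kernel_vdi runs in dom0: it attaches the VDI there and copies
+# exactly image-size bytes into KERNEL_DIR so the guest's kernel or
+# ramdisk can be booted from the control domain's filesystem.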
+def copy_kernel_vdi(session, args):
+    vdi = exists(args, 'vdi-ref')
+    size = exists(args, 'image-size')
+    # Use the uuid as a filename
+    vdi_uuid = session.xenapi.VDI.get_uuid(vdi)
+    copy_args = {'vdi_uuid': vdi_uuid, 'vdi_size': int(size)}
+    filename = with_vdi_in_dom0(session, vdi, False,
+                                lambda dev:
+                                _copy_kernel_vdi('/dev/%s' % dev, copy_args))
+    return filename
+
+
+def _copy_kernel_vdi(dest, copy_args):
+    vdi_uuid = copy_args['vdi_uuid']
+    vdi_size = copy_args['vdi_size']
+    logging.debug("copying kernel/ramdisk file from %s to /boot/guest/%s",
+                  dest, vdi_uuid)
+    filename = KERNEL_DIR + '/' + vdi_uuid
+    # Read data from /dev/ and write it into a file on /boot/guest.
+    of = open(filename, 'wb')
+    f = open(dest, 'rb')
+    # Copy only vdi_size bytes.
+    data = f.read(vdi_size)
+    of.write(data)
+    f.close()
+    of.close()
+    logging.debug("Done. Filename: %s", filename)
+    return filename
+
def put_vdis(session, args):
params = pickle.loads(exists(args, 'params'))
vdi_uuids = params["vdi_uuids"]
@@ -128,4 +156,5 @@ def find_sr(session):
if __name__ == '__main__':
- XenAPIPlugin.dispatch({'put_vdis': put_vdis})
+ XenAPIPlugin.dispatch({'put_vdis': put_vdis,
+ 'copy_kernel_vdi': copy_kernel_vdi})
diff --git a/run_tests.py b/run_tests.py
index 5b8617f63..7b5e2192a 100644
--- a/run_tests.py
+++ b/run_tests.py
@@ -17,7 +17,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
+import gettext
import os
import unittest
import sys
diff --git a/tools/pip-requires b/tools/pip-requires
index 3aa76c24d..895e81eb3 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -25,4 +25,6 @@ bzr
Twisted>=10.1.0
PasteDeploy
paste
+sqlalchemy-migrate
netaddr
+glance