summaryrefslogtreecommitdiffstats
path: root/nova/db
diff options
context:
space:
mode:
authorTrey Morris <trey.morris@rackspace.com>2011-03-28 12:13:20 -0500
committerTrey Morris <trey.morris@rackspace.com>2011-03-28 12:13:20 -0500
commit7eedf3f69ca1bbd1f44252fa01fb4f2676735eb2 (patch)
tree33c3983537da1d894caa6b390b5c488511df83c4 /nova/db
parent57890776d0d7e9172b1fa056076ce28ae4b34b7b (diff)
parented12a2cd2beef77d1c7e9d16771e766aa068530d (diff)
downloadnova-7eedf3f69ca1bbd1f44252fa01fb4f2676735eb2.tar.gz
nova-7eedf3f69ca1bbd1f44252fa01fb4f2676735eb2.tar.xz
nova-7eedf3f69ca1bbd1f44252fa01fb4f2676735eb2.zip
merge with trunk
Diffstat (limited to 'nova/db')
-rw-r--r--nova/db/api.py165
-rw-r--r--nova/db/base.py2
-rw-r--r--nova/db/sqlalchemy/api.py557
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py90
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py87
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py61
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py51
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py83
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py154
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py50
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/014_mac_address_table.py (renamed from nova/db/sqlalchemy/migrate_repo/versions/010_mac_address_table.py)0
-rw-r--r--nova/db/sqlalchemy/migration.py2
-rw-r--r--nova/db/sqlalchemy/models.py83
13 files changed, 1302 insertions, 83 deletions
diff --git a/nova/db/api.py b/nova/db/api.py
index 5b92afbcd..036caa585 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -71,6 +71,7 @@ class NoMoreTargets(exception.Error):
"""No more available blades"""
pass
+
###################
@@ -80,11 +81,16 @@ def service_destroy(context, instance_id):
def service_get(context, service_id):
- """Get an service or raise if it does not exist."""
+ """Get a service or raise if it does not exist."""
return IMPL.service_get(context, service_id)
-def service_get_all(context, disabled=False):
+def service_get_by_host_and_topic(context, host, topic):
+ """Get a service by host it's on and topic it listens to"""
+ return IMPL.service_get_by_host_and_topic(context, host, topic)
+
+
+def service_get_all(context, disabled=None):
"""Get all services."""
return IMPL.service_get_all(context, disabled)
@@ -99,6 +105,11 @@ def service_get_all_by_host(context, host):
return IMPL.service_get_all_by_host(context, host)
+def service_get_all_compute_by_host(context, host):
+ """Get all compute services for a given host."""
+ return IMPL.service_get_all_compute_by_host(context, host)
+
+
def service_get_all_compute_sorted(context):
"""Get all compute services sorted by instance count.
@@ -148,6 +159,29 @@ def service_update(context, service_id, values):
###################
+def compute_node_get(context, compute_id, session=None):
+ """Get a computeNode or raise if it does not exist."""
+ return IMPL.compute_node_get(context, compute_id)
+
+
+def compute_node_create(context, values):
+ """Create a computeNode from the values dictionary."""
+ return IMPL.compute_node_create(context, values)
+
+
+def compute_node_update(context, compute_id, values):
+ """Set the given properties on a computeNode and update it.
+
+ Raises NotFound if computeNode does not exist.
+
+ """
+
+ return IMPL.compute_node_update(context, compute_id, values)
+
+
+###################
+
+
def certificate_create(context, values):
"""Create a certificate from the values dictionary."""
return IMPL.certificate_create(context, values)
@@ -181,7 +215,7 @@ def certificate_update(context, certificate_id, values):
Raises NotFound if service does not exist.
"""
- return IMPL.service_update(context, certificate_id, values)
+ return IMPL.certificate_update(context, certificate_id, values)
###################
@@ -252,6 +286,33 @@ def floating_ip_get_by_address(context, address):
return IMPL.floating_ip_get_by_address(context, address)
+def floating_ip_update(context, address, values):
+ """Update a floating ip by address or raise if it doesn't exist."""
+ return IMPL.floating_ip_update(context, address, values)
+
+
+####################
+
+def migration_update(context, id, values):
+ """Update a migration instance"""
+ return IMPL.migration_update(context, id, values)
+
+
+def migration_create(context, values):
+ """Create a migration record"""
+ return IMPL.migration_create(context, values)
+
+
+def migration_get(context, migration_id):
+ """Finds a migration by the id"""
+ return IMPL.migration_get(context, migration_id)
+
+
+def migration_get_by_instance_and_status(context, instance_id, status):
+ """Finds a migration by the instance id it is migrating"""
+ return IMPL.migration_get_by_instance_and_status(context, instance_id,
+ status)
+
####################
@@ -293,6 +354,11 @@ def fixed_ip_get_all(context):
return IMPL.fixed_ip_get_all(context)
+def fixed_ip_get_all_by_host(context, host):
+ """Get all defined fixed ips used by a host."""
+ return IMPL.fixed_ip_get_all_by_host(context, host)
+
+
def fixed_ip_get_by_address(context, address):
"""Get a fixed ip by address or raise if it does not exist."""
return IMPL.fixed_ip_get_by_address(context, address)
@@ -445,6 +511,27 @@ def instance_add_security_group(context, instance_id, security_group_id):
security_group_id)
+def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id):
+ """Get instances.vcpus by host and project."""
+ return IMPL.instance_get_vcpu_sum_by_host_and_project(context,
+ hostname,
+ proj_id)
+
+
+def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id):
+ """Get amount of memory by host and project."""
+ return IMPL.instance_get_memory_sum_by_host_and_project(context,
+ hostname,
+ proj_id)
+
+
+def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id):
+ """Get total amount of disk by host and project."""
+ return IMPL.instance_get_disk_sum_by_host_and_project(context,
+ hostname,
+ proj_id)
+
+
def instance_action_create(context, values):
"""Create an instance action from the values dictionary."""
return IMPL.instance_action_create(context, values)
@@ -521,6 +608,13 @@ def network_create_safe(context, values):
return IMPL.network_create_safe(context, values)
+def network_delete_safe(context, network_id):
+ """Delete network with key network_id.
+ This method assumes that the network is not associated with any project
+ """
+ return IMPL.network_delete_safe(context, network_id)
+
+
def network_create_fixed_ips(context, network_id, num_vpn_clients):
"""Create the ips for the network, reserving sepecified ips."""
return IMPL.network_create_fixed_ips(context, network_id, num_vpn_clients)
@@ -546,7 +640,7 @@ def network_get_all(context):
return IMPL.network_get_all(context)
-# pylint: disable-msg=C0103
+# pylint: disable=C0103
def network_get_associated_fixed_ips(context, network_id):
"""Get all network's ips that have been associated."""
return IMPL.network_get_associated_fixed_ips(context, network_id)
@@ -557,6 +651,11 @@ def network_get_by_bridge(context, bridge):
return IMPL.network_get_by_bridge(context, bridge)
+def network_get_by_cidr(context, cidr):
+ """Get a network by cidr or raise if it does not exist"""
+ return IMPL.network_get_by_cidr(context, cidr)
+
+
def network_get_by_instance(context, instance_id):
"""Get a network by instance id or raise if it does not exist."""
return IMPL.network_get_by_instance(context, instance_id)
@@ -757,6 +856,11 @@ def volume_get_all_by_host(context, host):
return IMPL.volume_get_all_by_host(context, host)
+def volume_get_all_by_instance(context, instance_id):
+ """Get all volumes belonging to an instance."""
+ return IMPL.volume_get_all_by_instance(context, instance_id)
+
+
def volume_get_all_by_project(context, project_id):
"""Get all volumes belonging to a project."""
return IMPL.volume_get_all_by_project(context, project_id)
@@ -1038,6 +1142,41 @@ def console_get(context, console_id, instance_id=None):
return IMPL.console_get(context, console_id, instance_id)
+ ##################
+
+
+def instance_type_create(context, values):
+ """Create a new instance type"""
+ return IMPL.instance_type_create(context, values)
+
+
+def instance_type_get_all(context, inactive=False):
+ """Get all instance types"""
+ return IMPL.instance_type_get_all(context, inactive)
+
+
+def instance_type_get_by_name(context, name):
+ """Get instance type by name"""
+ return IMPL.instance_type_get_by_name(context, name)
+
+
+def instance_type_get_by_flavor_id(context, id):
+ """Get instance type by flavor id"""
+ return IMPL.instance_type_get_by_flavor_id(context, id)
+
+
+def instance_type_destroy(context, name):
+ """Delete an instance type"""
+ return IMPL.instance_type_destroy(context, name)
+
+
+def instance_type_purge(context, name):
+ """Purges (removes) an instance type from DB
+ Use instance_type_destroy for most cases
+ """
+ return IMPL.instance_type_purge(context, name)
+
+
####################
@@ -1064,3 +1203,21 @@ def zone_get(context, zone_id):
def zone_get_all(context):
"""Get all child Zones."""
return IMPL.zone_get_all(context)
+
+
+####################
+
+
+def instance_metadata_get(context, instance_id):
+ """Get all metadata for an instance"""
+ return IMPL.instance_metadata_get(context, instance_id)
+
+
+def instance_metadata_delete(context, instance_id, key):
+ """Delete the given metadata item"""
+ IMPL.instance_metadata_delete(context, instance_id, key)
+
+
+def instance_metadata_update_or_create(context, instance_id, metadata):
+ """Create or update instance metadata"""
+ IMPL.instance_metadata_update_or_create(context, instance_id, metadata)
diff --git a/nova/db/base.py b/nova/db/base.py
index 1d1e80866..a0f2180c6 100644
--- a/nova/db/base.py
+++ b/nova/db/base.py
@@ -33,4 +33,4 @@ class Base(object):
def __init__(self, db_driver=None):
if not db_driver:
db_driver = FLAGS.db_driver
- self.db = utils.import_object(db_driver) # pylint: disable-msg=C0103
+ self.db = utils.import_object(db_driver) # pylint: disable=C0103
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 6e59f4b1d..e69a5c680 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -34,6 +34,7 @@ from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql import exists
from sqlalchemy.sql import func
+from sqlalchemy.sql.expression import literal_column
FLAGS = flags.FLAGS
@@ -118,6 +119,11 @@ def service_destroy(context, service_id):
service_ref = service_get(context, service_id, session=session)
service_ref.delete(session=session)
+ if service_ref.topic == 'compute' and \
+ len(service_ref.compute_node) != 0:
+ for c in service_ref.compute_node:
+ c.delete(session=session)
+
@require_admin_context
def service_get(context, service_id, session=None):
@@ -125,6 +131,7 @@ def service_get(context, service_id, session=None):
session = get_session()
result = session.query(models.Service).\
+ options(joinedload('compute_node')).\
filter_by(id=service_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
@@ -136,12 +143,15 @@ def service_get(context, service_id, session=None):
@require_admin_context
-def service_get_all(context, disabled=False):
+def service_get_all(context, disabled=None):
session = get_session()
- return session.query(models.Service).\
- filter_by(deleted=can_read_deleted(context)).\
- filter_by(disabled=disabled).\
- all()
+ query = session.query(models.Service).\
+ filter_by(deleted=can_read_deleted(context))
+
+ if disabled is not None:
+ query = query.filter_by(disabled=disabled)
+
+ return query.all()
@require_admin_context
@@ -155,6 +165,17 @@ def service_get_all_by_topic(context, topic):
@require_admin_context
+def service_get_by_host_and_topic(context, host, topic):
+ session = get_session()
+ return session.query(models.Service).\
+ filter_by(deleted=False).\
+ filter_by(disabled=False).\
+ filter_by(host=host).\
+ filter_by(topic=topic).\
+ first()
+
+
+@require_admin_context
def service_get_all_by_host(context, host):
session = get_session()
return session.query(models.Service).\
@@ -164,6 +185,24 @@ def service_get_all_by_host(context, host):
@require_admin_context
+def service_get_all_compute_by_host(context, host):
+ topic = 'compute'
+ session = get_session()
+ result = session.query(models.Service).\
+ options(joinedload('compute_node')).\
+ filter_by(deleted=False).\
+ filter_by(host=host).\
+ filter_by(topic=topic).\
+ all()
+
+ if not result:
+ raise exception.NotFound(_("%s does not exist or is not "
+ "a compute node.") % host)
+
+ return result
+
+
+@require_admin_context
def _service_get_all_topic_subquery(context, session, topic, subq, label):
sort_value = getattr(subq.c, label)
return session.query(models.Service, func.coalesce(sort_value, 0)).\
@@ -274,6 +313,42 @@ def service_update(context, service_id, values):
@require_admin_context
+def compute_node_get(context, compute_id, session=None):
+ if not session:
+ session = get_session()
+
+ result = session.query(models.ComputeNode).\
+ filter_by(id=compute_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ first()
+
+ if not result:
+ raise exception.NotFound(_('No computeNode for id %s') % compute_id)
+
+ return result
+
+
+@require_admin_context
+def compute_node_create(context, values):
+ compute_node_ref = models.ComputeNode()
+ compute_node_ref.update(values)
+ compute_node_ref.save()
+ return compute_node_ref
+
+
+@require_admin_context
+def compute_node_update(context, compute_id, values):
+ session = get_session()
+ with session.begin():
+ compute_ref = compute_node_get(context, compute_id, session=session)
+ compute_ref.update(values)
+ compute_ref.save(session=session)
+
+
+###################
+
+
+@require_admin_context
def certificate_get(context, certificate_id, session=None):
if not session:
session = get_session()
@@ -494,6 +569,16 @@ def floating_ip_get_by_address(context, address, session=None):
return result
+@require_context
+def floating_ip_update(context, address, values):
+ session = get_session()
+ with session.begin():
+ floating_ip_ref = floating_ip_get_by_address(context, address, session)
+ for (key, value) in values.iteritems():
+ floating_ip_ref[key] = value
+ floating_ip_ref.save(session=session)
+
+
###################
@@ -566,18 +651,17 @@ def fixed_ip_disassociate(context, address):
@require_admin_context
def fixed_ip_disassociate_all_by_timeout(_context, host, time):
session = get_session()
- # NOTE(vish): The nested select is because sqlite doesn't support
- # JOINs in UPDATEs.
- result = session.execute('UPDATE fixed_ips SET instance_id = NULL, '
- 'leased = 0 '
- 'WHERE network_id IN (SELECT id FROM networks '
- 'WHERE host = :host) '
- 'AND updated_at < :time '
- 'AND instance_id IS NOT NULL '
- 'AND allocated = 0',
- {'host': host,
- 'time': time})
- return result.rowcount
+ inner_q = session.query(models.Network.id).\
+ filter_by(host=host).\
+ subquery()
+ result = session.query(models.FixedIp).\
+ filter(models.FixedIp.network_id.in_(inner_q)).\
+ filter(models.FixedIp.updated_at < time).\
+ filter(models.FixedIp.instance_id != None).\
+ filter_by(allocated=0).\
+ update({'instance_id': None,
+ 'leased': 0})
+ return result
@require_admin_context
@@ -591,6 +675,22 @@ def fixed_ip_get_all(context, session=None):
return result
+@require_admin_context
+def fixed_ip_get_all_by_host(context, host=None):
+ session = get_session()
+
+ result = session.query(models.FixedIp).\
+ join(models.FixedIp.instance).\
+ filter_by(state=1).\
+ filter_by(host=host).\
+ all()
+
+ if not result:
+ raise exception.NotFound(_('No fixed ips for this host defined'))
+
+ return result
+
+
@require_context
def fixed_ip_get_by_address(context, address, session=None):
if not session:
@@ -762,6 +862,15 @@ def instance_create(context, values):
context - request context object
values - dict containing column values.
"""
+ metadata = values.get('metadata')
+ metadata_refs = []
+ if metadata:
+ for metadata_item in metadata:
+ metadata_ref = models.InstanceMetadata()
+ metadata_ref.update(metadata_item)
+ metadata_refs.append(metadata_ref)
+ values['metadata'] = metadata_refs
+
instance_ref = models.Instance()
instance_ref.update(values)
@@ -787,14 +896,21 @@ def instance_data_get_for_project(context, project_id):
def instance_destroy(context, instance_id):
session = get_session()
with session.begin():
- session.execute('update instances set deleted=1,'
- 'deleted_at=:at where id=:id',
- {'id': instance_id,
- 'at': datetime.datetime.utcnow()})
- session.execute('update security_group_instance_association '
- 'set deleted=1,deleted_at=:at where instance_id=:id',
- {'id': instance_id,
- 'at': datetime.datetime.utcnow()})
+ session.query(models.Instance).\
+ filter_by(id=instance_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+ session.query(models.SecurityGroupInstanceAssociation).\
+ filter_by(instance_id=instance_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+ session.query(models.InstanceMetadata).\
+ filter_by(instance_id=instance_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
@require_context
@@ -1002,6 +1118,45 @@ def instance_add_security_group(context, instance_id, security_group_id):
@require_context
+def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id):
+ session = get_session()
+ result = session.query(models.Instance).\
+ filter_by(host=hostname).\
+ filter_by(project_id=proj_id).\
+ filter_by(deleted=False).\
+ value(func.sum(models.Instance.vcpus))
+ if not result:
+ return 0
+ return result
+
+
+@require_context
+def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id):
+ session = get_session()
+ result = session.query(models.Instance).\
+ filter_by(host=hostname).\
+ filter_by(project_id=proj_id).\
+ filter_by(deleted=False).\
+ value(func.sum(models.Instance.memory_mb))
+ if not result:
+ return 0
+ return result
+
+
+@require_context
+def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id):
+ session = get_session()
+ result = session.query(models.Instance).\
+ filter_by(host=hostname).\
+ filter_by(project_id=proj_id).\
+ filter_by(deleted=False).\
+ value(func.sum(models.Instance.local_gb))
+ if not result:
+ return 0
+ return result
+
+
+@require_context
def instance_action_create(context, values):
"""Create an instance action from the values dictionary."""
action_ref = models.InstanceActions()
@@ -1047,9 +1202,11 @@ def key_pair_destroy_all_by_user(context, user_id):
authorize_user_context(context, user_id)
session = get_session()
with session.begin():
- # TODO(vish): do we have to use sql here?
- session.execute('update key_pairs set deleted=1 where user_id=:id',
- {'id': user_id})
+ session.query(models.KeyPair).\
+ filter_by(user_id=user_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
@require_context
@@ -1152,6 +1309,15 @@ def network_create_safe(context, values):
@require_admin_context
+def network_delete_safe(context, network_id):
+ session = get_session()
+ with session.begin():
+ network_ref = network_get(context, network_id=network_id, \
+ session=session)
+ session.delete(network_ref)
+
+
+@require_admin_context
def network_disassociate(context, network_id):
network_update(context, network_id, {'project_id': None,
'host': None})
@@ -1160,7 +1326,9 @@ def network_disassociate(context, network_id):
@require_admin_context
def network_disassociate_all(context):
session = get_session()
- session.execute('update networks set project_id=NULL')
+ session.query(models.Network).\
+ update({'project_id': None,
+ 'updated_at': literal_column('updated_at')})
@require_context
@@ -1198,7 +1366,7 @@ def network_get_all(context):
# NOTE(vish): pylint complains because of the long method name, but
# it fits with the names of the rest of the methods
-# pylint: disable-msg=C0103
+# pylint: disable=C0103
@require_admin_context
@@ -1226,6 +1394,18 @@ def network_get_by_bridge(context, bridge):
@require_admin_context
+def network_get_by_cidr(context, cidr):
+ session = get_session()
+ result = session.query(models.Network).\
+ filter_by(cidr=cidr).first()
+
+ if not result:
+ raise exception.NotFound(_('Network with cidr %s does not exist') %
+ cidr)
+ return result
+
+
+@require_admin_context
def network_get_by_instance(_context, instance_id):
session = get_session()
rv = session.query(models.Network).\
@@ -1531,15 +1711,17 @@ def volume_data_get_for_project(context, project_id):
def volume_destroy(context, volume_id):
session = get_session()
with session.begin():
- # TODO(vish): do we have to use sql here?
- session.execute('update volumes set deleted=1 where id=:id',
- {'id': volume_id})
- session.execute('update export_devices set volume_id=NULL '
- 'where volume_id=:id',
- {'id': volume_id})
- session.execute('update iscsi_targets set volume_id=NULL '
- 'where volume_id=:id',
- {'id': volume_id})
+ session.query(models.Volume).\
+ filter_by(id=volume_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+ session.query(models.ExportDevice).\
+ filter_by(volume_id=volume_id).\
+ update({'volume_id': None})
+ session.query(models.IscsiTarget).\
+ filter_by(volume_id=volume_id).\
+ update({'volume_id': None})
@require_admin_context
@@ -1599,6 +1781,18 @@ def volume_get_all_by_host(context, host):
all()
+@require_admin_context
+def volume_get_all_by_instance(context, instance_id):
+ session = get_session()
+ result = session.query(models.Volume).\
+ filter_by(instance_id=instance_id).\
+ filter_by(deleted=False).\
+ all()
+ if not result:
+ raise exception.NotFound(_('No volume for instance %s') % instance_id)
+ return result
+
+
@require_context
def volume_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
@@ -1759,17 +1953,21 @@ def security_group_create(context, values):
def security_group_destroy(context, security_group_id):
session = get_session()
with session.begin():
- # TODO(vish): do we have to use sql here?
- session.execute('update security_groups set deleted=1 where id=:id',
- {'id': security_group_id})
- session.execute('update security_group_instance_association '
- 'set deleted=1,deleted_at=:at '
- 'where security_group_id=:id',
- {'id': security_group_id,
- 'at': datetime.datetime.utcnow()})
- session.execute('update security_group_rules set deleted=1 '
- 'where group_id=:id',
- {'id': security_group_id})
+ session.query(models.SecurityGroup).\
+ filter_by(id=security_group_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+ session.query(models.SecurityGroupInstanceAssociation).\
+ filter_by(security_group_id=security_group_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+ session.query(models.SecurityGroupIngressRule).\
+ filter_by(group_id=security_group_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
@require_context
@@ -1777,9 +1975,14 @@ def security_group_destroy_all(context, session=None):
if not session:
session = get_session()
with session.begin():
- # TODO(vish): do we have to use sql here?
- session.execute('update security_groups set deleted=1')
- session.execute('update security_group_rules set deleted=1')
+ session.query(models.SecurityGroup).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+ session.query(models.SecurityGroupIngressRule).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
###################
@@ -1908,12 +2111,15 @@ def user_create(_context, values):
def user_delete(context, id):
session = get_session()
with session.begin():
- session.execute('delete from user_project_association '
- 'where user_id=:id', {'id': id})
- session.execute('delete from user_role_association '
- 'where user_id=:id', {'id': id})
- session.execute('delete from user_project_role_association '
- 'where user_id=:id', {'id': id})
+ session.query(models.UserProjectAssociation).\
+ filter_by(user_id=id).\
+ delete()
+ session.query(models.UserRoleAssociation).\
+ filter_by(user_id=id).\
+ delete()
+ session.query(models.UserProjectRoleAssociation).\
+ filter_by(user_id=id).\
+ delete()
user_ref = user_get(context, id, session=session)
session.delete(user_ref)
@@ -1970,8 +2176,11 @@ def project_get_by_user(context, user_id):
session = get_session()
user = session.query(models.User).\
filter_by(deleted=can_read_deleted(context)).\
+ filter_by(id=user_id).\
options(joinedload_all('projects')).\
first()
+ if not user:
+ raise exception.NotFound(_('Invalid user_id %s') % user_id)
return user.projects
@@ -2004,10 +2213,12 @@ def project_update(context, project_id, values):
def project_delete(context, id):
session = get_session()
with session.begin():
- session.execute('delete from user_project_association '
- 'where project_id=:id', {'id': id})
- session.execute('delete from user_project_role_association '
- 'where project_id=:id', {'id': id})
+ session.query(models.UserProjectAssociation).\
+ filter_by(project_id=id).\
+ delete()
+ session.query(models.UserProjectRoleAssociation).\
+ filter_by(project_id=id).\
+ delete()
project_ref = project_get(context, id, session=session)
session.delete(project_ref)
@@ -2032,11 +2243,11 @@ def user_get_roles_for_project(context, user_id, project_id):
def user_remove_project_role(context, user_id, project_id, role):
session = get_session()
with session.begin():
- session.execute('delete from user_project_role_association where '
- 'user_id=:user_id and project_id=:project_id and '
- 'role=:role', {'user_id': user_id,
- 'project_id': project_id,
- 'role': role})
+ session.query(models.UserProjectRoleAssociation).\
+ filter_by(user_id=user_id).\
+ filter_by(project_id=project_id).\
+ filter_by(role=role).\
+ delete()
def user_remove_role(context, user_id, role):
@@ -2081,6 +2292,51 @@ def host_get_networks(context, host):
all()
+###################
+
+
+@require_admin_context
+def migration_create(context, values):
+ migration = models.Migration()
+ migration.update(values)
+ migration.save()
+ return migration
+
+
+@require_admin_context
+def migration_update(context, id, values):
+ session = get_session()
+ with session.begin():
+ migration = migration_get(context, id, session=session)
+ migration.update(values)
+ migration.save(session=session)
+ return migration
+
+
+@require_admin_context
+def migration_get(context, id, session=None):
+ if not session:
+ session = get_session()
+ result = session.query(models.Migration).\
+ filter_by(id=id).first()
+ if not result:
+ raise exception.NotFound(_("No migration found with id %s")
+ % id)
+ return result
+
+
+@require_admin_context
+def migration_get_by_instance_and_status(context, instance_id, status):
+ session = get_session()
+ result = session.query(models.Migration).\
+ filter_by(instance_id=instance_id).\
+ filter_by(status=status).first()
+ if not result:
+ raise exception.NotFound(_("No migration found for instance "
+ "%(instance_id)s with status %(status)s") % locals())
+ return result
+
+
##################
@@ -2142,8 +2398,9 @@ def console_delete(context, console_id):
session = get_session()
with session.begin():
# consoles are meant to be transient. (mdragon)
- session.execute('delete from consoles '
- 'where id=:id', {'id': console_id})
+ session.query(models.Console).\
+ filter_by(id=console_id).\
+ delete()
def console_get_by_pool_instance(context, pool_id, instance_id):
@@ -2182,6 +2439,98 @@ def console_get(context, console_id, instance_id=None):
return result
+ ##################
+
+
+@require_admin_context
+def instance_type_create(_context, values):
+ try:
+ instance_type_ref = models.InstanceTypes()
+ instance_type_ref.update(values)
+ instance_type_ref.save()
+ except Exception, e:
+ raise exception.DBError(e)
+ return instance_type_ref
+
+
+@require_context
+def instance_type_get_all(context, inactive=False):
+ """
+ Returns a dict describing all instance_types with name as key.
+ """
+ session = get_session()
+ if inactive:
+ inst_types = session.query(models.InstanceTypes).\
+ order_by("name").\
+ all()
+ else:
+ inst_types = session.query(models.InstanceTypes).\
+ filter_by(deleted=False).\
+ order_by("name").\
+ all()
+ if inst_types:
+ inst_dict = {}
+ for i in inst_types:
+ inst_dict[i['name']] = dict(i)
+ return inst_dict
+ else:
+ raise exception.NotFound
+
+
+@require_context
+def instance_type_get_by_name(context, name):
+ """Returns a dict describing specific instance_type"""
+ session = get_session()
+ inst_type = session.query(models.InstanceTypes).\
+ filter_by(name=name).\
+ first()
+ if not inst_type:
+ raise exception.NotFound(_("No instance type with name %s") % name)
+ else:
+ return dict(inst_type)
+
+
+@require_context
+def instance_type_get_by_flavor_id(context, id):
+ """Returns a dict describing specific flavor_id"""
+ session = get_session()
+ inst_type = session.query(models.InstanceTypes).\
+ filter_by(flavorid=int(id)).\
+ first()
+ if not inst_type:
+ raise exception.NotFound(_("No flavor with flavorid %s") % id)
+ else:
+ return dict(inst_type)
+
+
+@require_admin_context
+def instance_type_destroy(context, name):
+ """ Marks specific instance_type as deleted"""
+ session = get_session()
+ instance_type_ref = session.query(models.InstanceTypes).\
+ filter_by(name=name)
+ records = instance_type_ref.update(dict(deleted=True))
+ if records == 0:
+ raise exception.NotFound
+ else:
+ return instance_type_ref
+
+
+@require_admin_context
+def instance_type_purge(context, name):
+ """ Removes specific instance_type from DB
+ Usually instance_type_destroy should be used
+ """
+ session = get_session()
+ instance_type_ref = session.query(models.InstanceTypes).\
+ filter_by(name=name)
+ records = instance_type_ref.delete()
+ if records == 0:
+ raise exception.NotFound
+ else:
+ return instance_type_ref
+
+
####################
@@ -2195,6 +2544,7 @@ def zone_create(context, values):
@require_admin_context
def zone_update(context, zone_id, values):
+ session = get_session()
zone = session.query(models.Zone).filter_by(id=zone_id).first()
if not zone:
raise exception.NotFound(_("No zone with id %(zone_id)s") % locals())
@@ -2207,8 +2557,9 @@ def zone_update(context, zone_id, values):
def zone_delete(context, zone_id):
session = get_session()
with session.begin():
- session.execute('delete from zones '
- 'where id=:id', {'id': zone_id})
+ session.query(models.Zone).\
+ filter_by(id=zone_id).\
+ delete()
@require_admin_context
@@ -2224,3 +2575,65 @@ def zone_get(context, zone_id):
def zone_get_all(context):
session = get_session()
return session.query(models.Zone).all()
+
+
+####################
+
+@require_context
+def instance_metadata_get(context, instance_id):
+ session = get_session()
+
+ meta_results = session.query(models.InstanceMetadata).\
+ filter_by(instance_id=instance_id).\
+ filter_by(deleted=False).\
+ all()
+
+ meta_dict = {}
+ for i in meta_results:
+ meta_dict[i['key']] = i['value']
+ return meta_dict
+
+
+@require_context
+def instance_metadata_delete(context, instance_id, key):
+ session = get_session()
+ session.query(models.InstanceMetadata).\
+ filter_by(instance_id=instance_id).\
+ filter_by(key=key).\
+ filter_by(deleted=False).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+@require_context
+def instance_metadata_get_item(context, instance_id, key):
+ session = get_session()
+
+ meta_result = session.query(models.InstanceMetadata).\
+ filter_by(instance_id=instance_id).\
+ filter_by(key=key).\
+ filter_by(deleted=False).\
+ first()
+
+ if not meta_result:
+ raise exception.NotFound(_('Invalid metadata key for instance %s') %
+ instance_id)
+ return meta_result
+
+
+@require_context
+def instance_metadata_update_or_create(context, instance_id, metadata):
+ session = get_session()
+ meta_ref = None
+ for key, value in metadata.iteritems():
+ try:
+ meta_ref = instance_metadata_get_item(context, instance_id, key,
+ session)
+ except:
+ meta_ref = models.InstanceMetadata()
+ meta_ref.update({"key": key, "value": value,
+ "instance_id": instance_id,
+ "deleted": 0})
+ meta_ref.save(session=session)
+ return metadata
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py b/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py
new file mode 100644
index 000000000..427934d53
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py
@@ -0,0 +1,90 @@
+# Copyright 2011 OpenStack LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
meta = MetaData()


# Stub table definition: just enough of fixed_ips for the column-creation
# calls below to succeed.  This is NOT the authoritative table model.
fixed_ips = Table(
    "fixed_ips",
    meta,
    Column("id", Integer(), primary_key=True, nullable=False))


def _ipv6_string(length):
    """Build the String type shared by the new IPv6 columns."""
    return String(length=length, convert_unicode=False,
                  assert_unicode=None, unicode_error=None,
                  _warn_on_bytestring=False)


# No new tables in this migration; only columns added to fixed_ips.
fixed_ips_addressV6 = Column("addressV6", _ipv6_string(255))
fixed_ips_netmaskV6 = Column("netmaskV6", _ipv6_string(3))
fixed_ips_gatewayV6 = Column("gatewayV6", _ipv6_string(255))


def upgrade(migrate_engine):
    """Add IPv6 address/netmask/gateway columns to fixed_ips."""
    # Bind the engine supplied by sqlalchemy-migrate to the shared metadata;
    # never create an engine inside a migration.
    meta.bind = migrate_engine

    fixed_ips.create_column(fixed_ips_addressV6)
    fixed_ips.create_column(fixed_ips_netmaskV6)
    fixed_ips.create_column(fixed_ips_gatewayV6)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py b/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py
new file mode 100644
index 000000000..5e2cb69d9
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py
@@ -0,0 +1,87 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Ken Pepple
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import api
+from nova import db
+from nova import log as logging
+
+import datetime
+
meta = MetaData()


# New table: instance_types replaces the old hard-coded INSTANCE_TYPES dict.
instance_types = Table('instance_types', meta,
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('name',
               String(length=255, convert_unicode=False, assert_unicode=None,
                      unicode_error=None, _warn_on_bytestring=False),
               unique=True),
        Column('id', Integer(), primary_key=True, nullable=False),
        Column('memory_mb', Integer(), nullable=False),
        Column('vcpus', Integer(), nullable=False),
        Column('local_gb', Integer(), nullable=False),
        Column('flavorid', Integer(), nullable=False, unique=True),
        Column('swap', Integer(), nullable=False, default=0),
        Column('rxtx_quota', Integer(), nullable=False, default=0),
        Column('rxtx_cap', Integer(), nullable=False, default=0))


def upgrade(migrate_engine):
    """Create instance_types and seed it with the legacy static flavors."""
    # Bind migrate_engine to the shared metadata; never build an engine here.
    meta.bind = migrate_engine
    try:
        instance_types.create()
    except Exception:
        logging.info(repr(instance_types))
        logging.exception('Exception while creating instance_types table')
        raise

    # The instance types that used to be hard-coded in nova.
    INSTANCE_TYPES = {
        'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
        'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
        'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
        'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
        'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
    try:
        i = instance_types.insert()
        for name, values in INSTANCE_TYPES.iteritems():
            # FIXME(kpepple) should we be seeding created_at / updated_at ?
            i.execute({'name': name, 'memory_mb': values["memory_mb"],
                       'vcpus': values["vcpus"], 'deleted': False,
                       'local_gb': values["local_gb"],
                       'flavorid': values["flavorid"]})
    except Exception:
        logging.info(repr(instance_types))
        logging.exception('Exception while seeding instance_types table')
        raise


def downgrade(migrate_engine):
    """Drop the instance_types table created by upgrade()."""
    meta.bind = migrate_engine
    # Bug fix: the original did ``for table in (instance_types):`` — plain
    # parentheses, not a one-element tuple — which iterates the Table's
    # *columns* and never drops the table itself.  It also never bound the
    # engine, so a standalone downgrade had no connection to work with.
    instance_types.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py
new file mode 100644
index 000000000..4fda525f1
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py
@@ -0,0 +1,61 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
meta = MetaData()

# Stub definition: only the primary key is needed for the ForeignKey below.
# This is not the authoritative instances model.
instances = Table('instances', meta,
        Column('id', Integer(), primary_key=True, nullable=False),
        )


# New table tracking host-to-host instance migrations.
migrations = Table('migrations', meta,
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id', Integer(), primary_key=True, nullable=False),
        Column('source_compute', String(255)),
        Column('dest_compute', String(255)),
        Column('dest_host', String(255)),
        Column('instance_id', Integer, ForeignKey('instances.id'),
               nullable=True),
        Column('status', String(255)),
        )


def upgrade(migrate_engine):
    """Create the migrations table."""
    # Bind migrate_engine to the shared metadata; never build an engine here.
    meta.bind = migrate_engine
    try:
        migrations.create()
    except Exception:
        logging.info(repr(migrations))
        logging.exception('Exception while creating table')
        raise
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py
new file mode 100644
index 000000000..eb3066894
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py
@@ -0,0 +1,51 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from sqlalchemy.sql import text
+from migrate import *
+
+from nova import log as logging
+
+
meta = MetaData()

# Minimal stub of instances; enough for create_column/drop_column to work.
instances = Table('instances', meta,
        Column('id', Integer(), primary_key=True, nullable=False),
        )

# New nullable column recording the guest OS family of the instance.
instances_os_type = Column('os_type',
                           String(length=255, convert_unicode=False,
                                  assert_unicode=None, unicode_error=None,
                                  _warn_on_bytestring=False),
                           nullable=True)


def upgrade(migrate_engine):
    """Add instances.os_type and default pre-existing rows to 'linux'."""
    # Bind migrate_engine to the shared metadata; never build an engine here.
    meta.bind = migrate_engine

    instances.create_column(instances_os_type)
    # Backfill: rows that existed before this migration get os_type='linux'.
    migrate_engine.execute(instances.update()
                                    .where(instances.c.os_type == None)
                                    .values(os_type='linux'))


def downgrade(migrate_engine):
    """Remove the os_type column again."""
    meta.bind = migrate_engine

    instances.drop_column('os_type')
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py b/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py
new file mode 100644
index 000000000..23ccccb4e
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py
@@ -0,0 +1,83 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from migrate import *
+from nova import log as logging
+from sqlalchemy import *
+
+
meta = MetaData()

# Stub of instances; only the primary key is needed for create_column below.
instances = Table('instances', meta,
        Column('id', Integer(), primary_key=True, nullable=False),
        )


# New table: per-host resource/capability snapshot used by live migration.
compute_nodes = Table('compute_nodes', meta,
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id', Integer(), primary_key=True, nullable=False),
        Column('service_id', Integer(), nullable=False),
        Column('vcpus', Integer(), nullable=False),
        Column('memory_mb', Integer(), nullable=False),
        Column('local_gb', Integer(), nullable=False),
        Column('vcpus_used', Integer(), nullable=False),
        Column('memory_mb_used', Integer(), nullable=False),
        Column('local_gb_used', Integer(), nullable=False),
        Column('hypervisor_type',
               Text(convert_unicode=False, assert_unicode=None,
                    unicode_error=None, _warn_on_bytestring=False),
               nullable=False),
        Column('hypervisor_version', Integer(), nullable=False),
        Column('cpu_info',
               Text(convert_unicode=False, assert_unicode=None,
                    unicode_error=None, _warn_on_bytestring=False),
               nullable=False),
        )


# Column added to instances: the host the instance originally booted on.
instances_launched_on = Column(
    'launched_on',
    Text(convert_unicode=False, assert_unicode=None,
         unicode_error=None, _warn_on_bytestring=False),
    nullable=True)


def upgrade(migrate_engine):
    """Create compute_nodes and add instances.launched_on."""
    # Bind migrate_engine to the shared metadata; never build an engine here.
    meta.bind = migrate_engine

    try:
        compute_nodes.create()
    except Exception:
        logging.info(repr(compute_nodes))
        logging.exception('Exception while creating table')
        # Clean up any partially-created table before re-raising.
        meta.drop_all(tables=[compute_nodes])
        raise

    instances.create_column(instances_launched_on)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py b/nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py
new file mode 100644
index 000000000..e87085668
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py
@@ -0,0 +1,154 @@
+# Copyright (c) 2011 NTT.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
meta = MetaData()


def _str(length=255):
    """String type shared by all stub columns in this migration."""
    return String(length=length, convert_unicode=False, assert_unicode=None,
                  unicode_error=None, _warn_on_bytestring=False)


# Table stub-definitions: just enough for the ForeignKey and column
# operations to succeed; these are not the authoritative models.
instances = Table('instances', meta,
        Column('id', Integer(), primary_key=True, nullable=False),
        )

# Existing table to alter: networks gains the v6 gateway/netmask.
networks = Table('networks', meta,
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id', Integer(), primary_key=True, nullable=False),
        Column('injected', Boolean(create_constraint=True, name=None)),
        Column('cidr', _str()),
        Column('netmask', _str()),
        Column('bridge', _str()),
        Column('gateway', _str()),
        Column('broadcast', _str()),
        Column('dns', _str()),
        Column('vlan', Integer()),
        Column('vpn_public_address', _str()),
        Column('vpn_public_port', Integer()),
        Column('vpn_private_address', _str()),
        Column('dhcp_start', _str()),
        Column('project_id', _str()),
        Column('host', _str()),
        Column('cidr_v6', _str()),
        Column('ra_server', _str()),
        Column('label', _str()))

# Existing table whose per-address IPv6 columns (added by migration 007)
# are being dropped.
fixed_ips = Table('fixed_ips', meta,
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id', Integer(), primary_key=True, nullable=False),
        Column('address', _str()),
        Column('network_id',
               Integer(),
               ForeignKey('networks.id'),
               nullable=True),
        Column('instance_id',
               Integer(),
               ForeignKey('instances.id'),
               nullable=True),
        Column('allocated', Boolean(create_constraint=True, name=None)),
        Column('leased', Boolean(create_constraint=True, name=None)),
        Column('reserved', Boolean(create_constraint=True, name=None)),
        Column("addressV6", _str()),
        Column("netmaskV6", _str(3)),
        Column("gatewayV6", _str()),
        )

# No new tables in this migration; one column is added to networks.
networks_netmask_v6 = Column('netmask_v6', _str())


def upgrade(migrate_engine):
    """Move IPv6 configuration from fixed_ips onto networks (FlatManager)."""
    # Bind migrate_engine to the shared metadata; never build an engine here.
    meta.bind = migrate_engine

    # ra_server is renamed to the more general gateway_v6.
    networks.c.ra_server.alter(name='gateway_v6')
    # New per-network IPv6 netmask column.
    networks.create_column(networks_netmask_v6)

    # The per-address IPv6 columns are superseded by the network-level ones.
    fixed_ips.c.addressV6.drop()
    fixed_ips.c.netmaskV6.drop()
    fixed_ips.c.gatewayV6.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py
new file mode 100644
index 000000000..3fb92e85c
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py
@@ -0,0 +1,50 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
meta = MetaData()

# Stub of migrations; only the primary key is needed for
# create_column/drop_column to work.
migrations = Table('migrations', meta,
        Column('id', Integer(), primary_key=True, nullable=False),
        )

# Columns to add: the flavor before and after a resize migration.
old_flavor_id = Column('old_flavor_id', Integer())
new_flavor_id = Column('new_flavor_id', Integer())


def upgrade(migrate_engine):
    """Add old_flavor_id/new_flavor_id columns to migrations."""
    # Bind migrate_engine to the shared metadata; never build an engine here.
    meta.bind = migrate_engine
    migrations.create_column(old_flavor_id)
    migrations.create_column(new_flavor_id)


def downgrade(migrate_engine):
    """Remove the flavor-id columns again."""
    meta.bind = migrate_engine
    migrations.drop_column(old_flavor_id)
    migrations.drop_column(new_flavor_id)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/010_mac_address_table.py b/nova/db/sqlalchemy/migrate_repo/versions/014_mac_address_table.py
index b8b57b284..b8b57b284 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/010_mac_address_table.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/014_mac_address_table.py
diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py
index 9bdaa6d6b..d9e303599 100644
--- a/nova/db/sqlalchemy/migration.py
+++ b/nova/db/sqlalchemy/migration.py
@@ -60,7 +60,7 @@ def db_version():
'key_pairs', 'networks', 'projects', 'quotas',
'security_group_instance_association',
'security_group_rules', 'security_groups',
- 'services',
+ 'services', 'migrations',
'users', 'user_project_association',
'user_project_role_association',
'user_role_association',
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index bbadbeee4..42d8c1512 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -113,6 +113,41 @@ class Service(BASE, NovaBase):
availability_zone = Column(String(255), default='nova')
class ComputeNode(BASE, NovaBase):
    """Resource and capability snapshot of one running compute service."""

    __tablename__ = 'compute_nodes'
    id = Column(Integer, primary_key=True)
    service_id = Column(Integer, ForeignKey('services.id'), nullable=True)
    # Join only to the live (non-deleted) compute-node rows of the service.
    service = relationship(Service,
                           backref=backref('compute_node'),
                           foreign_keys=service_id,
                           primaryjoin='and_('
                                'ComputeNode.service_id == Service.id,'
                                'ComputeNode.deleted == False)')

    vcpus = Column(Integer, nullable=True)
    memory_mb = Column(Integer, nullable=True)
    local_gb = Column(Integer, nullable=True)
    vcpus_used = Column(Integer, nullable=True)
    memory_mb_used = Column(Integer, nullable=True)
    local_gb_used = Column(Integer, nullable=True)
    hypervisor_type = Column(Text, nullable=True)
    hypervisor_version = Column(Integer, nullable=True)

    # Note(masumotok): JSON-translatable CPU description copied from the
    # <cpu> tag of libvirt getCapabilities() (see libvirt.virtConnection),
    # e.g.:
    #   '{"arch":"x86_64",
    #     "model":"Nehalem",
    #     "topology":{"sockets":1, "threads":2, "cores":3},
    #     "features":["tdtscp", "xtpr"]}'
    # All of the dictionary keys above must be present.
    cpu_info = Column(Text, nullable=True)
+
+
class Certificate(BASE, NovaBase):
"""Represents a an x509 certificate"""
__tablename__ = 'certificates'
@@ -126,11 +161,16 @@ class Certificate(BASE, NovaBase):
class Instance(BASE, NovaBase):
"""Represents a guest vm."""
__tablename__ = 'instances'
+ injected_files = []
+
id = Column(Integer, primary_key=True, autoincrement=True)
@property
def name(self):
- return FLAGS.instance_name_template % self.id
+ base_name = FLAGS.instance_name_template % self.id
+ if getattr(self, '_rescue', False):
+ base_name += "-rescue"
+ return base_name
admin_pass = Column(String(255))
user_id = Column(String(255))
@@ -185,8 +225,13 @@ class Instance(BASE, NovaBase):
display_name = Column(String(255))
display_description = Column(String(255))
+    # To remember on which host an instance booted.
+    # An instance may have moved to another host by live migration.
+ launched_on = Column(Text)
locked = Column(Boolean)
+ os_type = Column(String(255))
+
# TODO(vish): see Ewan's email about state improvements, probably
# should be in a driver base class or some such
# vmstate_state = running, halted, suspended, paused
@@ -209,6 +254,20 @@ class InstanceActions(BASE, NovaBase):
error = Column(Text)
class InstanceTypes(BASE, NovaBase):
    """A flavor: the resource envelope a VM can be launched with."""
    __tablename__ = "instance_types"
    id = Column(Integer, primary_key=True)
    name = Column(String(255), unique=True)
    # Resource dimensions of the flavor.
    memory_mb = Column(Integer)
    vcpus = Column(Integer)
    local_gb = Column(Integer)
    # EC2-style numeric flavor id; unique like the name.
    flavorid = Column(Integer, unique=True)
    swap = Column(Integer, nullable=False, default=0)
    rxtx_quota = Column(Integer, nullable=False, default=0)
    rxtx_cap = Column(Integer, nullable=False, default=0)
+
+
class Volume(BASE, NovaBase):
"""Represents a block storage device that can be attached to a vm."""
__tablename__ = 'volumes'
@@ -369,6 +428,20 @@ class KeyPair(BASE, NovaBase):
public_key = Column(Text)
class Migration(BASE, NovaBase):
    """One in-flight host-to-host migration of an instance."""
    __tablename__ = 'migrations'
    id = Column(Integer, primary_key=True, nullable=False)
    # Compute hosts the instance is moving between.
    source_compute = Column(String(255))
    dest_compute = Column(String(255))
    dest_host = Column(String(255))
    # Flavor before/after a resize-style migration.
    old_flavor_id = Column(Integer())
    new_flavor_id = Column(Integer())
    instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True)
    #TODO(_cerberus_): enum
    status = Column(String(255))
+
+
class Network(BASE, NovaBase):
"""Represents a network."""
__tablename__ = 'networks'
@@ -382,8 +455,8 @@ class Network(BASE, NovaBase):
cidr = Column(String(255), unique=True)
cidr_v6 = Column(String(255), unique=True)
- ra_server = Column(String(255))
-
+ gateway_v6 = Column(String(255))
+ netmask_v6 = Column(String(255))
netmask = Column(String(255))
bridge = Column(String(255))
gateway = Column(String(255))
@@ -581,12 +654,12 @@ def register_models():
connection is lost and needs to be reestablished.
"""
from sqlalchemy import create_engine
- models = (Service, Instance, InstanceActions,
+ models = (Service, Instance, InstanceActions, InstanceTypes,
Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp,
Network, SecurityGroup, SecurityGroupIngressRule,
SecurityGroupInstanceAssociation, AuthToken, User,
Project, Certificate, ConsolePool, Console, Zone,
- InstanceMetadata)
+ InstanceMetadata, Migration)
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)