path: root/nova/db
Diffstat (limited to 'nova/db')
-rw-r--r--  nova/db/api.py | 104
-rw-r--r--  nova/db/sqlalchemy/api.py | 317
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py | 51
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py | 83
-rw-r--r--  nova/db/sqlalchemy/models.py | 42
5 files changed, 502 insertions, 95 deletions
diff --git a/nova/db/api.py b/nova/db/api.py
index d56d6f404..47a8ca1cb 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -79,33 +79,6 @@ class NoMoreTargets(exception.Error):
###################
-def reroute_if_not_found(key_args_index=None):
- """Decorator used to indicate that the method should throw
- a RouteRedirectException if the query can't find anything.
- """
- def wrap(f):
- def wrapped_f(*args, **kwargs):
- try:
- return f(*args, **kwargs)
- except exception.InstanceNotFound, e:
- context = args[0]
- key = None
- if key_args_index:
- key = args[key_args_index]
- LOG.debug(_("Instance %(key)s not found locally: '%(e)s'" %
- locals()))
-
- # Throw a reroute Exception for the middleware to pick up.
- LOG.debug("Firing ZoneRouteException")
- zones = zone_get_all(context)
- raise exception.ZoneRouteException(zones, e)
- return wrapped_f
- return wrap
-
-
-###################
-
-
def service_destroy(context, instance_id):
"""Destroy the service or raise if it does not exist."""
return IMPL.service_destroy(context, instance_id)
@@ -136,6 +109,11 @@ def service_get_all_by_host(context, host):
return IMPL.service_get_all_by_host(context, host)
+def service_get_all_compute_by_host(context, host):
+ """Get all compute services for a given host."""
+ return IMPL.service_get_all_compute_by_host(context, host)
+
+
def service_get_all_compute_sorted(context):
"""Get all compute services sorted by instance count.
@@ -185,6 +163,29 @@ def service_update(context, service_id, values):
###################
+def compute_node_get(context, compute_id, session=None):
+ """Get an computeNode or raise if it does not exist."""
+ return IMPL.compute_node_get(context, compute_id)
+
+
+def compute_node_create(context, values):
+ """Create a computeNode from the values dictionary."""
+ return IMPL.compute_node_create(context, values)
+
+
+def compute_node_update(context, compute_id, values):
+ """Set the given properties on an computeNode and update it.
+
+ Raises NotFound if computeNode does not exist.
+
+ """
+
+ return IMPL.compute_node_update(context, compute_id, values)
+
+
+###################
+
+
def certificate_create(context, values):
"""Create a certificate from the values dictionary."""
return IMPL.certificate_create(context, values)
@@ -289,6 +290,11 @@ def floating_ip_get_by_address(context, address):
return IMPL.floating_ip_get_by_address(context, address)
+def floating_ip_update(context, address, values):
+ """Update a floating ip by address or raise if it doesn't exist."""
+ return IMPL.floating_ip_update(context, address, values)
+
+
####################
def migration_update(context, id, values):
@@ -352,6 +358,11 @@ def fixed_ip_get_all(context):
return IMPL.fixed_ip_get_all(context)
+def fixed_ip_get_all_by_host(context, host):
+ """Get all defined fixed ips used by a host."""
+ return IMPL.fixed_ip_get_all_by_host(context, host)
+
+
def fixed_ip_get_by_address(context, address):
"""Get a fixed ip by address or raise if it does not exist."""
return IMPL.fixed_ip_get_by_address(context, address)
@@ -399,7 +410,6 @@ def instance_destroy(context, instance_id):
return IMPL.instance_destroy(context, instance_id)
-@reroute_if_not_found(key_args_index=1)
def instance_get(context, instance_id):
"""Get an instance or raise if it does not exist."""
return IMPL.instance_get(context, instance_id)
@@ -474,6 +484,27 @@ def instance_add_security_group(context, instance_id, security_group_id):
security_group_id)
+def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id):
+ """Get instances.vcpus by host and project."""
+ return IMPL.instance_get_vcpu_sum_by_host_and_project(context,
+ hostname,
+ proj_id)
+
+
+def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id):
+ """Get amount of memory by host and project."""
+ return IMPL.instance_get_memory_sum_by_host_and_project(context,
+ hostname,
+ proj_id)
+
+
+def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id):
+ """Get total amount of disk by host and project."""
+ return IMPL.instance_get_disk_sum_by_host_and_project(context,
+ hostname,
+ proj_id)
+
+
def instance_action_create(context, values):
"""Create an instance action from the values dictionary."""
return IMPL.instance_action_create(context, values)
@@ -550,6 +581,13 @@ def network_create_safe(context, values):
return IMPL.network_create_safe(context, values)
+def network_delete_safe(context, network_id):
+ """Delete network with key network_id.
+ This method assumes that the network is not associated with any project.
+ """
+ return IMPL.network_delete_safe(context, network_id)
+
+
def network_create_fixed_ips(context, network_id, num_vpn_clients):
"""Create the ips for the network, reserving sepecified ips."""
return IMPL.network_create_fixed_ips(context, network_id, num_vpn_clients)
@@ -586,6 +624,11 @@ def network_get_by_bridge(context, bridge):
return IMPL.network_get_by_bridge(context, bridge)
+def network_get_by_cidr(context, cidr):
+ """Get a network by cidr or raise if it does not exist"""
+ return IMPL.network_get_by_cidr(context, cidr)
+
+
def network_get_by_instance(context, instance_id):
"""Get a network by instance id or raise if it does not exist."""
return IMPL.network_get_by_instance(context, instance_id)
@@ -786,6 +829,11 @@ def volume_get_all_by_host(context, host):
return IMPL.volume_get_all_by_host(context, host)
+def volume_get_all_by_instance(context, instance_id):
+ """Get all volumes belonging to a instance."""
+ return IMPL.volume_get_all_by_instance(context, instance_id)
+
+
def volume_get_all_by_project(context, project_id):
"""Get all volumes belonging to a project."""
return IMPL.volume_get_all_by_project(context, project_id)
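
A minimal usage sketch of the new db-layer helpers declared above (illustrative
only; the hostname, project id, and the admin-context helper are assumptions of
this sketch, not part of the patch):

# Sketch: how a caller such as a scheduler might consume the new accounting
# helpers. 'host1' and 'proj1' are placeholder values.
from nova import context as nova_context
from nova import db

ctxt = nova_context.get_admin_context()   # assumed admin-context helper
vcpus_used = db.instance_get_vcpu_sum_by_host_and_project(ctxt, 'host1', 'proj1')
memory_used = db.instance_get_memory_sum_by_host_and_project(ctxt, 'host1', 'proj1')
disk_used = db.instance_get_disk_sum_by_host_and_project(ctxt, 'host1', 'proj1')
compute_services = db.service_get_all_compute_by_host(ctxt, 'host1')
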
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 5e498fc6f..44540617f 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -34,6 +34,7 @@ from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql import exists
from sqlalchemy.sql import func
+from sqlalchemy.sql.expression import literal_column
FLAGS = flags.FLAGS
@@ -118,6 +119,11 @@ def service_destroy(context, service_id):
service_ref = service_get(context, service_id, session=session)
service_ref.delete(session=session)
+ if service_ref.topic == 'compute' and \
+ len(service_ref.compute_node) != 0:
+ for c in service_ref.compute_node:
+ c.delete(session=session)
+
@require_admin_context
def service_get(context, service_id, session=None):
@@ -125,6 +131,7 @@ def service_get(context, service_id, session=None):
session = get_session()
result = session.query(models.Service).\
+ options(joinedload('compute_node')).\
filter_by(id=service_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
@@ -175,6 +182,24 @@ def service_get_all_by_host(context, host):
@require_admin_context
+def service_get_all_compute_by_host(context, host):
+ topic = 'compute'
+ session = get_session()
+ result = session.query(models.Service).\
+ options(joinedload('compute_node')).\
+ filter_by(deleted=False).\
+ filter_by(host=host).\
+ filter_by(topic=topic).\
+ all()
+
+ if not result:
+ raise exception.NotFound(_("%s does not exist or is not "
+ "a compute node.") % host)
+
+ return result
+
+
+@require_admin_context
def _service_get_all_topic_subquery(context, session, topic, subq, label):
sort_value = getattr(subq.c, label)
return session.query(models.Service, func.coalesce(sort_value, 0)).\
@@ -285,6 +310,42 @@ def service_update(context, service_id, values):
@require_admin_context
+def compute_node_get(context, compute_id, session=None):
+ if not session:
+ session = get_session()
+
+ result = session.query(models.ComputeNode).\
+ filter_by(id=compute_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ first()
+
+ if not result:
+ raise exception.NotFound(_('No computeNode for id %s') % compute_id)
+
+ return result
+
+
+@require_admin_context
+def compute_node_create(context, values):
+ compute_node_ref = models.ComputeNode()
+ compute_node_ref.update(values)
+ compute_node_ref.save()
+ return compute_node_ref
+
+
+@require_admin_context
+def compute_node_update(context, compute_id, values):
+ session = get_session()
+ with session.begin():
+ compute_ref = compute_node_get(context, compute_id, session=session)
+ compute_ref.update(values)
+ compute_ref.save(session=session)
+
+
+###################
+
+
+@require_admin_context
def certificate_get(context, certificate_id, session=None):
if not session:
session = get_session()
@@ -505,6 +566,16 @@ def floating_ip_get_by_address(context, address, session=None):
return result
+@require_context
+def floating_ip_update(context, address, values):
+ session = get_session()
+ with session.begin():
+ floating_ip_ref = floating_ip_get_by_address(context, address, session)
+ for (key, value) in values.iteritems():
+ floating_ip_ref[key] = value
+ floating_ip_ref.save(session=session)
+
+
###################
@@ -577,18 +648,17 @@ def fixed_ip_disassociate(context, address):
@require_admin_context
def fixed_ip_disassociate_all_by_timeout(_context, host, time):
session = get_session()
- # NOTE(vish): The nested select is because sqlite doesn't support
- # JOINs in UPDATEs.
- result = session.execute('UPDATE fixed_ips SET instance_id = NULL, '
- 'leased = 0 '
- 'WHERE network_id IN (SELECT id FROM networks '
- 'WHERE host = :host) '
- 'AND updated_at < :time '
- 'AND instance_id IS NOT NULL '
- 'AND allocated = 0',
- {'host': host,
- 'time': time})
- return result.rowcount
+ inner_q = session.query(models.Network.id).\
+ filter_by(host=host).\
+ subquery()
+ result = session.query(models.FixedIp).\
+ filter(models.FixedIp.network_id.in_(inner_q)).\
+ filter(models.FixedIp.updated_at < time).\
+ filter(models.FixedIp.instance_id != None).\
+ filter_by(allocated=0).\
+ update({'instance_id': None,
+ 'leased': 0})
+ return result
@require_admin_context
@@ -602,6 +672,22 @@ def fixed_ip_get_all(context, session=None):
return result
+@require_admin_context
+def fixed_ip_get_all_by_host(context, host=None):
+ session = get_session()
+
+ result = session.query(models.FixedIp).\
+ join(models.FixedIp.instance).\
+ filter_by(state=1).\
+ filter_by(host=host).\
+ all()
+
+ if not result:
+ raise exception.NotFound(_('No fixed ips for this host defined'))
+
+ return result
+
+
@require_context
def fixed_ip_get_by_address(context, address, session=None):
if not session:
@@ -701,14 +787,16 @@ def instance_data_get_for_project(context, project_id):
def instance_destroy(context, instance_id):
session = get_session()
with session.begin():
- session.execute('update instances set deleted=1,'
- 'deleted_at=:at where id=:id',
- {'id': instance_id,
- 'at': datetime.datetime.utcnow()})
- session.execute('update security_group_instance_association '
- 'set deleted=1,deleted_at=:at where instance_id=:id',
- {'id': instance_id,
- 'at': datetime.datetime.utcnow()})
+ session.query(models.Instance).\
+ filter_by(id=instance_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+ session.query(models.SecurityGroupInstanceAssociation).\
+ filter_by(instance_id=instance_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
@require_context
@@ -905,6 +993,45 @@ def instance_add_security_group(context, instance_id, security_group_id):
@require_context
+def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id):
+ session = get_session()
+ result = session.query(models.Instance).\
+ filter_by(host=hostname).\
+ filter_by(project_id=proj_id).\
+ filter_by(deleted=False).\
+ value(func.sum(models.Instance.vcpus))
+ if not result:
+ return 0
+ return result
+
+
+@require_context
+def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id):
+ session = get_session()
+ result = session.query(models.Instance).\
+ filter_by(host=hostname).\
+ filter_by(project_id=proj_id).\
+ filter_by(deleted=False).\
+ value(func.sum(models.Instance.memory_mb))
+ if not result:
+ return 0
+ return result
+
+
+@require_context
+def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id):
+ session = get_session()
+ result = session.query(models.Instance).\
+ filter_by(host=hostname).\
+ filter_by(project_id=proj_id).\
+ filter_by(deleted=False).\
+ value(func.sum(models.Instance.local_gb))
+ if not result:
+ return 0
+ return result
+
+
+@require_context
def instance_action_create(context, values):
"""Create an instance action from the values dictionary."""
action_ref = models.InstanceActions()
@@ -950,9 +1077,11 @@ def key_pair_destroy_all_by_user(context, user_id):
authorize_user_context(context, user_id)
session = get_session()
with session.begin():
- # TODO(vish): do we have to use sql here?
- session.execute('update key_pairs set deleted=1 where user_id=:id',
- {'id': user_id})
+ session.query(models.KeyPair).\
+ filter_by(user_id=user_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
@require_context
@@ -1055,6 +1184,15 @@ def network_create_safe(context, values):
@require_admin_context
+def network_delete_safe(context, network_id):
+ session = get_session()
+ with session.begin():
+ network_ref = network_get(context, network_id=network_id, \
+ session=session)
+ session.delete(network_ref)
+
+
+@require_admin_context
def network_disassociate(context, network_id):
network_update(context, network_id, {'project_id': None,
'host': None})
@@ -1063,7 +1201,9 @@ def network_disassociate(context, network_id):
@require_admin_context
def network_disassociate_all(context):
session = get_session()
- session.execute('update networks set project_id=NULL')
+ session.query(models.Network).\
+ update({'project_id': None,
+ 'updated_at': literal_column('updated_at')})
@require_context
@@ -1128,6 +1268,18 @@ def network_get_by_bridge(context, bridge):
@require_admin_context
+def network_get_by_cidr(context, cidr):
+ session = get_session()
+ result = session.query(models.Network).\
+ filter_by(cidr=cidr).first()
+
+ if not result:
+ raise exception.NotFound(_('Network with cidr %s does not exist') %
+ cidr)
+ return result
+
+
+@require_admin_context
def network_get_by_instance(_context, instance_id):
session = get_session()
rv = session.query(models.Network).\
@@ -1433,15 +1585,17 @@ def volume_data_get_for_project(context, project_id):
def volume_destroy(context, volume_id):
session = get_session()
with session.begin():
- # TODO(vish): do we have to use sql here?
- session.execute('update volumes set deleted=1 where id=:id',
- {'id': volume_id})
- session.execute('update export_devices set volume_id=NULL '
- 'where volume_id=:id',
- {'id': volume_id})
- session.execute('update iscsi_targets set volume_id=NULL '
- 'where volume_id=:id',
- {'id': volume_id})
+ session.query(models.Volume).\
+ filter_by(id=volume_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+ session.query(models.ExportDevice).\
+ filter_by(volume_id=volume_id).\
+ update({'volume_id': None})
+ session.query(models.IscsiTarget).\
+ filter_by(volume_id=volume_id).\
+ update({'volume_id': None})
@require_admin_context
@@ -1501,6 +1655,18 @@ def volume_get_all_by_host(context, host):
all()
+@require_admin_context
+def volume_get_all_by_instance(context, instance_id):
+ session = get_session()
+ result = session.query(models.Volume).\
+ filter_by(instance_id=instance_id).\
+ filter_by(deleted=False).\
+ all()
+ if not result:
+ raise exception.NotFound(_('No volume for instance %s') % instance_id)
+ return result
+
+
@require_context
def volume_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
@@ -1661,17 +1827,21 @@ def security_group_create(context, values):
def security_group_destroy(context, security_group_id):
session = get_session()
with session.begin():
- # TODO(vish): do we have to use sql here?
- session.execute('update security_groups set deleted=1 where id=:id',
- {'id': security_group_id})
- session.execute('update security_group_instance_association '
- 'set deleted=1,deleted_at=:at '
- 'where security_group_id=:id',
- {'id': security_group_id,
- 'at': datetime.datetime.utcnow()})
- session.execute('update security_group_rules set deleted=1 '
- 'where group_id=:id',
- {'id': security_group_id})
+ session.query(models.SecurityGroup).\
+ filter_by(id=security_group_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+ session.query(models.SecurityGroupInstanceAssociation).\
+ filter_by(security_group_id=security_group_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+ session.query(models.SecurityGroupIngressRule).\
+ filter_by(group_id=security_group_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
@require_context
@@ -1679,9 +1849,14 @@ def security_group_destroy_all(context, session=None):
if not session:
session = get_session()
with session.begin():
- # TODO(vish): do we have to use sql here?
- session.execute('update security_groups set deleted=1')
- session.execute('update security_group_rules set deleted=1')
+ session.query(models.SecurityGroup).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+ session.query(models.SecurityGroupIngressRule).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
###################
@@ -1810,12 +1985,15 @@ def user_create(_context, values):
def user_delete(context, id):
session = get_session()
with session.begin():
- session.execute('delete from user_project_association '
- 'where user_id=:id', {'id': id})
- session.execute('delete from user_role_association '
- 'where user_id=:id', {'id': id})
- session.execute('delete from user_project_role_association '
- 'where user_id=:id', {'id': id})
+ session.query(models.UserProjectAssociation).\
+ filter_by(user_id=id).\
+ delete()
+ session.query(models.UserRoleAssociation).\
+ filter_by(user_id=id).\
+ delete()
+ session.query(models.UserProjectRoleAssociation).\
+ filter_by(user_id=id).\
+ delete()
user_ref = user_get(context, id, session=session)
session.delete(user_ref)
@@ -1872,8 +2050,11 @@ def project_get_by_user(context, user_id):
session = get_session()
user = session.query(models.User).\
filter_by(deleted=can_read_deleted(context)).\
+ filter_by(id=user_id).\
options(joinedload_all('projects')).\
first()
+ if not user:
+ raise exception.NotFound(_('Invalid user_id %s') % user_id)
return user.projects
@@ -1906,10 +2087,12 @@ def project_update(context, project_id, values):
def project_delete(context, id):
session = get_session()
with session.begin():
- session.execute('delete from user_project_association '
- 'where project_id=:id', {'id': id})
- session.execute('delete from user_project_role_association '
- 'where project_id=:id', {'id': id})
+ session.query(models.UserProjectAssociation).\
+ filter_by(project_id=id).\
+ delete()
+ session.query(models.UserProjectRoleAssociation).\
+ filter_by(project_id=id).\
+ delete()
project_ref = project_get(context, id, session=session)
session.delete(project_ref)
@@ -1934,11 +2117,11 @@ def user_get_roles_for_project(context, user_id, project_id):
def user_remove_project_role(context, user_id, project_id, role):
session = get_session()
with session.begin():
- session.execute('delete from user_project_role_association where '
- 'user_id=:user_id and project_id=:project_id and '
- 'role=:role', {'user_id': user_id,
- 'project_id': project_id,
- 'role': role})
+ session.query(models.UserProjectRoleAssociation).\
+ filter_by(user_id=user_id).\
+ filter_by(project_id=project_id).\
+ filter_by(role=role).\
+ delete()
def user_remove_role(context, user_id, role):
@@ -2089,8 +2272,9 @@ def console_delete(context, console_id):
session = get_session()
with session.begin():
# consoles are meant to be transient. (mdragon)
- session.execute('delete from consoles '
- 'where id=:id', {'id': console_id})
+ session.query(models.Console).\
+ filter_by(id=console_id).\
+ delete()
def console_get_by_pool_instance(context, pool_id, instance_id):
@@ -2246,8 +2430,9 @@ def zone_update(context, zone_id, values):
def zone_delete(context, zone_id):
session = get_session()
with session.begin():
- session.execute('delete from zones '
- 'where id=:id', {'id': zone_id})
+ session.query(models.Zone).\
+ filter_by(id=zone_id).\
+ delete()
@require_admin_context
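
All of the soft-delete conversions above follow the same pattern: replace raw SQL
with query(...).update() and assign updated_at to itself via literal_column so the
bulk UPDATE leaves it untouched. A generic sketch of that pattern (not itself part
of the patch; 'model' stands for any NovaBase-derived class):

import datetime

from sqlalchemy.sql.expression import literal_column


def soft_delete(session, model, **filters):
    # Mark matching rows deleted without bumping updated_at; assigning the
    # column to itself keeps its value as-is in the issued UPDATE.
    return session.query(model).\
            filter_by(**filters).\
            update({'deleted': 1,
                    'deleted_at': datetime.datetime.utcnow(),
                    'updated_at': literal_column('updated_at')})
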
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py
new file mode 100644
index 000000000..eb3066894
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py
@@ -0,0 +1,51 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from sqlalchemy.sql import text
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+instances = Table('instances', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+instances_os_type = Column('os_type',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False),
+ nullable=True)
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ instances.create_column(instances_os_type)
+ migrate_engine.execute(instances.update()\
+ .where(instances.c.os_type == None)\
+ .values(os_type='linux'))
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ instances.drop_column('os_type')
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py b/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py
new file mode 100644
index 000000000..23ccccb4e
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py
@@ -0,0 +1,83 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from migrate import *
+from nova import log as logging
+from sqlalchemy import *
+
+
+meta = MetaData()
+
+instances = Table('instances', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+#
+# New Tables
+#
+
+compute_nodes = Table('compute_nodes', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('service_id', Integer(), nullable=False),
+
+ Column('vcpus', Integer(), nullable=False),
+ Column('memory_mb', Integer(), nullable=False),
+ Column('local_gb', Integer(), nullable=False),
+ Column('vcpus_used', Integer(), nullable=False),
+ Column('memory_mb_used', Integer(), nullable=False),
+ Column('local_gb_used', Integer(), nullable=False),
+ Column('hypervisor_type',
+ Text(convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ nullable=False),
+ Column('hypervisor_version', Integer(), nullable=False),
+ Column('cpu_info',
+ Text(convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ nullable=False),
+ )
+
+
+#
+# Tables to alter
+#
+instances_launched_on = Column(
+ 'launched_on',
+ Text(convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ nullable=True)
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ try:
+ compute_nodes.create()
+ except Exception:
+ logging.info(repr(compute_nodes))
+ logging.exception('Exception while creating table')
+ meta.drop_all(tables=[compute_nodes])
+ raise
+
+ instances.create_column(instances_launched_on)
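
Unlike migration 010, this migration defines no downgrade. Purely as an
illustration (not part of the patch), a matching downgrade would mirror the
upgrade in reverse:

def downgrade(migrate_engine):
    # Hypothetical counterpart to upgrade(): drop the added column, then the
    # new table.
    meta.bind = migrate_engine
    instances.drop_column('launched_on')
    compute_nodes.drop()
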
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 6ef284e65..1845e85eb 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -113,6 +113,41 @@ class Service(BASE, NovaBase):
availability_zone = Column(String(255), default='nova')
+class ComputeNode(BASE, NovaBase):
+ """Represents a running compute service on a host."""
+
+ __tablename__ = 'compute_nodes'
+ id = Column(Integer, primary_key=True)
+ service_id = Column(Integer, ForeignKey('services.id'), nullable=True)
+ service = relationship(Service,
+ backref=backref('compute_node'),
+ foreign_keys=service_id,
+ primaryjoin='and_('
+ 'ComputeNode.service_id == Service.id,'
+ 'ComputeNode.deleted == False)')
+
+ vcpus = Column(Integer, nullable=True)
+ memory_mb = Column(Integer, nullable=True)
+ local_gb = Column(Integer, nullable=True)
+ vcpus_used = Column(Integer, nullable=True)
+ memory_mb_used = Column(Integer, nullable=True)
+ local_gb_used = Column(Integer, nullable=True)
+ hypervisor_type = Column(Text, nullable=True)
+ hypervisor_version = Column(Integer, nullable=True)
+
+ # Note(masumotok): Expected Strings example:
+ #
+ # '{"arch":"x86_64",
+ # "model":"Nehalem",
+ # "topology":{"sockets":1, "threads":2, "cores":3},
+ # "features":["tdtscp", "xtpr"]}'
+ #
+ # The value is "json translatable" and must contain all of the dictionary
+ # keys above, since it is copied from the <cpu> tag of getCapabilities()
+ # (see libvirt.virtConnection).
+ cpu_info = Column(Text, nullable=True)
+
+
class Certificate(BASE, NovaBase):
"""Represents a an x509 certificate"""
__tablename__ = 'certificates'
@@ -126,7 +161,7 @@ class Certificate(BASE, NovaBase):
class Instance(BASE, NovaBase):
"""Represents a guest vm."""
__tablename__ = 'instances'
- onset_files = []
+ injected_files = []
id = Column(Integer, primary_key=True, autoincrement=True)
@@ -191,8 +226,13 @@ class Instance(BASE, NovaBase):
display_name = Column(String(255))
display_description = Column(String(255))
+ # To remember on which host an instance booted.
+ # An instance may have moved to another host by live migration.
+ launched_on = Column(Text)
locked = Column(Boolean)
+ os_type = Column(String(255))
+
# TODO(vish): see Ewan's email about state improvements, probably
# should be in a driver base class or some such
# vmstate_state = running, halted, suspended, paused
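
Tying the ComputeNode model above back to the new db API: a compute host could
register and refresh its resources roughly as follows (all literal values and the
service id are placeholders for illustration, not part of the patch):

from nova import context as nova_context
from nova import db

ctxt = nova_context.get_admin_context()   # assumed admin-context helper
values = {'service_id': 1,                # placeholder service id
          'vcpus': 16, 'memory_mb': 32768, 'local_gb': 500,
          'vcpus_used': 0, 'memory_mb_used': 512, 'local_gb_used': 0,
          'hypervisor_type': 'QEMU', 'hypervisor_version': 12003,
          'cpu_info': '{"arch": "x86_64", "model": "Nehalem", '
                      '"topology": {"sockets": 1, "threads": 2, "cores": 3}, '
                      '"features": ["tdtscp", "xtpr"]}'}
compute_ref = db.compute_node_create(ctxt, values)
db.compute_node_update(ctxt, compute_ref.id, {'vcpus_used': 2})
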