summary refs log tree commit diff stats
path: root/nova/db
diff options
context:
space:
mode:
Diffstat (limited to 'nova/db')
-rw-r--r--  nova/db/api.py                                                  |  10
-rw-r--r--  nova/db/sqlalchemy/api.py                                       | 105
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/186_new_bdm_format.py  |   3
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/189_add_cells_uc.py    |  40
-rw-r--r--  nova/db/sqlalchemy/models.py                                    | 113
-rw-r--r--  nova/db/sqlalchemy/utils.py                                     |  18
6 files changed, 182 insertions(+), 107 deletions(-)
diff --git a/nova/db/api.py b/nova/db/api.py
index ceab5fcd8..bd519110c 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -1151,9 +1151,10 @@ def security_group_get_all(context):
return IMPL.security_group_get_all(context)
-def security_group_get(context, security_group_id):
+def security_group_get(context, security_group_id, columns_to_join=None):
"""Get security group by its id."""
- return IMPL.security_group_get(context, security_group_id)
+ return IMPL.security_group_get(context, security_group_id,
+ columns_to_join)
def security_group_get_by_name(context, project_id, group_name):
@@ -1561,7 +1562,7 @@ def vol_get_usage_by_time(context, begin):
def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
instance_id, project_id, user_id, availability_zone,
- update_totals=False, session=None):
+ update_totals=False):
"""Update cached volume usage for a volume
Creates new record if needed.
@@ -1569,8 +1570,7 @@ def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
return IMPL.vol_usage_update(context, id, rd_req, rd_bytes, wr_req,
wr_bytes, instance_id, project_id, user_id,
availability_zone,
- update_totals=update_totals,
- session=session)
+ update_totals=update_totals)
###################
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 664425441..3b491ca6e 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -555,8 +555,7 @@ def compute_node_update(context, compute_id, values, prune_stats=False):
# Always update this, even if there's going to be no other
# changes in data. This ensures that we invalidate the
# scheduler cache of compute node data in case of races.
- if 'updated_at' not in values:
- values['updated_at'] = timeutils.utcnow()
+ values['updated_at'] = timeutils.utcnow()
convert_datetimes(values, 'created_at', 'deleted_at', 'updated_at')
compute_ref.update(values)
return compute_ref
@@ -603,18 +602,6 @@ def compute_node_statistics(context):
@require_admin_context
-def certificate_get(context, certificate_id):
- result = model_query(context, models.Certificate).\
- filter_by(id=certificate_id).\
- first()
-
- if not result:
- raise exception.CertificateNotFound(certificate_id=certificate_id)
-
- return result
-
-
-@require_admin_context
def certificate_create(context, values):
certificate_ref = models.Certificate()
for (key, value) in values.iteritems():
@@ -1689,7 +1676,7 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
'changes-since' - only return instances updated after
'deleted' - only return (or exclude) deleted instances
- 'soft-deleted' - modify behavior of 'deleted' to either
+ 'soft_deleted' - modify behavior of 'deleted' to either
include or exclude instances whose
vm_state is SOFT_DELETED.
"""
@@ -2800,6 +2787,7 @@ def _get_quota_usages(context, session, project_id):
@require_context
+@_retry_on_deadlock
def quota_reserve(context, resources, quotas, deltas, expire,
until_refresh, max_age, project_id=None):
elevated = context.elevated()
@@ -3135,15 +3123,16 @@ def block_device_mapping_create(context, values, legacy=True):
bdm_ref = models.BlockDeviceMapping()
bdm_ref.update(values)
bdm_ref.save()
+ return bdm_ref
@require_context
def block_device_mapping_update(context, bdm_id, values, legacy=True):
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy, allow_updates=True)
- _block_device_mapping_get_query(context).\
- filter_by(id=bdm_id).\
- update(values)
+ query = _block_device_mapping_get_query(context).filter_by(id=bdm_id)
+ query.update(values)
+ return query.first()
def block_device_mapping_update_or_create(context, values, legacy=True):
@@ -3159,6 +3148,7 @@ def block_device_mapping_update_or_create(context, values, legacy=True):
bdm_ref = models.BlockDeviceMapping()
bdm_ref.update(values)
bdm_ref.save(session=session)
+ result = bdm_ref
else:
values = _from_legacy_values(values, legacy, allow_updates=True)
result.update(values)
@@ -3181,6 +3171,7 @@ def block_device_mapping_update_or_create(context, values, legacy=True):
models.BlockDeviceMapping.guest_format == None,
models.BlockDeviceMapping.guest_format != 'swap')).
soft_delete())
+ return result
@require_context
@@ -3253,13 +3244,17 @@ def security_group_get_all(context):
@require_context
-def security_group_get(context, security_group_id, session=None):
- result = _security_group_get_query(context, session=session,
- project_only=True).\
- filter_by(id=security_group_id).\
- options(joinedload_all('instances')).\
- first()
+def security_group_get(context, security_group_id, columns_to_join=None,
+ session=None):
+ query = _security_group_get_query(context, session=session,
+ project_only=True).\
+ filter_by(id=security_group_id)
+ if columns_to_join is None:
+ columns_to_join = []
+ if 'instances' in columns_to_join:
+ query = query.options(joinedload_all('instances'))
+ result = query.first()
if not result:
raise exception.SecurityGroupNotFound(
security_group_id=security_group_id)
@@ -3424,10 +3419,10 @@ def _security_group_rule_get_query(context, session=None):
@require_context
-def security_group_rule_get(context, security_group_rule_id, session=None):
- result = _security_group_rule_get_query(context, session=session).\
- filter_by(id=security_group_rule_id).\
- first()
+def security_group_rule_get(context, security_group_rule_id):
+ result = (_security_group_rule_get_query(context).
+ filter_by(id=security_group_rule_id).
+ first())
if not result:
raise exception.SecurityGroupNotFoundForRule(
@@ -3437,23 +3432,21 @@ def security_group_rule_get(context, security_group_rule_id, session=None):
@require_context
-def security_group_rule_get_by_security_group(context, security_group_id,
- session=None):
- return _security_group_rule_get_query(context, session=session).\
- filter_by(parent_group_id=security_group_id).\
+def security_group_rule_get_by_security_group(context, security_group_id):
+ return (_security_group_rule_get_query(context).
+ filter_by(parent_group_id=security_group_id).
options(joinedload_all('grantee_group.instances.'
- 'system_metadata')).\
- all()
+ 'system_metadata')).
+ all())
@require_context
def security_group_rule_get_by_security_group_grantee(context,
- security_group_id,
- session=None):
+ security_group_id):
- return _security_group_rule_get_query(context, session=session).\
- filter_by(group_id=security_group_id).\
- all()
+ return (_security_group_rule_get_query(context).
+ filter_by(group_id=security_group_id).
+ all())
@require_context
@@ -3466,22 +3459,20 @@ def security_group_rule_create(context, values):
@require_context
def security_group_rule_destroy(context, security_group_rule_id):
- session = get_session()
- with session.begin():
- count = _security_group_rule_get_query(context, session=session).\
- filter_by(id=security_group_rule_id).\
- soft_delete()
- if count == 0:
- raise exception.SecurityGroupNotFoundForRule(
- rule_id=security_group_rule_id)
+ count = (_security_group_rule_get_query(context).
+ filter_by(id=security_group_rule_id).
+ soft_delete())
+ if count == 0:
+ raise exception.SecurityGroupNotFoundForRule(
+ rule_id=security_group_rule_id)
@require_context
def security_group_rule_count_by_group(context, security_group_id):
- return model_query(context, models.SecurityGroupIngressRule,
- read_deleted="no").\
- filter_by(parent_group_id=security_group_id).\
- count()
+ return (model_query(context, models.SecurityGroupIngressRule,
+ read_deleted="no").
+ filter_by(parent_group_id=security_group_id).
+ count())
#
###################
@@ -4048,7 +4039,10 @@ def instance_type_extra_specs_update_or_create(context, flavor_id, specs):
def cell_create(context, values):
cell = models.Cell()
cell.update(values)
- cell.save()
+ try:
+ cell.save()
+ except db_exc.DBDuplicateEntry:
+ raise exception.CellExists(name=values['name'])
return cell
@@ -4409,9 +4403,8 @@ def vol_get_usage_by_time(context, begin):
@require_context
def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
instance_id, project_id, user_id, availability_zone,
- update_totals=False, session=None):
- if not session:
- session = get_session()
+ update_totals=False):
+ session = get_session()
refreshed = timeutils.utcnow()
@@ -4484,6 +4477,8 @@ def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
current_usage['curr_write_bytes'] + wr_bytes)
current_usage.update(values)
+ current_usage.save(session=session)
+ session.refresh(current_usage)
return current_usage
vol_usage = models.VolumeUsage()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/186_new_bdm_format.py b/nova/db/sqlalchemy/migrate_repo/versions/186_new_bdm_format.py
index 88462a2c8..e8b9a1570 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/186_new_bdm_format.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/186_new_bdm_format.py
@@ -111,9 +111,6 @@ def _upgrade_bdm_v2(meta, bdm_table, bdm_shadow_table):
_bdm_rows_v1 = ('id', 'device_name', 'virtual_name',
'snapshot_id', 'volume_id', 'instance_uuid')
- _bdm_rows_v2 = ('id', 'source_type', 'destination_type', 'guest_format',
- 'device_type', 'disk_bus', 'boot_index', 'image_id')
-
_instance_cols = ('uuid', 'image_ref', 'root_device_name')
def _get_columns(table, names):
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/189_add_cells_uc.py b/nova/db/sqlalchemy/migrate_repo/versions/189_add_cells_uc.py
new file mode 100644
index 000000000..d0606e9f9
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/189_add_cells_uc.py
@@ -0,0 +1,40 @@
+# Copyright 2013 Mirantis Inc.
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+from migrate.changeset import UniqueConstraint
+from sqlalchemy import MetaData, Table
+
+from nova.db.sqlalchemy import utils
+
+
+UC_NAME = 'uniq_cell_name0deleted'
+COLUMNS = ('name', 'deleted')
+TABLE_NAME = 'cells'
+
+
+def upgrade(migrate_engine):
+ meta = MetaData(bind=migrate_engine)
+ t = Table(TABLE_NAME, meta, autoload=True)
+
+ utils.drop_old_duplicate_entries_from_table(migrate_engine, TABLE_NAME,
+ True, *COLUMNS)
+ uc = UniqueConstraint(*COLUMNS, table=t, name=UC_NAME)
+ uc.create()
+
+
+def downgrade(migrate_engine):
+ utils.drop_unique_constraint(migrate_engine, TABLE_NAME, UC_NAME, *COLUMNS)
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 815041638..5d9c4cbf3 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -367,11 +367,12 @@ class Quota(BASE, NovaBase):
"""
__tablename__ = 'quotas'
+ __table_args__ = ()
id = Column(Integer, primary_key=True)
- project_id = Column(String(255), index=True)
+ project_id = Column(String(255), nullable=True)
- resource = Column(String(255))
+ resource = Column(String(255), nullable=False)
hard_limit = Column(Integer, nullable=True)
@@ -384,11 +385,14 @@ class QuotaClass(BASE, NovaBase):
"""
__tablename__ = 'quota_classes'
+ __table_args__ = (
+ Index('ix_quota_classes_class_name', 'class_name'),
+ )
id = Column(Integer, primary_key=True)
- class_name = Column(String(255), index=True)
+ class_name = Column(String(255), nullable=True)
- resource = Column(String(255))
+ resource = Column(String(255), nullable=True)
hard_limit = Column(Integer, nullable=True)
@@ -396,13 +400,16 @@ class QuotaUsage(BASE, NovaBase):
"""Represents the current usage for a given resource."""
__tablename__ = 'quota_usages'
+ __table_args__ = (
+ Index('ix_quota_usages_project_id', 'project_id'),
+ )
id = Column(Integer, primary_key=True)
- project_id = Column(String(255), index=True)
- resource = Column(String(255))
+ project_id = Column(String(255), nullable=True)
+ resource = Column(String(255), nullable=True)
- in_use = Column(Integer)
- reserved = Column(Integer)
+ in_use = Column(Integer, nullable=False)
+ reserved = Column(Integer, nullable=False)
@property
def total(self):
@@ -436,8 +443,9 @@ class Reservation(BASE, NovaBase):
class Snapshot(BASE, NovaBase):
"""Represents a block storage device that can be attached to a VM."""
__tablename__ = 'snapshots'
- id = Column(String(36), primary_key=True)
- deleted = Column(String(36), default="")
+ __table_args__ = ()
+ id = Column(String(36), primary_key=True, nullable=False)
+ deleted = Column(String(36), default="", nullable=True)
@property
def name(self):
@@ -447,16 +455,17 @@ class Snapshot(BASE, NovaBase):
def volume_name(self):
return CONF.volume_name_template % self.volume_id
- user_id = Column(String(255))
- project_id = Column(String(255))
+ user_id = Column(String(255), nullable=True)
+ project_id = Column(String(255), nullable=True)
- volume_id = Column(String(36))
- status = Column(String(255))
- progress = Column(String(255))
- volume_size = Column(Integer)
+ volume_id = Column(String(36), nullable=False)
+ status = Column(String(255), nullable=True)
+ progress = Column(String(255), nullable=True)
+ volume_size = Column(Integer, nullable=True)
+ scheduled_at = Column(DateTime, nullable=True)
- display_name = Column(String(255))
- display_description = Column(String(255))
+ display_name = Column(String(255), nullable=True)
+ display_description = Column(String(255), nullable=True)
class BlockDeviceMapping(BASE, NovaBase):
@@ -686,13 +695,16 @@ class VirtualInterface(BASE, NovaBase):
__tablename__ = 'virtual_interfaces'
__table_args__ = (
schema.UniqueConstraint("address",
- name="unique_virtual_interfaces0address"),
+ name="uniq_virtual_interfaces0address"),
+ Index('network_id', 'network_id'),
+ Index('virtual_interfaces_instance_uuid_fkey', 'instance_uuid'),
)
- id = Column(Integer, primary_key=True)
- address = Column(String(255), unique=True)
- network_id = Column(Integer, nullable=False)
- instance_uuid = Column(String(36), nullable=False)
- uuid = Column(String(36))
+ id = Column(Integer, primary_key=True, nullable=False)
+ address = Column(String(255), unique=True, nullable=True)
+ network_id = Column(Integer, nullable=True)
+ instance_uuid = Column(String(36), ForeignKey('instances.uuid'),
+ nullable=True)
+ uuid = Column(String(36), nullable=True)
# TODO(vish): can these both come from the same baseclass?
@@ -753,11 +765,15 @@ class FloatingIp(BASE, NovaBase):
class DNSDomain(BASE, NovaBase):
"""Represents a DNS domain with availability zone or project info."""
__tablename__ = 'dns_domains'
+ __table_args__ = (
+ Index('project_id', 'project_id'),
+ Index('dns_domains_domain_deleted_idx', 'domain', 'deleted'),
+ )
deleted = Column(Boolean, default=False)
- domain = Column(String(512), primary_key=True)
- scope = Column(String(255))
- availability_zone = Column(String(255))
- project_id = Column(String(255))
+ domain = Column(String(255), primary_key=True)
+ scope = Column(String(255), nullable=True)
+ availability_zone = Column(String(255), nullable=True)
+ project_id = Column(String(255), nullable=True)
class ConsolePool(BASE, NovaBase):
@@ -859,6 +875,10 @@ class Cell(BASE, NovaBase):
of entries with is_parent=True or False
"""
__tablename__ = 'cells'
+ __table_args__ = (schema.UniqueConstraint(
+ "name", "deleted", name="uniq_cell_name0deleted"
+ ),
+ )
id = Column(Integer, primary_key=True)
# Name here is the 'short name' of a cell. For instance: 'child1'
name = Column(String(255))
@@ -888,6 +908,9 @@ class AggregateHost(BASE, NovaBase):
class AggregateMetadata(BASE, NovaBase):
"""Represents a metadata key/value pair for an aggregate."""
__tablename__ = 'aggregate_metadata'
+ __table_args__ = (
+ Index('aggregate_metadata_key_idx', 'key'),
+ )
id = Column(Integer, primary_key=True)
key = Column(String(255), nullable=False)
value = Column(String(255), nullable=False)
@@ -944,15 +967,19 @@ class AgentBuild(BASE, NovaBase):
class BandwidthUsage(BASE, NovaBase):
"""Cache for instance bandwidth usage data pulled from the hypervisor."""
__tablename__ = 'bw_usage_cache'
+ __table_args__ = (
+ Index('bw_usage_cache_uuid_start_period_idx', 'uuid',
+ 'start_period'),
+ )
id = Column(Integer, primary_key=True, nullable=False)
- uuid = Column(String(36), nullable=False)
- mac = Column(String(255), nullable=False)
+ uuid = Column(String(36), nullable=True)
+ mac = Column(String(255), nullable=True)
start_period = Column(DateTime, nullable=False)
- last_refreshed = Column(DateTime)
- bw_in = Column(BigInteger)
- bw_out = Column(BigInteger)
- last_ctr_in = Column(BigInteger)
- last_ctr_out = Column(BigInteger)
+ last_refreshed = Column(DateTime, nullable=True)
+ bw_in = Column(BigInteger, nullable=True)
+ bw_out = Column(BigInteger, nullable=True)
+ last_ctr_in = Column(BigInteger, nullable=True)
+ last_ctr_out = Column(BigInteger, nullable=True)
class VolumeUsage(BASE, NovaBase):
@@ -993,6 +1020,7 @@ class VolumeIdMapping(BASE, NovaBase):
class SnapshotIdMapping(BASE, NovaBase):
"""Compatibility layer for the EC2 snapshot service."""
__tablename__ = 'snapshot_id_mappings'
+ __table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
@@ -1057,16 +1085,21 @@ class TaskLog(BASE, NovaBase):
'task_name', 'host', 'period_beginning', 'period_ending',
name="uniq_task_log0task_name0host0period_beginning0period_ending"
),
+ Index('ix_task_log_period_beginning', 'period_beginning'),
+ Index('ix_task_log_host', 'host'),
+ Index('ix_task_log_period_ending', 'period_ending'),
)
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
task_name = Column(String(255), nullable=False)
state = Column(String(255), nullable=False)
- host = Column(String(255))
- period_beginning = Column(DateTime, default=timeutils.utcnow)
- period_ending = Column(DateTime, default=timeutils.utcnow)
+ host = Column(String(255), nullable=False)
+ period_beginning = Column(DateTime, default=timeutils.utcnow,
+ nullable=False)
+ period_ending = Column(DateTime, default=timeutils.utcnow,
+ nullable=False)
message = Column(String(255), nullable=False)
- task_items = Column(Integer(), default=0)
- errors = Column(Integer(), default=0)
+ task_items = Column(Integer(), default=0, nullable=True)
+ errors = Column(Integer(), default=0, nullable=True)
class InstanceGroupMember(BASE, NovaBase):
diff --git a/nova/db/sqlalchemy/utils.py b/nova/db/sqlalchemy/utils.py
index 67acad2b7..918f50bfd 100644
--- a/nova/db/sqlalchemy/utils.py
+++ b/nova/db/sqlalchemy/utils.py
@@ -17,7 +17,7 @@
import re
-from migrate.changeset import UniqueConstraint
+from migrate.changeset import UniqueConstraint, ForeignKeyConstraint
from sqlalchemy import Boolean
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
@@ -73,7 +73,7 @@ def visit_insert_from_select(element, compiler, **kw):
def _get_not_supported_column(col_name_col_instance, column_name):
try:
column = col_name_col_instance[column_name]
- except Exception as e:
+ except Exception:
msg = _("Please specify column %s in col_name_col_instance "
"param. It is required because column has unsupported "
"type by sqlite).")
@@ -101,7 +101,8 @@ def _get_unique_constraints_in_sqlite(migrate_engine, table_name):
uniques = set([
schema.UniqueConstraint(
- *[getattr(table.c, c.strip()) for c in cols.split(",")], name=name
+ *[getattr(table.c, c.strip(' "'))
+ for c in cols.split(",")], name=name
)
for name, cols in re.findall(regexp, sql_data)
])
@@ -128,7 +129,8 @@ def _drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name,
table.constraints.update(uniques)
constraints = [constraint for constraint in table.constraints
- if not constraint.name == uc_name]
+ if not constraint.name == uc_name and
+ not isinstance(constraint, schema.ForeignKeyConstraint)]
new_table = Table(table_name + "__tmp__", meta, *(columns + constraints))
new_table.create()
@@ -139,12 +141,20 @@ def _drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name,
indexes.append(Index(index["name"],
*column_names,
unique=index["unique"]))
+ f_keys = []
+ for fk in insp.get_foreign_keys(table_name):
+ refcolumns = [fk['referred_table'] + '.' + col
+ for col in fk['referred_columns']]
+ f_keys.append(ForeignKeyConstraint(fk['constrained_columns'],
+ refcolumns, table=new_table, name=fk['name']))
ins = InsertFromSelect(new_table, table.select())
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
+ for fkey in f_keys:
+ fkey.create()
new_table.rename(table_name)