| | | |
|---|---|---|
| author | Jenkins <jenkins@review.openstack.org> | 2012-02-10 01:50:02 +0000 |
| committer | Gerrit Code Review <review@openstack.org> | 2012-02-10 01:50:02 +0000 |
| commit | 511a6219315dc5e9394c3df1e596fda96dbea121 (patch) | |
| tree | 14729d1f8c07308030283e969f127709cf613411 /nova/db | |
| parent | ab568d4d457735a3a20fd1b926c2151da13f09f3 (diff) | |
| parent | 567c0e78a3078cb5a3f200321a6d99173d8afc5f (diff) | |
| download | nova-511a6219315dc5e9394c3df1e596fda96dbea121.tar.gz nova-511a6219315dc5e9394c3df1e596fda96dbea121.tar.xz nova-511a6219315dc5e9394c3df1e596fda96dbea121.zip | |
Merge "Backslash continuations (nova.db)"
Diffstat (limited to 'nova/db')
5 files changed, 31 insertions, 36 deletions
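The merge is a mechanical style cleanup: backslash line continuations in nova/db are replaced with implicit continuations inside parentheses or brackets, the form preferred by PEP 8. A minimal sketch of the pattern being applied, using illustrative names that do not appear in the patch:

```python
# Illustrative names only; nothing here comes from the nova/db patch itself.
base_quota, extra_quota = 10, 5

# Before: an explicit backslash continuation, the style being removed. A stray
# space after the backslash or a careless edit breaks the statement.
total_quota = base_quota + \
              extra_quota

# After: implicit continuation inside parentheses, the preferred form.
total_quota = (base_quota +
               extra_quota)

print(total_quota)  # 15
```

Parenthesized continuations also survive reindenting and trailing-whitespace edits, which the backslash form does not.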
diff --git a/nova/db/base.py b/nova/db/base.py
index a1b3bf711..01c39010a 100644
--- a/nova/db/base.py
+++ b/nova/db/base.py
@@ -23,10 +23,9 @@
 from nova import flags
 from nova.openstack.common import cfg
 
-db_driver_opt = \
-        cfg.StrOpt('db_driver',
-                   default='nova.db',
-                   help='driver to use for database access')
+db_driver_opt = cfg.StrOpt('db_driver',
+                           default='nova.db',
+                           help='driver to use for database access')
 
 FLAGS = flags.FLAGS
 FLAGS.add_option(db_driver_opt)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index b27ac9689..43d6ec9de 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -245,8 +245,7 @@ def service_destroy(context, service_id):
         service_ref = service_get(context, service_id, session=session)
         service_ref.delete(session=session)
 
-        if service_ref.topic == 'compute' and \
-            len(service_ref.compute_node) != 0:
+        if service_ref.topic == 'compute' and service_ref.compute_node:
             for c in service_ref.compute_node:
                 c.delete(session=session)
 
@@ -571,11 +570,11 @@ def compute_node_utilization_update(context, host, free_ram_mb_delta=0,
     if free_ram_mb_delta != 0:
         compute_node.free_ram_mb = table.c.free_ram_mb + free_ram_mb_delta
     if free_disk_gb_delta != 0:
-        compute_node.free_disk_gb = table.c.free_disk_gb + \
-                                    free_disk_gb_delta
+        compute_node.free_disk_gb = (table.c.free_disk_gb +
+                                     free_disk_gb_delta)
     if work_delta != 0:
-        compute_node.current_workload = table.c.current_workload + \
-                                        work_delta
+        compute_node.current_workload = (table.c.current_workload +
+                                         work_delta)
     if vm_delta != 0:
         compute_node.running_vms = table.c.running_vms + vm_delta
     return compute_node
@@ -1487,7 +1486,7 @@ def instance_get_all_by_filters(context, filters):
        otherwise"""
 
     def _regexp_filter_by_metadata(instance, meta):
-        inst_metadata = [{node['key']: node['value']} \
+        inst_metadata = [{node['key']: node['value']}
                          for node in instance['metadata']]
         if isinstance(meta, list):
             for node in meta:
@@ -1829,8 +1828,7 @@ def instance_info_cache_update(context, instance_uuid, values,
         # NOTE(tr3buchet): just in case someone blows away an instance's
         #                  cache entry
         values['instance_id'] = instance_uuid
-        info_cache = \
-            instance_info_cache_create(context, values)
+        info_cache = instance_info_cache_create(context, values)
 
     return info_cache
 
@@ -3387,8 +3385,8 @@ def _dict_with_extra_specs(inst_type_query):
 
     """
     inst_type_dict = dict(inst_type_query)
-    extra_specs = dict([(x['key'], x['value']) for x in \
-                        inst_type_query['extra_specs']])
+    extra_specs = dict([(x['key'], x['value'])
+                        for x in inst_type_query['extra_specs']])
     inst_type_dict['extra_specs'] = extra_specs
     return inst_type_dict
 
@@ -4138,7 +4136,7 @@ def sm_backend_conf_get(context, sm_backend_id):
                      first()
 
     if not result:
-        raise exception.NotFound(_("No backend config with id "\
+        raise exception.NotFound(_("No backend config with id "
                                    "%(sm_backend_id)s") % locals())
 
     return result
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/034_change_instance_id_in_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/034_change_instance_id_in_migrations.py
index 4e8eaf0fd..b4a3be3db 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/034_change_instance_id_in_migrations.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/034_change_instance_id_in_migrations.py
@@ -37,8 +37,8 @@ def upgrade(migrate_engine):
 
     if migrate_engine.name == "mysql":
         try:
-            migrate_engine.execute("ALTER TABLE migrations DROP FOREIGN KEY " \
-                                   "`migrations_ibfk_1`;")
+            migrate_engine.execute("ALTER TABLE migrations DROP FOREIGN KEY "
+                                   "`migrations_ibfk_1`;")
         except Exception:
             # Don't care, just fail silently.
             pass
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/059_split_rxtx_quota_into_network.py b/nova/db/sqlalchemy/migrate_repo/versions/059_split_rxtx_quota_into_network.py
index 122b60d2f..e7750e24b 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/059_split_rxtx_quota_into_network.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/059_split_rxtx_quota_into_network.py
@@ -34,12 +34,11 @@ def upgrade(migrate_engine):
     instance_types.create_column(rxtx_factor)
     networks.create_column(rxtx_base)
 
-    base = migrate_engine.execute("select min(rxtx_cap) as min_rxtx from "\
-                    "instance_types where rxtx_cap > 0")\
-                    .scalar()
+    base = migrate_engine.execute("select min(rxtx_cap) as min_rxtx from "
+            "instance_types where rxtx_cap > 0").scalar()
     base = base if base > 1 else 1
-    update_i_type_sql = "update instance_types set rxtx_factor = rxtx_cap"\
-                    "/%s where rxtx_cap > 0" % base
+    update_i_type_sql = ("update instance_types set rxtx_factor = rxtx_cap"
+                         "/%s where rxtx_cap > 0" % base)
     migrate_engine.execute(update_i_type_sql)
 
     migrate_engine.execute("update networks set rxtx_base = %s" % base)
@@ -55,12 +54,12 @@ def downgrade(migrate_engine):
     instance_types.create_column(rxtx_quota)
     instance_types.create_column(rxtx_cap)
 
-    base = migrate_engine.execute("select min(rxtx_base) from networks "\
+    base = migrate_engine.execute("select min(rxtx_base) from networks "
             "where rxtx_base > 0").scalar()
     base = base if base > 1 else 1
 
-    update_i_type_sql = "update instance_types set rxtx_cap = "\
-                    "rxtx_factor * %s" % base
+    update_i_type_sql = ("update instance_types set rxtx_cap = "
+                         "rxtx_factor * %s" % base)
     migrate_engine.execute(update_i_type_sql)
 
     instance_types.c.rxtx_factor.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/066_preload_instance_info_cache_table.py b/nova/db/sqlalchemy/migrate_repo/versions/066_preload_instance_info_cache_table.py
index a9a9db96e..f62c3c38f 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/066_preload_instance_info_cache_table.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/066_preload_instance_info_cache_table.py
@@ -75,8 +75,8 @@ def upgrade(migrate_engine):
         # fixed ips have floating ips, so here they are
         for fixed_ip in fixed_ip_list:
             fixed_ip['version'] = 4
-            fixed_ip['floating_ips'] =\
-                 get_floating_ips_by_fixed_ip_id(fixed_ip['id'])
+            fixed_ip['floating_ips'] = get_floating_ips_by_fixed_ip_id(
+                    fixed_ip['id'])
             fixed_ip['type'] = 'fixed'
             del fixed_ip['id']
 
@@ -171,8 +171,7 @@ def upgrade(migrate_engine):
             network['meta']['multi_host'] = network['multi_host']
             del network['multi_host']
         if network['bridge_interface']:
-            network['meta']['bridge_interface'] = \
-                network['bridge_interface']
+            network['meta']['bridge_interface'] = network['bridge_interface']
             del network['bridge_interface']
         if network['vlan']:
             network['meta']['vlan'] = network['vlan']
@@ -200,14 +199,14 @@ def upgrade(migrate_engine):
         # instances have vifs so aninstance nw_info is
        # is a list of dicts, 1 dict for each vif
         nw_info = get_vifs_by_instance_id(instance_id)
-        logging.info("VIFs for Instance %s: \n %s" % \
-                     (instance['uuid'], nw_info))
+        logging.info("VIFs for Instance %s: \n %s" %
+                     (instance['uuid'], nw_info))
         for vif in nw_info:
             networks_ = get_network_by_id(vif['network_id'])
             if networks_:
                 network = networks_[0]
-                logging.info("Network for Instance %s: \n %s" % \
-                             (instance['uuid'], network))
+                logging.info("Network for Instance %s: \n %s" %
+                             (instance['uuid'], network))
                 _update_network(vif, network)
             else:
                 network = None
@@ -219,8 +218,8 @@
 
             # vif['meta'] could also be set to contain rxtx data here
             # but it isn't exposed in the api and is still being rewritten
-            logging.info("VIF network for instance %s: \n %s" % \
-                         (instance['uuid'], vif['network']))
+            logging.info("VIF network for instance %s: \n %s" %
+                         (instance['uuid'], vif['network']))
 
             # jsonify nw_info
             row = {'created_at': utils.utcnow(),
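One hunk in service_destroy() goes a step beyond reformatting: len(service_ref.compute_node) != 0 becomes a bare truthiness test. For list-like collections (SQLAlchemy relationship collections behave this way) the two checks agree, because an empty collection is falsy. A small sketch under that assumption, with a plain list standing in for the relationship:

```python
# A plain list stands in for service_ref.compute_node; the real attribute is a
# SQLAlchemy relationship collection, assumed here to behave like a list.
compute_nodes = []
assert bool(compute_nodes) == (len(compute_nodes) != 0)  # both False when empty

compute_nodes.append("compute-node-1")
assert bool(compute_nodes) == (len(compute_nodes) != 0)  # both True once populated
```

The two forms would only diverge if the attribute could be None, where len() raises a TypeError while a truthiness test simply evaluates false.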