| author | Soren Hansen <soren.hansen@rackspace.com> | 2010-09-10 15:02:07 +0200 |
|---|---|---|
| committer | Soren Hansen <soren.hansen@rackspace.com> | 2010-09-10 15:02:07 +0200 |
| commit | d64adee4656a3044258c7dbfff93f5201c39560c (patch) | |
| tree | 26b1419d5fd8c8f98a55de70461dbe342b133fab /nova | |
| parent | c3dd0aa79d982d8f34172e6023d4b632ea23f2b9 (diff) | |
| parent | 33d832ee798bc9530be577e3234ff8bcdac4939e (diff) | |
| download | nova-d64adee4656a3044258c7dbfff93f5201c39560c.tar.gz nova-d64adee4656a3044258c7dbfff93f5201c39560c.tar.xz nova-d64adee4656a3044258c7dbfff93f5201c39560c.zip | |
Merge with orm_deux (fixing up style changes in my stuff at the same time).
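The substance of the merge is the ORM rework in nova/db/sqlalchemy below: every `managed_session(autocommit=False)` context manager is replaced by a module-level session factory plus an explicit `with session.begin():` transaction. A minimal sketch of the pattern, adapted from the session.py hunk further down; the in-memory SQLite URL stands in for `FLAGS.sql_connection`, and it assumes the SQLAlchemy 0.6-era API the codebase used (later SQLAlchemy removed the `autocommit` flag):

```python
# Sketch of the get_session() pattern this merge introduces. Assumptions:
# SQLAlchemy 0.6-era sessionmaker (still accepts autocommit=True) and an
# in-memory SQLite URL in place of FLAGS.sql_connection.
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

_ENGINE = None
_MAKER = None


def get_session(autocommit=True, expire_on_commit=False):
    """Return a session from a lazily created, module-level factory."""
    global _ENGINE, _MAKER
    if not _MAKER:
        if not _ENGINE:
            _ENGINE = create_engine('sqlite:///:memory:', echo=False)
        _MAKER = sessionmaker(bind=_ENGINE,
                              autocommit=autocommit,
                              expire_on_commit=expire_on_commit)
    return _MAKER()


# Callers then scope each write in an explicit transaction, e.g.:
#
#     session = get_session()
#     with session.begin():
#         row = models.Service.find(service_id, session=session)
#         row['report_count'] += 1
#         row.save(session=session)
```

This choice also explains the many deleted `session.commit()` calls in the api.py hunks: leaving the `session.begin()` block normally commits, and raising out of it rolls back, so no manual commit or rollback is needed.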
Diffstat (limited to 'nova')
| -rw-r--r-- | nova/api/rackspace/servers.py | 29 |
| -rw-r--r-- | nova/compute/manager.py | 33 |
| -rw-r--r-- | nova/db/api.py | 13 |
| -rw-r--r-- | nova/db/sqlalchemy/api.py | 472 |
| -rw-r--r-- | nova/db/sqlalchemy/models.py | 139 |
| -rw-r--r-- | nova/db/sqlalchemy/session.py | 44 |
| -rw-r--r-- | nova/endpoint/admin.py | 3 |
| -rw-r--r-- | nova/endpoint/cloud.py | 72 |
| -rw-r--r-- | nova/flags.py | 11 |
| -rw-r--r-- | nova/network/linux_net.py | 26 |
| -rw-r--r-- | nova/network/manager.py | 1 |
| -rw-r--r-- | nova/service.py | 3 |
| -rw-r--r-- | nova/test.py | 6 |
| -rw-r--r-- | nova/tests/access_unittest.py | 2 |
| -rw-r--r-- | nova/tests/auth_unittest.py | 3 |
| -rw-r--r-- | nova/tests/cloud_unittest.py | 3 |
| -rw-r--r-- | nova/tests/compute_unittest.py | 5 |
| -rw-r--r-- | nova/tests/fake_flags.py | 5 |
| -rw-r--r-- | nova/tests/network_unittest.py | 8 |
| -rw-r--r-- | nova/tests/real_flags.py | 1 |
| -rw-r--r-- | nova/tests/service_unittest.py | 72 |
| -rw-r--r-- | nova/tests/storage_unittest.py | 115 |
| -rw-r--r-- | nova/tests/volume_unittest.py | 18 |
| -rw-r--r-- | nova/utils.py | 2 |
| -rw-r--r-- | nova/virt/libvirt_conn.py | 29 |
| -rw-r--r-- | nova/volume/driver.py | 4 |
| -rw-r--r-- | nova/volume/manager.py | 12 |
27 files changed, 506 insertions, 625 deletions
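Beyond session handling, the db API below standardizes on passing a `values` dictionary to create and update functions, e.g. `floating_ip_create(context, values)` replacing the positional `(context, address, host)` form. Roughly, in the Python 2 idiom of this codebase; the `FakeModel` stand-in here is hypothetical:

```python
# Illustration of the values-dict convention adopted below. FakeModel is a
# hypothetical stand-in for nova's SQLAlchemy models, which also behave
# like dicts.
class FakeModel(dict):
    def save(self):
        """A real model would flush itself to the current session."""
        pass


def floating_ip_create(_context, values):
    """Create a floating ip from the values dictionary."""
    floating_ip_ref = FakeModel()
    for (key, value) in values.iteritems():  # Python 2, as in the codebase
        floating_ip_ref[key] = value
    floating_ip_ref.save()
    return floating_ip_ref['address']


# Callers build the dict, as nova/tests/network_unittest.py does below:
# floating_ip_create(None, {'address': address, 'host': FLAGS.host})
```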
diff --git a/nova/api/rackspace/servers.py b/nova/api/rackspace/servers.py
index 603a18944..44174ca52 100644
--- a/nova/api/rackspace/servers.py
+++ b/nova/api/rackspace/servers.py
@@ -14,27 +14,31 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+import time
+from nova import db
+from nova import flags
 from nova import rpc
+from nova import utils
 from nova.api.rackspace import base
-# FIXME(vish): convert from old usage of instance directory
+FLAGS = flags.FLAGS
 class Controller(base.Controller):
     entity_name = 'servers'
     def index(self, **kwargs):
         instances = []
-        for inst in compute.InstanceDirectory().all:
+        for inst in db.instance_get_all(None):
             instances.append(instance_details(inst))
     def show(self, **kwargs):
         instance_id = kwargs['id']
-        return compute.InstanceDirectory().get(instance_id)
+        return db.instance_get(None, instance_id)
     def delete(self, **kwargs):
         instance_id = kwargs['id']
-        instance = compute.InstanceDirectory().get(instance_id)
+        instance = db.instance_get(None, instance_id)
         if not instance:
             raise ServerNotFound("The requested server was not found")
         instance.destroy()
@@ -45,11 +49,11 @@ class Controller(base.Controller):
         rpc.cast(
             FLAGS.compute_topic, {
                 "method": "run_instance",
-                "args": {"instance_id": inst.instance_id}})
+                "args": {"instance_id": inst['id']}})
     def update(self, **kwargs):
         instance_id = kwargs['id']
-        instance = compute.InstanceDirectory().get(instance_id)
+        instance = db.instance_get(None, instance_id)
         if not instance:
             raise ServerNotFound("The requested server was not found")
         instance.update(kwargs['server'])
@@ -59,7 +63,7 @@ class Controller(base.Controller):
         """Build instance data structure and save it to the data store."""
         reservation = utils.generate_uid('r')
         ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
-        inst = self.instdir.new()
+        inst = {}
         inst['name'] = env['server']['name']
         inst['image_id'] = env['server']['imageId']
         inst['instance_type'] = env['server']['flavorId']
@@ -68,15 +72,8 @@ class Controller(base.Controller):
         inst['reservation_id'] = reservation
         inst['launch_time'] = ltime
         inst['mac_address'] = utils.generate_mac()
-        address = self.network.allocate_ip(
-            inst['user_id'],
-            inst['project_id'],
-            mac=inst['mac_address'])
-        inst['private_dns_name'] = str(address)
-        inst['bridge_name'] = network.BridgedNetwork.get_network_for_project(
-            inst['user_id'],
-            inst['project_id'],
-            'default')['bridge_name']
+        inst_id = db.instance_create(None, inst)
+        address = self.network_manager.allocate_fixed_ip(None, inst_id)
         # key_data, key_name, ami_launch_index
         # TODO(todd): key data or root password
         inst.save()
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 13e5dcd1f..5f7a94106 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -59,7 +59,7 @@ class ComputeManager(manager.Manager):
         # FIXME(ja): include other fields from state?
         instance_ref = self.db.instance_get(context, instance_id)
         state = self.driver.get_info(instance_ref.name)['state']
-        self.db.instance_state(context, instance_id, state)
+        self.db.instance_set_state(context, instance_id, state)
     @defer.inlineCallbacks
     @exception.wrap_exception
@@ -76,17 +76,19 @@ class ComputeManager(manager.Manager):
                                 {'host': self.host})
         # TODO(vish) check to make sure the availability zone matches
-        self.db.instance_state(context,
-                               instance_id,
-                               power_state.NOSTATE,
-                               'spawning')
+        self.db.instance_set_state(context,
+                                   instance_id,
+                                   power_state.NOSTATE,
+                                   'spawning')
         try:
             yield self.driver.spawn(instance_ref)
-        except:  # pylint: disable-msg=W0702
+        except Exception:  # pylint: disable-msg=W0702
             logging.exception("instance %s: Failed to spawn",
                               instance_ref['name'])
-            self.db.instance_state(context, instance_id, power_state.SHUTDOWN)
+            self.db.instance_set_state(context,
+                                       instance_id,
+                                       power_state.SHUTDOWN)
         self._update_state(context, instance_id)
@@ -97,16 +99,15 @@ class ComputeManager(manager.Manager):
         logging.debug("instance %s: terminating", instance_id)
         instance_ref = self.db.instance_get(context, instance_id)
-        # TODO(vish): move this logic to layer?
         if instance_ref['state'] == power_state.SHUTOFF:
             self.db.instance_destroy(context, instance_id)
             raise exception.Error('trying to destroy already destroyed'
                                   ' instance: %s' % instance_id)
-        self.db.instance_state(context,
-                               instance_id,
-                               power_state.NOSTATE,
-                               'shutting_down')
+        self.db.instance_set_state(context,
+                                   instance_id,
+                                   power_state.NOSTATE,
+                                   'shutting_down')
         yield self.driver.destroy(instance_ref)
         # TODO(ja): should we keep it in a terminated state for a bit?
@@ -128,10 +129,10 @@ class ComputeManager(manager.Manager):
                                            power_state.RUNNING))
         logging.debug('instance %s: rebooting', instance_ref['name'])
-        self.db.instance_state(context,
-                               instance_id,
-                               power_state.NOSTATE,
-                               'rebooting')
+        self.db.instance_set_state(context,
+                                   instance_id,
+                                   power_state.NOSTATE,
+                                   'rebooting')
         yield self.driver.reboot(instance_ref)
         self._update_state(context, instance_id)
diff --git a/nova/db/api.py b/nova/db/api.py
index c7a6da183..2bcf0bd2b 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -30,10 +30,9 @@ flags.DEFINE_string('db_backend', 'sqlalchemy',
 IMPL = utils.LazyPluggable(FLAGS['db_backend'],
-                                sqlalchemy='nova.db.sqlalchemy.api')
+                           sqlalchemy='nova.db.sqlalchemy.api')
-# TODO(vish): where should these exceptions go?
 class NoMoreAddresses(exception.Error):
     """No more available addresses"""
     pass
@@ -87,9 +86,9 @@ def floating_ip_allocate_address(context, host, project_id):
     return IMPL.floating_ip_allocate_address(context, host, project_id)
-def floating_ip_create(context, address, host):
-    """Create a floating ip for a given address on the specified host."""
-    return IMPL.floating_ip_create(context, address, host)
+def floating_ip_create(context, values):
+    """Create a floating ip from the values dictionary."""
+    return IMPL.floating_ip_create(context, values)
 def floating_ip_disassociate(context, address):
@@ -231,9 +230,9 @@ def instance_is_vpn(context, instance_id):
     return IMPL.instance_is_vpn(context, instance_id)
-def instance_state(context, instance_id, state, description=None):
+def instance_set_state(context, instance_id, state, description=None):
     """Set the state of an instance."""
-    return IMPL.instance_state(context, instance_id, state, description)
+    return IMPL.instance_set_state(context, instance_id, state, description)
 def instance_update(context, instance_id, values):
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 622e76cd7..1c95efd83 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -23,7 +23,7 @@ from nova import db
 from nova import exception
 from nova import flags
 from nova.db.sqlalchemy import models
-from nova.db.sqlalchemy.session import managed_session
+from nova.db.sqlalchemy.session import get_session
 from sqlalchemy import or_
 from sqlalchemy.orm import eagerload
@@ -52,55 +52,58 @@ def service_create(_context, values):
     return service_ref.id
-def service_update(context, service_id, values):
-    service_ref = service_get(context, service_id)
-    for (key, value) in values.iteritems():
-        service_ref[key] = value
-    service_ref.save()
+def service_update(_context, service_id, values):
+    session = get_session()
+    with session.begin():
+        service_ref = models.Service.find(service_id, session=session)
+        for (key, value) in values.iteritems():
+            service_ref[key] = value
+        service_ref.save(session=session)
 ###################
 def floating_ip_allocate_address(_context, host, project_id):
-    with managed_session(autocommit=False) as session:
-        floating_ip_ref = session.query(models.FloatingIp) \
-                                 .filter_by(host=host) \
-                                 .filter_by(fixed_ip_id=None) \
-                                 .filter_by(deleted=False) \
-                                 .with_lockmode('update') \
-                                 .first()
+    session = get_session()
+    with session.begin():
+        floating_ip_ref = session.query(models.FloatingIp
+                                ).filter_by(host=host
+                                ).filter_by(fixed_ip_id=None
+                                ).filter_by(deleted=False
+                                ).with_lockmode('update'
+                                ).first()
         # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
         #             then this has concurrency issues
         if not floating_ip_ref:
             raise db.NoMoreAddresses()
         floating_ip_ref['project_id'] = project_id
         session.add(floating_ip_ref)
-        session.commit()
-        return floating_ip_ref['address']
+    return floating_ip_ref['address']
-def floating_ip_create(_context, address, host):
+def floating_ip_create(_context, values):
     floating_ip_ref = models.FloatingIp()
-    floating_ip_ref['address'] = address
-    floating_ip_ref['host'] = host
+    for (key, value) in values.iteritems():
+        floating_ip_ref[key] = value
     floating_ip_ref.save()
-    return floating_ip_ref
+    return floating_ip_ref['address']
 def floating_ip_fixed_ip_associate(_context, floating_address, fixed_address):
-    with managed_session(autocommit=False) as session:
+    session = get_session()
+    with session.begin():
         floating_ip_ref = models.FloatingIp.find_by_str(floating_address,
                                                         session=session)
         fixed_ip_ref = models.FixedIp.find_by_str(fixed_address,
                                                   session=session)
         floating_ip_ref.fixed_ip = fixed_ip_ref
         floating_ip_ref.save(session=session)
-        session.commit()
 def floating_ip_disassociate(_context, address):
-    with managed_session(autocommit=False) as session:
+    session = get_session()
+    with session.begin():
         floating_ip_ref = models.FloatingIp.find_by_str(address,
                                                         session=session)
         fixed_ip_ref = floating_ip_ref.fixed_ip
@@ -110,12 +113,12 @@ def floating_ip_disassociate(_context, address):
             fixed_ip_address = None
         floating_ip_ref.fixed_ip = None
         floating_ip_ref.save(session=session)
-        session.commit()
-        return fixed_ip_address
+    return fixed_ip_address
 def floating_ip_deallocate(_context, address):
-    with managed_session(autocommit=False) as session:
+    session = get_session()
+    with session.begin():
         floating_ip_ref = models.FloatingIp.find_by_str(address,
                                                         session=session)
         floating_ip_ref['project_id'] = None
@@ -127,7 +130,8 @@ def floating_ip_get_by_address(_context, address):
 def floating_ip_get_instance(_context, address):
-    with managed_session() as session:
+    session = get_session()
+    with session.begin():
         floating_ip_ref = models.FloatingIp.find_by_str(address,
                                                         session=session)
         return floating_ip_ref.fixed_ip.instance
@@ -137,27 +141,28 @@
 def fixed_ip_allocate(_context, network_id):
-    with managed_session(autocommit=False) as session:
+    session = get_session()
+    with session.begin():
         network_or_none = or_(models.FixedIp.network_id == network_id,
                               models.FixedIp.network_id == None)
-        fixed_ip_ref = session.query(models.FixedIp) \
-                              .filter(network_or_none) \
-                              .filter_by(reserved=False) \
-                              .filter_by(allocated=False) \
-                              .filter_by(leased=False) \
-                              .filter_by(deleted=False) \
-                              .with_lockmode('update') \
-                              .first()
+        fixed_ip_ref = session.query(models.FixedIp
+                             ).filter(network_or_none
+                             ).filter_by(reserved=False
+                             ).filter_by(allocated=False
+                             ).filter_by(leased=False
+                             ).filter_by(deleted=False
+                             ).with_lockmode('update'
+                             ).first()
         # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
         #             then this has concurrency issues
         if not fixed_ip_ref:
             raise db.NoMoreAddresses()
         if not fixed_ip_ref.network:
-            fixed_ip_ref.network = models.Network.find(network_id)
+            fixed_ip_ref.network = models.Network.find(network_id,
+                                                       session=session)
         fixed_ip_ref['allocated'] = True
         session.add(fixed_ip_ref)
-        session.commit()
-        return fixed_ip_ref['address']
+    return fixed_ip_ref['address']
 def fixed_ip_create(_context, values):
@@ -173,43 +178,45 @@ def fixed_ip_get_by_address(_context, address):
 def fixed_ip_get_instance(_context, address):
-    with managed_session() as session:
+    session = get_session()
+    with session.begin():
         return models.FixedIp.find_by_str(address, session=session).instance
 def fixed_ip_get_network(_context, address):
-    with managed_session() as session:
+    session = get_session()
+    with session.begin():
         return models.FixedIp.find_by_str(address, session=session).network
 def fixed_ip_deallocate(context, address):
-    fixed_ip_ref = fixed_ip_get_by_address(context, address)
-    fixed_ip_ref['allocated'] = False
-    fixed_ip_ref.save()
+    db.fixed_ip_update(context, address, {'allocated': False})
 def fixed_ip_instance_associate(_context, address, instance_id):
-    with managed_session(autocommit=False) as session:
+    session = get_session()
+    with session.begin():
         fixed_ip_ref = models.FixedIp.find_by_str(address, session=session)
         instance_ref = models.Instance.find(instance_id, session=session)
         fixed_ip_ref.instance = instance_ref
         fixed_ip_ref.save(session=session)
-        session.commit()
 def fixed_ip_instance_disassociate(_context, address):
-    with managed_session(autocommit=False) as session:
+    session = get_session()
+    with session.begin():
         fixed_ip_ref = models.FixedIp.find_by_str(address, session=session)
         fixed_ip_ref.instance = None
         fixed_ip_ref.save(session=session)
-        session.commit()
-def fixed_ip_update(context, address, values):
-    fixed_ip_ref = fixed_ip_get_by_address(context, address)
-    for (key, value) in values.iteritems():
-        fixed_ip_ref[key] = value
-    fixed_ip_ref.save()
+def fixed_ip_update(_context, address, values):
+    session = get_session()
+    with session.begin():
+        fixed_ip_ref = models.FixedIp.find_by_str(address, session=session)
+        for (key, value) in values.iteritems():
+            fixed_ip_ref[key] = value
+        fixed_ip_ref.save(session=session)
 ###################
@@ -223,9 +230,11 @@ def instance_create(_context, values):
     return instance_ref.id
-def instance_destroy(context, instance_id):
-    instance_ref = instance_get(context, instance_id)
-    instance_ref.delete()
+def instance_destroy(_context, instance_id):
+    session = get_session()
+    with session.begin():
+        instance_ref = models.Instance.find(instance_id, session=session)
+        instance_ref.delete(session=session)
 def instance_get(_context, instance_id):
@@ -237,19 +246,19 @@ def instance_get_all(_context):
 def instance_get_by_project(_context, project_id):
-    with managed_session() as session:
-        return session.query(models.Instance) \
-                      .filter_by(project_id=project_id) \
-                      .filter_by(deleted=False) \
-                      .all()
+    session = get_session()
+    return session.query(models.Instance
+                 ).filter_by(project_id=project_id
+                 ).filter_by(deleted=False
+                 ).all()
 def instance_get_by_reservation(_context, reservation_id):
-    with managed_session() as session:
-        return session.query(models.Instance) \
-                      .filter_by(reservation_id=reservation_id) \
-                      .filter_by(deleted=False) \
-                      .all()
+    session = get_session()
+    return session.query(models.Instance
+                 ).filter_by(reservation_id=reservation_id
+                 ).filter_by(deleted=False
+                 ).all()
 def instance_get_by_str(_context, str_id):
@@ -257,7 +266,8 @@ def instance_get_fixed_address(_context, instance_id):
-    with managed_session() as session:
+    session = get_session()
+    with session.begin():
         instance_ref = models.Instance.find(instance_id, session=session)
         if not instance_ref.fixed_ip:
             return None
@@ -265,7 +275,8 @@ def instance_get_floating_address(_context, instance_id):
-    with managed_session() as session:
+    session = get_session()
+    with session.begin():
         instance_ref = models.Instance.find(instance_id, session=session)
         if not instance_ref.fixed_ip:
             return None
@@ -281,20 +292,29 @@ def instance_get_host(context, instance_id):
 def instance_is_vpn(context, instance_id):
+    # TODO(vish): Move this into image code somewhere
     instance_ref = instance_get(context, instance_id)
     return instance_ref['image_id'] == FLAGS.vpn_image_id
-def instance_state(context, instance_id, state, description=None):
-    instance_ref = instance_get(context, instance_id)
-    instance_ref.set_state(state, description)
+def instance_set_state(context, instance_id, state, description=None):
+    # TODO(devcamcar): Move this out of models and into driver
+    from nova.compute import power_state
+    if not description:
+        description = power_state.name(state)
+    db.instance_update(context,
+                       instance_id,
+                       {'state': state,
+                        'state_description': description})
-def instance_update(context, instance_id, values):
-    instance_ref = instance_get(context, instance_id)
-    for (key, value) in values.iteritems():
-        instance_ref[key] = value
-    instance_ref.save()
+def instance_update(_context, instance_id, values):
+    session = get_session()
+    with session.begin():
+        instance_ref = models.Instance.find(instance_id, session=session)
+        for (key, value) in values.iteritems():
+            instance_ref[key] = value
+        instance_ref.save(session=session)
 ###################
@@ -305,31 +325,31 @@ def network_count(_context):
 def network_count_allocated_ips(_context, network_id):
-    with managed_session() as session:
-        return session.query(models.FixedIp) \
-                      .filter_by(network_id=network_id) \
-                      .filter_by(allocated=True) \
-                      .filter_by(deleted=False) \
-                      .count()
+    session = get_session()
+    return session.query(models.FixedIp
+                 ).filter_by(network_id=network_id
+                 ).filter_by(allocated=True
+                 ).filter_by(deleted=False
+                 ).count()
 def network_count_available_ips(_context, network_id):
-    with managed_session() as session:
-        return session.query(models.FixedIp) \
-                      .filter_by(network_id=network_id) \
-                      .filter_by(allocated=False) \
-                      .filter_by(reserved=False) \
-                      .filter_by(deleted=False) \
-                      .count()
+    session = get_session()
+    return session.query(models.FixedIp
+                 ).filter_by(network_id=network_id
+                 ).filter_by(allocated=False
+                 ).filter_by(reserved=False
+                 ).filter_by(deleted=False
+                 ).count()
 def network_count_reserved_ips(_context, network_id):
-    with managed_session() as session:
-        return session.query(models.FixedIp) \
-                      .filter_by(network_id=network_id) \
-                      .filter_by(reserved=True) \
-                      .filter_by(deleted=False) \
-                      .count()
+    session = get_session()
+    return session.query(models.FixedIp
+                 ).filter_by(network_id=network_id
+                 ).filter_by(reserved=True
+                 ).filter_by(deleted=False
+                 ).count()
 def network_create(_context, values):
@@ -341,7 +361,8 @@ def network_destroy(_context, network_id):
-    with managed_session(autocommit=False) as session:
+    session = get_session()
+    with session.begin():
         # TODO(vish): do we have to use sql here?
         session.execute('update networks set deleted=1 where id=:id',
                         {'id': network_id})
@@ -355,32 +376,33 @@ def network_destroy(_context, network_id):
         session.execute('update network_indexes set network_id=NULL '
                         'where network_id=:id',
                         {'id': network_id})
-        session.commit()
 def network_get(_context, network_id):
     return models.Network.find(network_id)
+# NOTE(vish): pylint complains because of the long method name, but
+#             it fits with the names of the rest of the methods
 # pylint: disable-msg=C0103
 def network_get_associated_fixed_ips(_context, network_id):
-    with managed_session() as session:
-        return session.query(models.FixedIp) \
-                      .filter_by(network_id=network_id) \
-                      .filter(models.FixedIp.instance_id != None) \
-                      .filter_by(deleted=False) \
-                      .all()
+    session = get_session()
+    return session.query(models.FixedIp
+                 ).filter_by(network_id=network_id
+                 ).filter(models.FixedIp.instance_id != None
+                 ).filter_by(deleted=False
+                 ).all()
 def network_get_by_bridge(_context, bridge):
-    with managed_session() as session:
-        rv = session.query(models.Network) \
-                    .filter_by(bridge=bridge) \
-                    .filter_by(deleted=False) \
-                    .first()
-        if not rv:
-            raise exception.NotFound('No network for bridge %s' % bridge)
-        return rv
+    session = get_session()
+    rv = session.query(models.Network
+         ).filter_by(bridge=bridge
+         ).filter_by(deleted=False
+         ).first()
+    if not rv:
+        raise exception.NotFound('No network for bridge %s' % bridge)
+    return rv
 def network_get_host(context, network_id):
@@ -389,19 +411,19 @@ def network_get_index(_context, network_id):
-    with managed_session(autocommit=False) as session:
-        network_index = session.query(models.NetworkIndex) \
-                               .filter_by(network_id=None) \
-                               .filter_by(deleted=False) \
-                               .with_lockmode('update') \
-                               .first()
+    session = get_session()
+    with session.begin():
+        network_index = session.query(models.NetworkIndex
+                              ).filter_by(network_id=None
+                              ).filter_by(deleted=False
+                              ).with_lockmode('update'
+                              ).first()
         if not network_index:
             raise db.NoMoreNetworks()
         network_index['network'] = models.Network.find(network_id,
                                                        session=session)
         session.add(network_index)
-        session.commit()
-        return network_index['index']
+    return network_index['index']
 def network_index_count(_context):
@@ -416,45 +438,45 @@ def network_index_create(_context, values):
 def network_set_host(_context, network_id, host_id):
-    with managed_session(autocommit=False) as session:
-        network = session.query(models.Network) \
-                         .filter_by(id=network_id) \
-                         .filter_by(deleted=False) \
-                         .with_lockmode('update') \
-                         .first()
+    session = get_session()
+    with session.begin():
+        network = session.query(models.Network
+                        ).filter_by(id=network_id
+                        ).filter_by(deleted=False
+                        ).with_lockmode('update'
+                        ).first()
         if not network:
             raise exception.NotFound("Couldn't find network with %s" %
                                      network_id)
         # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
         #             then this has concurrency issues
-        if network.host:
-            session.commit()
-            return network['host']
-        network['host'] = host_id
-        session.add(network)
-        session.commit()
-        return network['host']
+        if not network['host']:
+            network['host'] = host_id
+            session.add(network)
+    return network['host']
-def network_update(context, network_id, values):
-    network_ref = network_get(context, network_id)
-    for (key, value) in values.iteritems():
-        network_ref[key] = value
-    network_ref.save()
+def network_update(_context, network_id, values):
+    session = get_session()
+    with session.begin():
+        network_ref = models.Network.find(network_id, session=session)
+        for (key, value) in values.iteritems():
+            network_ref[key] = value
+        network_ref.save(session=session)
 ###################
 def project_get_network(_context, project_id):
-    with managed_session() as session:
-        rv = session.query(models.Network) \
-                    .filter_by(project_id=project_id) \
-                    .filter_by(deleted=False) \
-                    .first()
-        if not rv:
-            raise exception.NotFound('No network for project: %s' % project_id)
-        return rv
+    session = get_session()
+    rv = session.query(models.Network
+         ).filter_by(project_id=project_id
+         ).filter_by(deleted=False
+         ).first()
+    if not rv:
+        raise exception.NotFound('No network for project: %s' % project_id)
+    return rv
 ###################
@@ -483,29 +505,32 @@ def export_device_create(_context, values):
 def volume_allocate_shelf_and_blade(_context, volume_id):
-    with managed_session(autocommit=False) as session:
-        export_device = session.query(models.ExportDevice) \
-                               .filter_by(volume=None) \
-                               .filter_by(deleted=False) \
-                               .with_lockmode('update') \
-                               .first()
+    session = get_session()
+    with session.begin():
+        export_device = session.query(models.ExportDevice
+                              ).filter_by(volume=None
+                              ).filter_by(deleted=False
+                              ).with_lockmode('update'
+                              ).first()
         # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
         #             then this has concurrency issues
         if not export_device:
             raise db.NoMoreBlades()
         export_device.volume_id = volume_id
         session.add(export_device)
-        session.commit()
-        return (export_device.shelf_id, export_device.blade_id)
+    return (export_device.shelf_id, export_device.blade_id)
-def volume_attached(context, volume_id, instance_id, mountpoint):
-    volume_ref = volume_get(context, volume_id)
-    volume_ref.instance_id = instance_id
-    volume_ref['status'] = 'in-use'
-    volume_ref['mountpoint'] = mountpoint
-    volume_ref['attach_status'] = 'attached'
-    volume_ref.save()
+def volume_attached(_context, volume_id, instance_id, mountpoint):
+    session = get_session()
+    with session.begin():
+        volume_ref = models.Volume.find(volume_id, session=session)
+        volume_ref['status'] = 'in-use'
+        volume_ref['mountpoint'] = mountpoint
+        volume_ref['attach_status'] = 'attached'
+        volume_ref.instance = models.Instance.find(instance_id,
+                                                   session=session)
+        volume_ref.save(session=session)
 def volume_create(_context, values):
@@ -517,23 +542,25 @@ def volume_destroy(_context, volume_id):
-    with managed_session(autocommit=False) as session:
+    session = get_session()
+    with session.begin():
         # TODO(vish): do we have to use sql here?
         session.execute('update volumes set deleted=1 where id=:id',
                         {'id': volume_id})
         session.execute('update export_devices set volume_id=NULL '
                         'where volume_id=:id',
                         {'id': volume_id})
-        session.commit()
-def volume_detached(context, volume_id):
-    volume_ref = volume_get(context, volume_id)
-    volume_ref['instance_id'] = None
-    volume_ref['mountpoint'] = None
-    volume_ref['status'] = 'available'
-    volume_ref['attach_status'] = 'detached'
-    volume_ref.save()
+def volume_detached(_context, volume_id):
+    session = get_session()
+    with session.begin():
+        volume_ref = models.Volume.find(volume_id, session=session)
+        volume_ref['status'] = 'available'
+        volume_ref['mountpoint'] = None
+        volume_ref['attach_status'] = 'detached'
+        volume_ref.instance = None
+        volume_ref.save(session=session)
 def volume_get(_context, volume_id):
@@ -545,11 +572,11 @@ def volume_get_all(_context):
 def volume_get_by_project(_context, project_id):
-    with managed_session() as session:
-        return session.query(models.Volume) \
-                      .filter_by(project_id=project_id) \
-                      .filter_by(deleted=False) \
-                      .all()
+    session = get_session()
+    return session.query(models.Volume
+                 ).filter_by(project_id=project_id
+                 ).filter_by(deleted=False
+                 ).all()
 def volume_get_by_str(_context, str_id):
@@ -561,27 +588,29 @@ def volume_get_host(context, volume_id):
     return volume_ref['host']
-def volume_get_instance(context, volume_id):
-    volume_ref = db.volume_get(context, volume_id)
-    instance_ref = db.instance_get(context, volume_ref['instance_id'])
-    return instance_ref
+def volume_get_instance(_context, volume_id):
+    session = get_session()
+    with session.begin():
+        return models.Volume.find(volume_id, session=session).instance
 def volume_get_shelf_and_blade(_context, volume_id):
-    with managed_session() as session:
-        export_device = session.query(models.ExportDevice) \
-                               .filter_by(volume_id=volume_id) \
-                               .first()
-        if not export_device:
-            raise exception.NotFound()
-        return (export_device.shelf_id, export_device.blade_id)
+    session = get_session()
+    export_device = session.query(models.ExportDevice
+                          ).filter_by(volume_id=volume_id
+                          ).first()
+    if not export_device:
+        raise exception.NotFound()
+    return (export_device.shelf_id, export_device.blade_id)
-def volume_update(context, volume_id, values):
-    volume_ref = volume_get(context, volume_id)
-    for (key, value) in values.iteritems():
-        volume_ref[key] = value
-    volume_ref.save()
+def volume_update(_context, volume_id, values):
+    session = get_session()
+    with session.begin():
+        volume_ref = models.Volume.find(volume_id, session=session)
+        for (key, value) in values.iteritems():
+            volume_ref[key] = value
+        volume_ref.save(session=session)
 ###################
@@ -596,48 +625,48 @@ def security_group_create(_context, values):
 def security_group_get_by_id(_context, security_group_id):
-    with managed_session() as session:
-        return session.query(models.SecurityGroup) \
-                      .options(eagerload('rules')) \
-                      .get(security_group_id)
+    session = get_session()
+    with session.begin():
+        return session.query(models.SecurityGroup
+                     ).options(eagerload('rules')
+                     ).get(security_group_id)
 def security_group_get_by_instance(_context, instance_id):
-    with managed_session() as session:
-        return session.query(models.Instance) \
-                      .get(instance_id) \
-                      .security_groups \
+    session = get_session()
+    with session.begin():
+        return session.query(models.Instance
+                     ).get(instance_id
+                     ).security_groups \
                       .all()
 def security_group_get_by_user(_context, user_id):
-    with managed_session() as session:
-        return session.query(models.SecurityGroup) \
-                      .filter_by(user_id=user_id) \
-                      .filter_by(deleted=False) \
-                      .options(eagerload('rules')) \
-                      .all()
+    session = get_session()
+    with session.begin():
+        return session.query(models.SecurityGroup
+                     ).filter_by(user_id=user_id
+                     ).filter_by(deleted=False
+                     ).options(eagerload('rules')
+                     ).all()
 def security_group_get_by_user_and_name(_context, user_id, name):
-    with managed_session() as session:
-        return session.query(models.SecurityGroup) \
-                      .filter_by(user_id=user_id) \
-                      .filter_by(name=name) \
-                      .filter_by(deleted=False) \
-                      .options(eagerload('rules')) \
-                      .one()
+    session = get_session()
+    with session.begin():
+        return session.query(models.SecurityGroup
+                     ).filter_by(user_id=user_id
+                     ).filter_by(name=name
+                     ).filter_by(deleted=False
+                     ).options(eagerload('rules')
+                     ).one()
 def security_group_destroy(_context, security_group_id):
-    with managed_session() as session:
-        security_group = session.query(models.SecurityGroup) \
-                                .get(security_group_id)
+    session = get_session()
+    with session.begin():
+        security_group = session.query(models.SecurityGroup
+                                ).get(security_group_id)
         security_group.delete(session=session)
-def security_group_get_all(_context):
-    return models.SecurityGroup.all()
-
-
-
 ###################
@@ -650,7 +679,8 @@ def security_group_rule_create(_context, values):
     return security_group_rule_ref
 def security_group_rule_destroy(_context, security_group_rule_id):
-    with managed_session() as session:
-        security_group_rule = session.query(models.SecurityGroupIngressRule) \
-                                     .get(security_group_rule_id)
+    session = get_session()
+    with session.begin():
+        security_group_rule = session.query(models.SecurityGroupIngressRule
+                                    ).get(security_group_rule_id)
         security_group_rule.delete(session=session)
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 27c8e4d4c..f27520aa8 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -30,7 +30,7 @@ from sqlalchemy import Column, Integer, String, Table
 from sqlalchemy import ForeignKey, DateTime, Boolean, Text
 from sqlalchemy.ext.declarative import declarative_base
-from nova.db.sqlalchemy.session import managed_session
+from nova.db.sqlalchemy.session import get_session
 from nova import auth
 from nova import exception
@@ -53,40 +53,34 @@ class NovaBase(object):
     @classmethod
     def all(cls, session=None):
         """Get all objects of this type"""
-        if session:
-            return session.query(cls) \
-                          .filter_by(deleted=False) \
-                          .all()
-        else:
-            with managed_session() as sess:
-                return cls.all(session=sess)
+        if not session:
+            session = get_session()
+        return session.query(cls
+                     ).filter_by(deleted=False
+                     ).all()
     @classmethod
     def count(cls, session=None):
         """Count objects of this type"""
-        if session:
-            return session.query(cls) \
-                          .filter_by(deleted=False) \
-                          .count()
-        else:
-            with managed_session() as sess:
-                return cls.count(session=sess)
+        if not session:
+            session = get_session()
+        return session.query(cls
+                     ).filter_by(deleted=False
+                     ).count()
     @classmethod
     def find(cls, obj_id, session=None):
         """Find object by id"""
-        if session:
-            try:
-                return session.query(cls) \
-                              .filter_by(id=obj_id) \
-                              .filter_by(deleted=False) \
-                              .one()
-            except exc.NoResultFound:
-                new_exc = exception.NotFound("No model for id %s" % obj_id)
-                raise new_exc.__class__, new_exc, sys.exc_info()[2]
-        else:
-            with managed_session() as sess:
-                return cls.find(obj_id, session=sess)
+        if not session:
+            session = get_session()
+        try:
+            return session.query(cls
+                         ).filter_by(id=obj_id
+                         ).filter_by(deleted=False
+                         ).one()
+        except exc.NoResultFound:
+            new_exc = exception.NotFound("No model for id %s" % obj_id)
+            raise new_exc.__class__, new_exc, sys.exc_info()[2]
     @classmethod
     def find_by_str(cls, str_id, session=None):
@@ -101,12 +95,10 @@ class NovaBase(object):
     def save(self, session=None):
         """Save this object"""
-        if session:
-            session.add(self)
-            session.flush()
-        else:
-            with managed_session() as sess:
-                self.save(session=sess)
+        if not session:
+            session = get_session()
+        session.add(self)
+        session.flush()
     def delete(self, session=None):
         """Delete this object"""
@@ -175,20 +167,18 @@ class Service(BASE, NovaBase):
     @classmethod
     def find_by_args(cls, host, binary, session=None):
-        if session:
-            try:
-                return session.query(cls) \
-                              .filter_by(host=host) \
-                              .filter_by(binary=binary) \
-                              .filter_by(deleted=False) \
-                              .one()
-            except exc.NoResultFound:
-                new_exc = exception.NotFound("No model for %s, %s" % (host,
-                                                                      binary))
-                raise new_exc.__class__, new_exc, sys.exc_info()[2]
-        else:
-            with managed_session() as sess:
-                return cls.find_by_args(host, binary, session=sess)
+        if not session:
+            session = get_session()
+        try:
+            return session.query(cls
+                         ).filter_by(host=host
+                         ).filter_by(binary=binary
+                         ).filter_by(deleted=False
+                         ).one()
+        except exc.NoResultFound:
+            new_exc = exception.NotFound("No model for %s, %s" % (host,
+                                                                  binary))
+            raise new_exc.__class__, new_exc, sys.exc_info()[2]
 class Instance(BASE, NovaBase):
@@ -240,16 +230,6 @@ class Instance(BASE, NovaBase):
     reservation_id = Column(String(255))
     mac_address = Column(String(255))
-    def set_state(self, state_code, state_description=None):
-        """Set the code and description of an instance"""
-        # TODO(devcamcar): Move this out of models and into driver
-        from nova.compute import power_state
-        self.state = state_code
-        if not state_description:
-            state_description = power_state.name(state_code)
-        self.state_description = state_description
-        self.save()
     # TODO(vish): see Ewan's email about state improvements, probably
     #             should be in a driver base class or some such
     # vmstate_state = running, halted, suspended, paused
@@ -275,6 +255,7 @@ class Volume(BASE, NovaBase):
     size = Column(Integer)
     availability_zone = Column(String(255))  # TODO(vish): foreign key?
     instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True)
+    instance = relationship(Instance, backref=backref('volumes'))
     mountpoint = Column(String(255))
     attach_time = Column(String(255))  # TODO(vish): datetime
     status = Column(String(255))  # TODO(vish): enum?
@@ -405,18 +386,16 @@ class FixedIp(BASE, NovaBase):
     @classmethod
     def find_by_str(cls, str_id, session=None):
-        if session:
-            try:
-                return session.query(cls) \
-                              .filter_by(address=str_id) \
-                              .filter_by(deleted=False) \
-                              .one()
-            except exc.NoResultFound:
-                new_exc = exception.NotFound("No model for address %s" % str_id)
-                raise new_exc.__class__, new_exc, sys.exc_info()[2]
-        else:
-            with managed_session() as sess:
-                return cls.find_by_str(str_id, session=sess)
+        if not session:
+            session = get_session()
+        try:
+            return session.query(cls
+                         ).filter_by(address=str_id
+                         ).filter_by(deleted=False
+                         ).one()
+        except exc.NoResultFound:
+            new_exc = exception.NotFound("No model for address %s" % str_id)
+            raise new_exc.__class__, new_exc, sys.exc_info()[2]
 class FloatingIp(BASE, NovaBase):
@@ -436,18 +415,16 @@ class FloatingIp(BASE, NovaBase):
     @classmethod
     def find_by_str(cls, str_id, session=None):
-        if session:
-            try:
-                return session.query(cls) \
-                              .filter_by(address=str_id) \
-                              .filter_by(deleted=False) \
-                              .one()
-            except exc.NoResultFound:
-                new_exc = exception.NotFound("No model for address %s" % str_id)
-                raise new_exc.__class__, new_exc, sys.exc_info()[2]
-        else:
-            with managed_session() as sess:
-                return cls.find_by_str(str_id, session=sess)
+        if not session:
+            session = get_session()
+        try:
+            return session.query(cls
+                         ).filter_by(address=str_id
+                         ).filter_by(deleted=False
+                         ).one()
+        except exc.NoResultFound:
+            new_exc = exception.NotFound("No model for address %s" % str_id)
+            raise new_exc.__class__, new_exc, sys.exc_info()[2]
 def register_models():
diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py
index adcc42293..69a205378 100644
--- a/nova/db/sqlalchemy/session.py
+++ b/nova/db/sqlalchemy/session.py
@@ -19,38 +19,24 @@
 Session Handling for SQLAlchemy backend
 """
-import logging
-
 from sqlalchemy import create_engine
-from sqlalchemy.orm import create_session
+from sqlalchemy.orm import sessionmaker
 from nova import flags
 FLAGS = flags.FLAGS
-
-def managed_session(autocommit=True):
-    """Helper method to grab session manager"""
-    return SessionExecutionManager(autocommit=autocommit)
-
-
-class SessionExecutionManager:
-    """Session manager supporting with .. as syntax"""
as syntax""" - _engine = None - _session = None - - def __init__(self, autocommit): - if not self._engine: - self._engine = create_engine(FLAGS.sql_connection, echo=False) - self._session = create_session(bind=self._engine, - autocommit=autocommit) - - def __enter__(self): - return self._session - - def __exit__(self, exc_type, exc_value, traceback): - if exc_type: - logging.exception("Rolling back due to failed transaction: %s", - exc_type) - self._session.rollback() - self._session.close() +_ENGINE = None +_MAKER = None + +def get_session(autocommit=True, expire_on_commit=False): + """Helper method to grab session""" + global _ENGINE + global _MAKER + if not _MAKER: + if not _ENGINE: + _ENGINE = create_engine(FLAGS.sql_connection, echo=False) + _MAKER = sessionmaker(bind=_ENGINE, + autocommit=autocommit, + expire_on_commit=expire_on_commit) + return _MAKER() diff --git a/nova/endpoint/admin.py b/nova/endpoint/admin.py index 3d91c66dc..c6dcb5320 100644 --- a/nova/endpoint/admin.py +++ b/nova/endpoint/admin.py @@ -53,7 +53,6 @@ def project_dict(project): def host_dict(host): """Convert a host model object to a result dict""" if host: - # FIXME(vish) return host.state else: return {} @@ -195,6 +194,8 @@ class AdminController(object): raise exception.ApiError('operation must be add or remove') return True + # FIXME(vish): these host commands don't work yet, perhaps some of the + # required data can be retrieved from service objects? @admin_only def describe_hosts(self, _context, **_kwargs): """Returns status info for all nodes. Includes: diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 5e5ed6c5e..3334f09af 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -78,7 +78,7 @@ class CloudController(object): if not os.path.exists(root_ca_path): start = os.getcwd() os.chdir(FLAGS.ca_path) - # TODO: Do this with M2Crypto instead + # TODO(vish): Do this with M2Crypto instead utils.runthis("Generating root CA: %s", "sh genrootca.sh") os.chdir(start) @@ -93,28 +93,30 @@ class CloudController(object): result[instance['key_name']] = [line] return result - def get_metadata(self, ipaddress): - i = db.fixed_ip_get_instance(ipaddress) - if i is None: + def get_metadata(self, address): + instance_ref = db.fixed_ip_get_instance(None, address) + if instance_ref is None: return None - mpi = self._get_mpi_data(i['project_id']) - if i['key_name']: + mpi = self._get_mpi_data(instance_ref['project_id']) + if instance_ref['key_name']: keys = { '0': { - '_name': i['key_name'], - 'openssh-key': i['key_data'] + '_name': instance_ref['key_name'], + 'openssh-key': instance_ref['key_data'] } } else: keys = '' - hostname = i['hostname'] + hostname = instance_ref['hostname'] + floating_ip = db.instance_get_floating_ip_address(None, + instance_ref['id']) data = { - 'user-data': base64.b64decode(i['user_data']), + 'user-data': base64.b64decode(instance_ref['user_data']), 'meta-data': { - 'ami-id': i['image_id'], - 'ami-launch-index': i['ami_launch_index'], - 'ami-manifest-path': 'FIXME', # image property - 'block-device-mapping': { # TODO: replace with real data + 'ami-id': instance_ref['image_id'], + 'ami-launch-index': instance_ref['ami_launch_index'], + 'ami-manifest-path': 'FIXME', + 'block-device-mapping': { # TODO(vish): replace with real data 'ami': 'sda1', 'ephemeral0': 'sda2', 'root': '/dev/sda1', @@ -122,27 +124,27 @@ class CloudController(object): }, 'hostname': hostname, 'instance-action': 'none', - 'instance-id': i['instance_id'], - 'instance-type': i.get('instance_type', ''), + 
+                'instance-type': instance_ref['instance_type'],
                 'local-hostname': hostname,
-                'local-ipv4': i['private_dns_name'],  # TODO: switch to IP
-                'kernel-id': i.get('kernel_id', ''),
+                'local-ipv4': address,
+                'kernel-id': instance_ref['kernel_id'],
                 'placement': {
-                    'availaibility-zone': i.get('availability_zone', 'nova'),
+                    'availaibility-zone': instance_ref['availability_zone'],
                 },
                 'public-hostname': hostname,
-                'public-ipv4': i.get('dns_name', ''),  # TODO: switch to IP
+                'public-ipv4': floating_ip or '',
                 'public-keys': keys,
-                'ramdisk-id': i.get('ramdisk_id', ''),
-                'reservation-id': i['reservation_id'],
-                'security-groups': i.get('groups', ''),
+                'ramdisk-id': instance_ref['ramdisk_id'],
+                'reservation-id': instance_ref['reservation_id'],
+                'security-groups': '',
                 'mpi': mpi
             }
         }
-        if False:  # TODO: store ancestor ids
+        if False:  # TODO(vish): store ancestor ids
             data['ancestor-ami-ids'] = []
-        if i.get('product_codes', None):
-            data['product-codes'] = i['product_codes']
+        if False:  # TODO(vish): store product codes
+            data['product-codes'] = []
         return data
     @rbac.allow('all')
@@ -376,7 +378,7 @@ class CloudController(object):
             v['status'] = volume['status']
             v['size'] = volume['size']
             v['availabilityZone'] = volume['availability_zone']
-            # v['createTime'] = volume['create_time']
+            v['createTime'] = volume['created_at']
             if context.user.is_admin():
                 v['status'] = '%s (%s, %s, %s, %s)' % (
                     volume['status'],
@@ -419,7 +421,6 @@ class CloudController(object):
         # TODO(vish): abstract status checking?
         if volume_ref['attach_status'] == "attached":
             raise exception.ApiError("Volume is already attached")
-        #volume.start_attach(instance_id, device)
         instance_ref = db.instance_get_by_str(context, instance_id)
         host = db.instance_get_host(context, instance_ref['id'])
         rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
@@ -445,7 +446,6 @@ class CloudController(object):
         if volume_ref['status'] == "available":
             raise exception.Error("Volume is already detached")
         try:
-            #volume.start_detach()
             host = db.instance_get_host(context, instance_ref['id'])
             rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
                      {"method": "detach_volume",
@@ -545,15 +545,12 @@ class CloudController(object):
         for floating_ip_ref in iterator:
             address = floating_ip_ref['id_str']
             instance_ref = db.floating_ip_get_instance(address)
-            address_rv = {
-                'public_ip': address,
-                'instance_id': instance_ref['id_str']
-            }
+            address_rv = {'public_ip': address,
+                          'instance_id': instance_ref['id_str']}
             if context.user.is_admin():
-                address_rv['instance_id'] = "%s (%s)" % (
-                    address_rv['instance_id'],
-                    floating_ip_ref['project_id'],
-                )
+                details = "%s (%s)" % (address_rv['instance_id'],
+                                       floating_ip_ref['project_id'])
+                address_rv['instance_id'] = details
             addresses.append(address_rv)
         return {'addressesSet': addresses}
@@ -702,7 +699,6 @@ class CloudController(object):
     @defer.inlineCallbacks
     def terminate_instances(self, context, instance_id, **kwargs):
         logging.debug("Going to start terminating instances")
-        # network_topic = yield self._get_network_topic(context)
         for id_str in instance_id:
             logging.debug("Going to try and terminate %s" % id_str)
             try:
diff --git a/nova/flags.py b/nova/flags.py
index ebbfe3ff8..7b0c95a3c 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -203,12 +203,6 @@ DEFINE_string('vpn_key_suffix',
 DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger')
-# UNUSED
-DEFINE_string('node_availability_zone', 'nova',
-              'availability zone of this node')
-DEFINE_string('host', socket.gethostname(),
-              'name of this node')
-
 DEFINE_string('sql_connection',
               'sqlite:///%s/nova.sqlite' % os.path.abspath("./"),
               'connection string for sql database')
@@ -220,4 +214,9 @@ DEFINE_string('network_manager', 'nova.network.manager.VlanManager',
 DEFINE_string('volume_manager', 'nova.volume.manager.AOEManager',
               'Manager for volume')
+DEFINE_string('host', socket.gethostname(),
+              'name of this node')
+# UNUSED
+DEFINE_string('node_availability_zone', 'nova',
+              'availability zone of this node')
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 1506e85ad..41aeb5da7 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -41,6 +41,9 @@ flags.DEFINE_string('bridge_dev', 'eth0',
                     'network device for bridges')
+DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)]
+
+
 def bind_floating_ip(floating_ip):
     """Bind ip to public interface"""
     _execute("sudo ip addr add %s dev %s" % (floating_ip,
@@ -61,9 +64,6 @@ def ensure_vlan_forward(public_ip, port, private_ip):
                                % (public_ip, port, private_ip))
-DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)]
-
-
 def ensure_floating_forward(floating_ip, fixed_ip):
     """Ensure floating ip forwarding rule"""
     _confirm_rule("PREROUTING -t nat -d %s -j DNAT --to %s"
@@ -208,16 +208,16 @@ def _remove_rule(cmd):
 def _dnsmasq_cmd(net):
     """Builds dnsmasq command"""
     cmd = ['sudo -E dnsmasq',
-            ' --strict-order',
-            ' --bind-interfaces',
-            ' --conf-file=',
-            ' --pid-file=%s' % _dhcp_file(net['vlan'], 'pid'),
-            ' --listen-address=%s' % net['gateway'],
-            ' --except-interface=lo',
-            ' --dhcp-range=%s,static,120s' % net['dhcp_start'],
-            ' --dhcp-hostsfile=%s' % _dhcp_file(net['vlan'], 'conf'),
-            ' --dhcp-script=%s' % _bin_file('nova-dhcpbridge'),
-            ' --leasefile-ro']
+           ' --strict-order',
+           ' --bind-interfaces',
+           ' --conf-file=',
+           ' --pid-file=%s' % _dhcp_file(net['vlan'], 'pid'),
+           ' --listen-address=%s' % net['gateway'],
+           ' --except-interface=lo',
+           ' --dhcp-range=%s,static,120s' % net['dhcp_start'],
+           ' --dhcp-hostsfile=%s' % _dhcp_file(net['vlan'], 'conf'),
+           ' --dhcp-script=%s' % _bin_file('nova-dhcpbridge'),
+           ' --leasefile-ro']
     return ''.join(cmd)
diff --git a/nova/network/manager.py b/nova/network/manager.py
index dbb8e66da..83de5d023 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -260,6 +260,7 @@ class VlanManager(NetworkManager):
             significant_bits = 32 - int(math.log(FLAGS.network_size, 2))
             cidr = "%s/%s" % (private_net[start], significant_bits)
             project_net = IPy.IP(cidr)
+            net = {}
             net['cidr'] = cidr
             # NOTE(vish): we could turn these into properties
diff --git a/nova/service.py b/nova/service.py
index bc4b80fe4..60583dcdb 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -140,6 +140,7 @@ class Service(object, service.Service):
                 logging.debug("The service database object disappeared, "
                               "Recreating it.")
                 self._create_service_ref()
+                service_ref = db.service_get(context, self.service_id)
             db.service_update(context,
                               self.service_id,
@@ -151,7 +152,7 @@ class Service(object, service.Service):
                 logging.error("Recovered model server connection!")
         # TODO(vish): this should probably only catch connection errors
-        except:  # pylint: disable-msg=W0702
+        except Exception:  # pylint: disable-msg=W0702
             if not getattr(self, "model_disconnected", False):
                 self.model_disconnected = True
                 logging.exception("model server went away")
diff --git a/nova/test.py b/nova/test.py
index 4eb5c1c53..c392c8a84 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -39,12 +39,6 @@ FLAGS = flags.FLAGS
 flags.DEFINE_bool('fake_tests', True,
                   'should we use everything for testing')
-from sqlalchemy import create_engine
-from sqlalchemy.ext.declarative import declarative_base
-
-engine = create_engine('sqlite:///:memory:', echo=True)
-Base = declarative_base()
-Base.metadata.create_all(engine)
 def skip_if_fake(func):
     """Decorator that skips a test if running in fake mode"""
diff --git a/nova/tests/access_unittest.py b/nova/tests/access_unittest.py
index fa0a090a0..59e1683db 100644
--- a/nova/tests/access_unittest.py
+++ b/nova/tests/access_unittest.py
@@ -33,8 +33,6 @@ class Context(object):
 class AccessTestCase(test.BaseTestCase):
     def setUp(self):
         super(AccessTestCase, self).setUp()
-        FLAGS.connection_type = 'fake'
-        FLAGS.fake_storage = True
         um = manager.AuthManager()
         # Make test users
         try:
diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py
index 59a81818c..b54e68274 100644
--- a/nova/tests/auth_unittest.py
+++ b/nova/tests/auth_unittest.py
@@ -34,8 +34,7 @@ FLAGS = flags.FLAGS
 class AuthTestCase(test.BaseTestCase):
     def setUp(self):
         super(AuthTestCase, self).setUp()
-        self.flags(connection_type='fake',
-                   fake_storage=True)
+        self.flags(connection_type='fake')
         self.manager = manager.AuthManager()
     def test_001_can_create_users(self):
diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py
index e6796e3da..29947e03c 100644
--- a/nova/tests/cloud_unittest.py
+++ b/nova/tests/cloud_unittest.py
@@ -39,8 +39,7 @@ FLAGS = flags.FLAGS
 class CloudTestCase(test.BaseTestCase):
     def setUp(self):
         super(CloudTestCase, self).setUp()
-        self.flags(connection_type='fake',
-                   fake_storage=True)
+        self.flags(connection_type='fake')
         self.conn = rpc.Connection.instance()
         logging.getLogger().setLevel(logging.DEBUG)
diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py
index 867b572f3..746c035d6 100644
--- a/nova/tests/compute_unittest.py
+++ b/nova/tests/compute_unittest.py
@@ -38,8 +38,7 @@ class ComputeTestCase(test.TrialTestCase):
     def setUp(self):  # pylint: disable-msg=C0103
         logging.getLogger().setLevel(logging.DEBUG)
         super(ComputeTestCase, self).setUp()
-        self.flags(connection_type='fake',
-                   fake_storage=True)
+        self.flags(connection_type='fake')
         self.compute = utils.import_object(FLAGS.compute_manager)
         self.manager = manager.AuthManager()
         self.user = self.manager.create_user('fake', 'fake', 'fake')
@@ -61,7 +60,7 @@ class ComputeTestCase(test.TrialTestCase):
         inst['instance_type'] = 'm1.tiny'
         inst['mac_address'] = utils.generate_mac()
         inst['ami_launch_index'] = 0
-        return db.instance_create(None, inst)
+        return db.instance_create(self.context, inst)
     @defer.inlineCallbacks
     def test_run_terminate(self):
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index 3114912ba..8f4754650 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -20,8 +20,8 @@ from nova import flags
 FLAGS = flags.FLAGS
-flags.DECLARE('fake_storage', 'nova.volume.manager')
-FLAGS.fake_storage = True
+flags.DECLARE('volume_driver', 'nova.volume.manager')
+FLAGS.volume_driver = 'nova.volume.driver.FakeAOEDriver'
 FLAGS.connection_type = 'fake'
 FLAGS.fake_rabbit = True
 FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
@@ -37,4 +37,3 @@ FLAGS.num_shelves = 2
 FLAGS.blades_per_shelf = 4
 FLAGS.verbose = True
 FLAGS.sql_connection = 'sqlite:///nova.sqlite'
-#FLAGS.sql_connection = 'mysql://root@localhost/test'
diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py
index f3124c1ba..a89f1d622 100644
--- a/nova/tests/network_unittest.py
+++ b/nova/tests/network_unittest.py
@@ -39,7 +39,6 @@ class NetworkTestCase(test.TrialTestCase):
         # NOTE(vish): if you change these flags, make sure to change the
         #             flags in the corresponding section in nova-dhcpbridge
         self.flags(connection_type='fake',
-                   fake_storage=True,
                    fake_network=True,
                    auth_driver='nova.auth.ldapdriver.FakeLdapDriver',
                    network_size=16,
@@ -87,11 +86,12 @@ class NetworkTestCase(test.TrialTestCase):
         """Makes sure that we can allocaate a public ip"""
         # TODO(vish): better way of adding floating ips
         pubnet = IPy.IP(flags.FLAGS.public_range)
-        ip_str = str(pubnet[0])
+        address = str(pubnet[0])
         try:
-            db.floating_ip_get_by_address(None, ip_str)
+            db.floating_ip_get_by_address(None, address)
         except exception.NotFound:
-            db.floating_ip_create(None, ip_str, FLAGS.host)
+            db.floating_ip_create(None, {'address': address,
+                                         'host': FLAGS.host})
         float_addr = self.network.allocate_floating_ip(self.context,
                                                        self.projects[0].id)
         fix_addr = self._create_address(0)
diff --git a/nova/tests/real_flags.py b/nova/tests/real_flags.py
index 121f4eb41..71da04992 100644
--- a/nova/tests/real_flags.py
+++ b/nova/tests/real_flags.py
@@ -21,7 +21,6 @@ from nova import flags
 FLAGS = flags.FLAGS
 FLAGS.connection_type = 'libvirt'
-FLAGS.fake_storage = False
 FLAGS.fake_rabbit = False
 FLAGS.fake_network = False
 FLAGS.verbose = False
diff --git a/nova/tests/service_unittest.py b/nova/tests/service_unittest.py
index 590d760b9..097a045e0 100644
--- a/nova/tests/service_unittest.py
+++ b/nova/tests/service_unittest.py
@@ -47,9 +47,9 @@ class ServiceTestCase(test.BaseTestCase):
         self.mox.StubOutWithMock(service, 'db')
     def test_create(self):
-        host='foo'
-        binary='nova-fake'
-        topic='fake'
+        host = 'foo'
+        binary = 'nova-fake'
+        topic = 'fake'
         self.mox.StubOutWithMock(rpc,
                                  'AdapterConsumer',
                                  use_mock_anything=True)
@@ -75,19 +75,19 @@ class ServiceTestCase(test.BaseTestCase):
         rpc.AdapterConsumer.attach_to_twisted()
         rpc.AdapterConsumer.attach_to_twisted()
         service_create = {'host': host,
-                           'binary': binary,
-                           'topic': topic,
-                           'report_count': 0}
+                          'binary': binary,
+                          'topic': topic,
+                          'report_count': 0}
         service_ref = {'host': host,
-                        'binary': binary,
-                        'report_count': 0,
-                        'id': 1}
+                       'binary': binary,
+                       'report_count': 0,
+                       'id': 1}
         service.db.service_get_by_args(None,
-                host,
-                binary).AndRaise(exception.NotFound())
+                                       host,
+                                       binary).AndRaise(exception.NotFound())
         service.db.service_create(None,
-                service_create).AndReturn(service_ref['id'])
+                                  service_create).AndReturn(service_ref['id'])
         self.mox.ReplayAll()
         app = service.Service.create(host=host, binary=binary)
@@ -101,15 +101,15 @@ class ServiceTestCase(test.BaseTestCase):
         host = 'foo'
         binary = 'bar'
         service_ref = {'host': host,
-                        'binary': binary,
-                        'report_count': 0,
-                        'id': 1}
+                       'binary': binary,
+                       'report_count': 0,
+                       'id': 1}
         service.db.__getattr__('report_state')
         service.db.service_get_by_args(None,
-                host,
-                binary).AndReturn(service_ref)
+                                       host,
+                                       binary).AndReturn(service_ref)
         service.db.service_update(None, service_ref['id'],
-                mox.ContainsKeyValue('report_count', 1))
+                                  mox.ContainsKeyValue('report_count', 1))
         self.mox.ReplayAll()
         s = service.Service()
@@ -119,22 +119,22 @@ class ServiceTestCase(test.BaseTestCase):
         host = 'foo'
         binary = 'bar'
         service_create = {'host': host,
-                           'binary': binary,
-                           'report_count': 0}
+                          'binary': binary,
+                          'report_count': 0}
         service_ref = {'host': host,
-                        'binary': binary,
-                        'report_count': 0,
-                        'id': 1}
+                       'binary': binary,
+                       'report_count': 0,
+                       'id': 1}
         service.db.__getattr__('report_state')
         service.db.service_get_by_args(None,
                                        host,
                                        binary).AndRaise(exception.NotFound())
         service.db.service_create(None,
-                service_create).AndReturn(service_ref['id'])
+                                  service_create).AndReturn(service_ref['id'])
         service.db.service_get(None, service_ref['id']).AndReturn(service_ref)
         service.db.service_update(None, service_ref['id'],
-                mox.ContainsKeyValue('report_count', 1))
+                                  mox.ContainsKeyValue('report_count', 1))
         self.mox.ReplayAll()
         s = service.Service()
@@ -144,14 +144,14 @@ class ServiceTestCase(test.BaseTestCase):
         host = 'foo'
         binary = 'bar'
         service_ref = {'host': host,
-                        'binary': binary,
-                        'report_count': 0,
-                        'id': 1}
+                       'binary': binary,
+                       'report_count': 0,
+                       'id': 1}
         service.db.__getattr__('report_state')
         service.db.service_get_by_args(None,
-                host,
-                binary).AndRaise(Exception())
+                                       host,
+                                       binary).AndRaise(Exception())
         self.mox.ReplayAll()
         s = service.Service()
@@ -163,16 +163,16 @@ class ServiceTestCase(test.BaseTestCase):
         host = 'foo'
         binary = 'bar'
         service_ref = {'host': host,
-                        'binary': binary,
-                        'report_count': 0,
-                        'id': 1}
+                       'binary': binary,
+                       'report_count': 0,
+                       'id': 1}
         service.db.__getattr__('report_state')
         service.db.service_get_by_args(None,
-                host,
-                binary).AndReturn(service_ref)
+                                       host,
+                                       binary).AndReturn(service_ref)
         service.db.service_update(None, service_ref['id'],
-                mox.ContainsKeyValue('report_count', 1))
+                                  mox.ContainsKeyValue('report_count', 1))
         self.mox.ReplayAll()
         s = service.Service()
diff --git a/nova/tests/storage_unittest.py b/nova/tests/storage_unittest.py
deleted file mode 100644
index f400cd2fd..000000000
--- a/nova/tests/storage_unittest.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import logging
-
-from nova import exception
-from nova import flags
-from nova import test
-from nova.compute import node
-from nova.volume import storage
-
-
-FLAGS = flags.FLAGS
-
-
-class StorageTestCase(test.TrialTestCase):
-    def setUp(self):
-        logging.getLogger().setLevel(logging.DEBUG)
-        super(StorageTestCase, self).setUp()
-        self.mynode = node.Node()
-        self.mystorage = None
-        self.flags(connection_type='fake',
-                   fake_storage=True)
-        self.mystorage = storage.BlockStore()
-
-    def test_run_create_volume(self):
-        vol_size = '0'
-        user_id = 'fake'
-        project_id = 'fake'
-        volume_id = self.mystorage.create_volume(vol_size, user_id, project_id)
-        # TODO(termie): get_volume returns differently than create_volume
-        self.assertEqual(volume_id,
-                         storage.get_volume(volume_id)['volume_id'])
-
-        rv = self.mystorage.delete_volume(volume_id)
-        self.assertRaises(exception.Error,
-                          storage.get_volume,
-                          volume_id)
-
-    def test_too_big_volume(self):
-        vol_size = '1001'
-        user_id = 'fake'
-        project_id = 'fake'
-        self.assertRaises(TypeError,
-                          self.mystorage.create_volume,
-                          vol_size, user_id, project_id)
-
-    def test_too_many_volumes(self):
-        vol_size = '1'
-        user_id = 'fake'
-        project_id = 'fake'
-        num_shelves = FLAGS.last_shelf_id - FLAGS.first_shelf_id + 1
-        total_slots = FLAGS.slots_per_shelf * num_shelves
-        vols = []
-        for i in xrange(total_slots):
-            vid = self.mystorage.create_volume(vol_size, user_id, project_id)
-            vols.append(vid)
-        self.assertRaises(storage.NoMoreVolumes,
-                          self.mystorage.create_volume,
-                          vol_size, user_id, project_id)
-        for id in vols:
-            self.mystorage.delete_volume(id)
-
-    def test_run_attach_detach_volume(self):
-        # Create one volume and one node to test with
-        instance_id = "storage-test"
-        vol_size = "5"
-        user_id = "fake"
-        project_id = 'fake'
-        mountpoint = "/dev/sdf"
-        volume_id = self.mystorage.create_volume(vol_size, user_id, project_id)
-
-        volume_obj = storage.get_volume(volume_id)
-        volume_obj.start_attach(instance_id, mountpoint)
-        rv = yield self.mynode.attach_volume(volume_id,
-                                             instance_id,
-                                             mountpoint)
-        self.assertEqual(volume_obj['status'], "in-use")
-        self.assertEqual(volume_obj['attachStatus'], "attached")
-        self.assertEqual(volume_obj['instance_id'], instance_id)
-        self.assertEqual(volume_obj['mountpoint'], mountpoint)
-
-        self.assertRaises(exception.Error,
-                          self.mystorage.delete_volume,
-                          volume_id)
-
-        rv = yield self.mystorage.detach_volume(volume_id)
-        volume_obj = storage.get_volume(volume_id)
-        self.assertEqual(volume_obj['status'], "available")
-
-        rv = self.mystorage.delete_volume(volume_id)
-        self.assertRaises(exception.Error,
-                          storage.get_volume,
-                          volume_id)
-
-    def test_multi_node(self):
-        # TODO(termie): Figure out how to test with two nodes,
-        #               each of them having a different FLAG for storage_node
-        #               This will allow us to test cross-node interactions
-        pass
diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py
index 0df0c20d6..9e35d2a1c 100644
--- a/nova/tests/volume_unittest.py
+++ b/nova/tests/volume_unittest.py
@@ -37,8 +37,7 @@ class VolumeTestCase(test.TrialTestCase):
         logging.getLogger().setLevel(logging.DEBUG)
         super(VolumeTestCase, self).setUp()
         self.compute = utils.import_object(FLAGS.compute_manager)
-        self.flags(connection_type='fake',
-                   fake_storage=True)
+        self.flags(connection_type='fake')
         self.volume = utils.import_object(FLAGS.volume_manager)
         self.context = None

@@ -100,7 +99,16 @@ class VolumeTestCase(test.TrialTestCase):
     @defer.inlineCallbacks
     def test_run_attach_detach_volume(self):
         """Make sure volume can be attached and detached from instance"""
-        instance_id = "storage-test"
+        inst = {}
+        inst['image_id'] = 'ami-test'
+        inst['reservation_id'] = 'r-fakeres'
+        inst['launch_time'] = '10'
+        inst['user_id'] = 'fake'
+        inst['project_id'] = 'fake'
+        inst['instance_type'] = 'm1.tiny'
+        inst['mac_address'] = utils.generate_mac()
+        inst['ami_launch_index'] = 0
+        instance_id = db.instance_create(self.context, inst)
         mountpoint = "/dev/sdf"
         volume_id = self._create_volume()
         yield self.volume.create_volume(self.context, volume_id)
@@ -113,8 +121,9 @@ class VolumeTestCase(test.TrialTestCase):
         vol = db.volume_get(None, volume_id)
         self.assertEqual(vol['status'], "in-use")
         self.assertEqual(vol['attach_status'], "attached")
-        self.assertEqual(vol['instance_id'], instance_id)
         self.assertEqual(vol['mountpoint'], mountpoint)
+        instance_ref = db.volume_get_instance(self.context, volume_id)
+        self.assertEqual(instance_ref['id'], instance_id)

         self.assertFailure(self.volume.delete_volume(self.context, volume_id),
                            exception.Error)
@@ -131,6 +140,7 @@ class VolumeTestCase(test.TrialTestCase):
                           db.volume_get,
                           None,
                           volume_id)
+        db.instance_destroy(self.context, instance_id)

     @defer.inlineCallbacks
     def test_concurrent_volumes_get_different_blades(self):
diff --git a/nova/utils.py b/nova/utils.py
index 3e4a3d94f..011a5cb09 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -196,7 +196,7 @@ class LazyPluggable(object):
             fromlist = backend

         self.__backend = __import__(name, None, None, fromlist)
-        logging.error('backend %s', self.__backend)
+        logging.info('backend %s', self.__backend)
         return self.__backend

     def __getattr__(self, key):
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 7bf2a68b1..6f708bb80 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -139,12 +139,16 @@ class LibvirtConnection(object):
         timer = task.LoopingCall(f=None)
         def _wait_for_shutdown():
             try:
-                instance.set_state(self.get_info(instance['name'])['state'])
+                db.instance_set_state(None,
+                                      instance['id'],
+                                      self.get_info(instance['name'])['state'])
                 if instance.state == power_state.SHUTDOWN:
                     timer.stop()
                     d.callback(None)
             except Exception:
-                instance.set_state(power_state.SHUTDOWN)
+                db.instance_set_state(None,
+                                      instance['id'],
+                                      power_state.SHUTDOWN)
                 timer.stop()
                 d.callback(None)
         timer.f = _wait_for_shutdown
@@ -186,14 +190,18 @@ class LibvirtConnection(object):
         timer = task.LoopingCall(f=None)
         def _wait_for_reboot():
             try:
-                instance.set_state(self.get_info(instance['name'])['state'])
+                db.instance_set_state(None,
+                                      instance['id'],
+                                      self.get_info(instance['name'])['state'])
                 if instance.state == power_state.RUNNING:
                     logging.debug('instance %s: rebooted', instance['name'])
                     timer.stop()
                     d.callback(None)
             except Exception, exn:
                 logging.error('_wait_for_reboot failed: %s', exn)
-                instance.set_state(power_state.SHUTDOWN)
+                db.instance_set_state(None,
+                                      instance['id'],
+                                      power_state.SHUTDOWN)
                 timer.stop()
                 d.callback(None)
         timer.f = _wait_for_reboot
@@ -204,7 +212,10 @@ class LibvirtConnection(object):
     @exception.wrap_exception
     def spawn(self, instance):
         xml = self.to_xml(instance)
-        instance.set_state(power_state.NOSTATE, 'launching')
+        db.instance_set_state(None,
+                              instance['id'],
+                              power_state.NOSTATE,
+                              'launching')
         yield self._create_image(instance, xml)
         yield self._conn.createXML(xml, 0)
         # TODO(termie): this should actually register
@@ -215,7 +226,9 @@ class LibvirtConnection(object):
         timer = task.LoopingCall(f=None)
         def _wait_for_boot():
             try:
-                instance.set_state(self.get_info(instance['name'])['state'])
+                db.instance_set_state(None,
+                                      instance['id'],
+                                      self.get_info(instance['name'])['state'])
                 if instance.state == power_state.RUNNING:
                     logging.debug('instance %s: booted', instance['name'])
                     timer.stop()
@@ -223,7 +236,9 @@
             except:
                 logging.exception('instance %s: failed to boot',
                                   instance['name'])
-                instance.set_state(power_state.SHUTDOWN)
+                db.instance_set_state(None,
+                                      instance['id'],
+                                      power_state.SHUTDOWN)
                 timer.stop()
                 local_d.callback(None)
         timer.f = _wait_for_boot
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index f875e0213..4604b85d5 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -92,9 +92,9 @@ class AOEDriver(object):
         # NOTE(ja): wait for blades to appear
         yield self._execute("sleep 5")
         yield self._execute("sudo vblade-persist auto all",
-                           check_exit_code=False)
+                            check_exit_code=False)
         yield self._execute("sudo vblade-persist start all",
-                           check_exit_code=False)
+                            check_exit_code=False)


 class FakeAOEDriver(AOEDriver):
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index c4fa1f982..174c036d6 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -35,8 +35,6 @@ FLAGS = flags.FLAGS

 flags.DEFINE_string('storage_availability_zone', 'nova',
                     'availability zone of this service')
-flags.DEFINE_boolean('fake_storage', False,
-                     'Should we make real storage volumes to attach?')
 flags.DEFINE_string('volume_driver', 'nova.volume.driver.AOEDriver',
                     'Driver to use for volume creation')
 flags.DEFINE_integer('num_shelves',
@@ -51,11 +49,7 @@ class AOEManager(manager.Manager):
     """Manages Ata-Over_Ethernet volumes"""

     def __init__(self, volume_driver=None, *args, **kwargs):
         if not volume_driver:
-            # NOTE(vish): support the legacy fake storage flag
-            if FLAGS.fake_storage:
-                volume_driver = 'nova.volume.driver.FakeAOEDriver'
-            else:
-                volume_driver = FLAGS.volume_driver
+            volume_driver = FLAGS.volume_driver
         self.driver = utils.import_object(volume_driver)
         super(AOEManager, self).__init__(*args, **kwargs)
@@ -92,7 +86,9 @@ class AOEManager(manager.Manager):
         logging.debug("volume %s: exporting shelf %s & blade %s",
                       volume_id, shelf_id, blade_id)

-        yield self.driver.create_export(volume_ref['str_id'], shelf_id, blade_id)
+        yield self.driver.create_export(volume_ref['str_id'],
+                                        shelf_id,
+                                        blade_id)

         # TODO(joshua): We need to trigger a fanout message
         # for aoe-discover on all the nodes
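A note on the floating-ip hunk in network_unittest.py above: db.floating_ip_create now takes a context plus a single dict of column values instead of the old positional (address, host) arguments. A minimal sketch of the get-or-create idiom the test relies on; the helper name ensure_floating_ip is an invention for illustration, not part of this patch:

    from nova import db
    from nova import exception

    def ensure_floating_ip(context, address, host):
        # Hypothetical helper: reuse the row if the address is already
        # registered, otherwise create it with a values dict.
        try:
            return db.floating_ip_get_by_address(context, address)
        except exception.NotFound:
            return db.floating_ip_create(context, {'address': address,
                                                   'host': host})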
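The service_unittest.py expectations above pin down the report_state flow: look the service row up by (host, binary), recreate it if the row has disappeared, bump report_count on success, and swallow other database errors. Roughly, as a paraphrase of the mocked calls rather than a copy of nova/service.py (self.host, self.binary and self.model_disconnected are assumed attribute names):

    # (assumes: from nova import db; from nova import exception)
    def report_state(self, context=None):
        try:
            try:
                service_ref = db.service_get_by_args(context,
                                                     self.host,
                                                     self.binary)
            except exception.NotFound:
                # The restart case: the row is gone, so recreate it and
                # fetch a fresh reference.
                service_id = db.service_create(context,
                                               {'host': self.host,
                                                'binary': self.binary,
                                                'report_count': 0})
                service_ref = db.service_get(context, service_id)
            db.service_update(context, service_ref['id'],
                              {'report_count':
                               service_ref['report_count'] + 1})
        except Exception:
            # The errback case: a flaky database should not kill the
            # periodic reporting loop.
            self.model_disconnected = True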
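With storage_unittest.py deleted, its attach/detach coverage moves into volume_unittest.py against the database layer, so the test first has to create a real instance row. The eight inst[...] keys in the hunk appear to be the minimum column set the instances table accepts; as a single literal the same setup reads (an illustrative restatement of the hunk, not code from the patch):

    # (assumes: from nova import db; from nova import utils)
    inst = {'image_id': 'ami-test',
            'reservation_id': 'r-fakeres',
            'launch_time': '10',
            'user_id': 'fake',
            'project_id': 'fake',
            'instance_type': 'm1.tiny',
            'mac_address': utils.generate_mac(),
            'ami_launch_index': 0}
    instance_id = db.instance_create(self.context, inst)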
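The attach assertions also change shape: the volume row no longer carries a bare instance_id string, so the attached instance is resolved back through db.volume_get_instance. A hedged sketch of that pattern pulled into a helper (the helper itself is illustrative and not in the patch):

    from nova import db

    def assert_attached(context, volume_id, instance_id, mountpoint):
        # Mirrors the assertions in test_run_attach_detach_volume above.
        vol = db.volume_get(context, volume_id)
        assert vol['status'] == "in-use"
        assert vol['attach_status'] == "attached"
        assert vol['mountpoint'] == mountpoint
        # Resolve the instance through the volume relationship rather
        # than a string column on the volume itself.
        instance_ref = db.volume_get_instance(context, volume_id)
        assert instance_ref['id'] == instance_id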
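In libvirt_conn.py the instance argument is now a plain database record, so each polling callback repeats the same multi-line db.instance_set_state call. The hunks show the signature taking a state plus an optional description; if the repetition grows, a small module-level helper along these lines (a suggestion, not code from the patch) would keep the LoopingCall bodies readable:

    def _set_state(instance, state, description=None):
        # Thin wrapper over the calls in _wait_for_shutdown,
        # _wait_for_reboot and _wait_for_boot above.
        if description is None:
            db.instance_set_state(None, instance['id'], state)
        else:
            db.instance_set_state(None, instance['id'], state, description)

One thing worth double-checking in review: the callbacks write the new state through the db layer but still branch on instance.state afterwards, which only works if the passed-in ref reflects the update.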
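Finally, with the fake_storage boolean gone from volume/manager.py, fake behavior is selected like any other driver: point volume_driver at the FakeAOEDriver subclass visible in driver.py. For example (illustrative; the corresponding fake_flags.py change is not visible in this section):

    # Pass the driver explicitly...
    volume = AOEManager(volume_driver='nova.volume.driver.FakeAOEDriver')
    # ...or override the flag and let __init__ fall back to it, the way
    # the volume tests build their manager.
    FLAGS.volume_driver = 'nova.volume.driver.FakeAOEDriver'
    volume = utils.import_object(FLAGS.volume_manager)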
