| author | Joe Heck <heckj@mac.com> | 2010-08-10 16:27:13 +0000 |
|---|---|---|
| committer | Tarmac <> | 2010-08-10 16:27:13 +0000 |
| commit | f48ef5ef4d3ca39084c66d874bf1e99ff81e9f48 (patch) | |
| tree | 22c04f754b71d4ba86779f9e1c2e626e067b16d7 | |
| parent | b13f0646a186b915fccacdce1f9d7a03add56e32 (diff) | |
| parent | b17c70543e76667389835d3e322b09038ec93c8c (diff) | |
Two changes from PEP8 & Pylint cleanup:
* add pep8 and pylint to the pip requirements file in tools/
* light cleanup (mostly formatting) of nova/endpoint/cloud.py

Note: on my laptop, two tests were already failing with a basic setup. After the cleanup I re-ran the full test suite and saw no additional or different failures.
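For context, here is a minimal sketch of how the newly pinned checkers might be run over the files touched here, once tools/pip-requires has been installed. The target list and the bare command-line invocations are illustrative assumptions, not something this commit adds:

```python
"""Illustrative only: run the style checkers pinned in tools/pip-requires
over the files touched by this commit. The TARGETS list and the plain
command-line invocations below are assumptions, not part of the commit."""
import subprocess

TARGETS = [
    "nova/auth/manager.py",
    "nova/endpoint/cloud.py",
    "nova/network/model.py",
]

for target in TARGETS:
    # pep8 prints one line per style violation and exits non-zero if any exist.
    subprocess.call(["pep8", target])
    # pylint produces a fuller report (naming, unused imports, line length, ...).
    subprocess.call(["pylint", target])
```

Running the tools directly from the shell works just as well; the loop is only a convenience.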
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | nova/auth/manager.py | 11 |
| -rw-r--r-- | nova/endpoint/cloud.py | 105 |
| -rw-r--r-- | nova/network/model.py | 34 |
| -rw-r--r-- | tools/pip-requires | 2 |

4 files changed, 80 insertions, 72 deletions
```diff
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index d44ed52b2..6d71a7ad6 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -29,15 +29,13 @@ import uuid
 import zipfile
 
 from nova import crypto
-from nova import datastore
 from nova import exception
 from nova import flags
-from nova import objectstore # for flags
 from nova import utils
-from nova.auth import ldapdriver # for flags
 from nova.auth import signer
 from nova.network import vpn
 
+
 FLAGS = flags.FLAGS
 
 # NOTE(vish): a user with one of these roles will be a superuser and
@@ -99,6 +97,7 @@ class AuthBase(object):
 class User(AuthBase):
     """Object representing a user"""
     def __init__(self, id, name, access, secret, admin):
+        AuthBase.__init__(self)
         self.id = id
         self.name = name
         self.access = access
@@ -159,6 +158,7 @@ class KeyPair(AuthBase):
     fingerprint is stored. The user's private key is not saved.
     """
     def __init__(self, id, name, owner_id, public_key, fingerprint):
+        AuthBase.__init__(self)
         self.id = id
         self.name = name
         self.owner_id = owner_id
@@ -176,6 +176,7 @@ class KeyPair(AuthBase):
 class Project(AuthBase):
     """Represents a Project returned from the datastore"""
     def __init__(self, id, name, project_manager_id, description, member_ids):
+        AuthBase.__init__(self)
         self.id = id
         self.name = name
         self.project_manager_id = project_manager_id
@@ -234,7 +235,7 @@ class AuthManager(object):
     AuthManager also manages associated data related to Auth objects that
     need to be more accessible, such as vpn ips and ports.
     """
-    _instance=None
+    _instance = None
     def __new__(cls, *args, **kwargs):
         """Returns the AuthManager singleton"""
         if not cls._instance:
@@ -248,7 +249,7 @@ class AuthManager(object):
        reset the driver if it is not set or a new driver is specified.
        """
        if driver or not getattr(self, 'driver', None):
-           self.driver = utils.import_class(driver or FLAGS.auth_driver)
+           self.driver = utils.import_class(driver or FLAGS.auth_driver)
 
     def authenticate(self, access, signature, params, verb='GET',
                      server_string='127.0.0.1:8773', path='/',
diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py
index 878d54a15..ad9188ff3 100644
--- a/nova/endpoint/cloud.py
+++ b/nova/endpoint/cloud.py
@@ -47,6 +47,7 @@
 FLAGS = flags.FLAGS
 flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on')
 
+
 def _gen_key(user_id, key_name):
     """ Tuck this into AuthManager """
     try:
@@ -102,15 +103,16 @@ class CloudController(object):
         result = {}
         for instance in self.instdir.all:
             if instance['project_id'] == project_id:
-                line = '%s slots=%d' % (instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus'])
+                line = '%s slots=%d' % (instance['private_dns_name'],
+                    INSTANCE_TYPES[instance['instance_type']]['vcpus'])
                 if instance['key_name'] in result:
                     result[instance['key_name']].append(line)
                 else:
                     result[instance['key_name']] = [line]
         return result
 
-    def get_metadata(self, ip):
-        i = self.get_instance_by_ip(ip)
+    def get_metadata(self, ipaddress):
+        i = self.get_instance_by_ip(ipaddress)
         if i is None:
             return None
         mpi = self._get_mpi_data(i['project_id'])
@@ -147,7 +149,7 @@ class CloudController(object):
            },
            'public-hostname': i.get('dns_name', ''),
            'public-ipv4': i.get('dns_name', ''),  # TODO: switch to IP
-           'public-keys' : keys,
+           'public-keys': keys,
            'ramdisk-id': i.get('ramdisk_id', ''),
            'reservation-id': i['reservation_id'],
            'security-groups': i.get('groups', ''),
@@ -203,26 +205,22 @@ class CloudController(object):
                 'keyFingerprint': key_pair.fingerprint,
             })
 
-        return { 'keypairsSet': result }
+        return {'keypairsSet': result}
 
     @rbac.allow('all')
     def create_key_pair(self, context, key_name, **kwargs):
-        try:
-            d = defer.Deferred()
-            p = context.handler.application.settings.get('pool')
-            def _complete(kwargs):
-                if 'exception' in kwargs:
-                    d.errback(kwargs['exception'])
-                    return
-                d.callback({'keyName': key_name,
-                            'keyFingerprint': kwargs['fingerprint'],
-                            'keyMaterial': kwargs['private_key']})
-            p.apply_async(_gen_key, [context.user.id, key_name],
-                callback=_complete)
-            return d
-
-        except manager.UserError as e:
-            raise
+        dcall = defer.Deferred()
+        pool = context.handler.application.settings.get('pool')
+        def _complete(kwargs):
+            if 'exception' in kwargs:
+                dcall.errback(kwargs['exception'])
+                return
+            dcall.callback({'keyName': key_name,
+                            'keyFingerprint': kwargs['fingerprint'],
+                            'keyMaterial': kwargs['private_key']})
+        pool.apply_async(_gen_key, [context.user.id, key_name],
+            callback=_complete)
+        return dcall
 
     @rbac.allow('all')
     def delete_key_pair(self, context, key_name, **kwargs):
@@ -232,7 +230,7 @@ class CloudController(object):
 
     @rbac.allow('all')
     def describe_security_groups(self, context, group_names, **kwargs):
-        groups = { 'securityGroupSet': [] }
+        groups = {'securityGroupSet': []}
 
         # Stubbed for now to unblock other things.
         return groups
@@ -251,7 +249,7 @@ class CloudController(object):
         instance = self._get_instance(context, instance_id[0])
         return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
             {"method": "get_console_output",
-             "args" : {"instance_id": instance_id[0]}})
+             "args": {"instance_id": instance_id[0]}})
 
     def _get_user_id(self, context):
         if context and context.user:
@@ -285,10 +283,10 @@ class CloudController(object):
         if volume['attach_status'] == 'attached':
             v['attachmentSet'] = [{'attachTime': volume['attach_time'],
                                    'deleteOnTermination': volume['delete_on_termination'],
-                                   'device' : volume['mountpoint'],
-                                   'instanceId' : volume['instance_id'],
-                                   'status' : 'attached',
-                                   'volume_id' : volume['volume_id']}]
+                                   'device': volume['mountpoint'],
+                                   'instanceId': volume['instance_id'],
+                                   'status': 'attached',
+                                   'volume_id': volume['volume_id']}]
         else:
             v['attachmentSet'] = [{}]
         return v
@@ -298,7 +296,7 @@ class CloudController(object):
     def create_volume(self, context, size, **kwargs):
         # TODO(vish): refactor this to create the volume object here and tell service to create it
         result = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume",
-                                 "args" : {"size": size,
+                                 "args": {"size": size,
                                            "user_id": context.user.id,
                                            "project_id": context.project.id}})
         # NOTE(vish): rpc returned value is in the result key in the dictionary
@@ -348,15 +346,15 @@ class CloudController(object):
         compute_node = instance['node_name']
         rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node),
                  {"method": "attach_volume",
-                  "args" : {"volume_id": volume_id,
-                            "instance_id" : instance_id,
-                            "mountpoint" : device}})
-        return defer.succeed({'attachTime' : volume['attach_time'],
-                              'device' : volume['mountpoint'],
-                              'instanceId' : instance_id,
-                              'requestId' : context.request_id,
-                              'status' : volume['attach_status'],
-                              'volumeId' : volume_id})
+                  "args": {"volume_id": volume_id,
+                           "instance_id": instance_id,
+                           "mountpoint": device}})
+        return defer.succeed({'attachTime': volume['attach_time'],
+                              'device': volume['mountpoint'],
+                              'instanceId': instance_id,
+                              'requestId': context.request_id,
+                              'status': volume['attach_status'],
+                              'volumeId': volume_id})
 
 
     @rbac.allow('projectmanager', 'sysadmin')
@@ -372,18 +370,18 @@ class CloudController(object):
             instance = self._get_instance(context, instance_id)
             rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
                      {"method": "detach_volume",
-                      "args" : {"instance_id": instance_id,
+                      "args": {"instance_id": instance_id,
                                 "volume_id": volume_id}})
         except exception.NotFound:
             # If the instance doesn't exist anymore,
             # then we need to call detach blind
             volume.finish_detach()
-        return defer.succeed({'attachTime' : volume['attach_time'],
-                              'device' : volume['mountpoint'],
-                              'instanceId' : instance_id,
-                              'requestId' : context.request_id,
-                              'status' : volume['attach_status'],
-                              'volumeId' : volume_id})
+        return defer.succeed({'attachTime': volume['attach_time'],
+                              'device': volume['mountpoint'],
+                              'instanceId': instance_id,
+                              'requestId': context.request_id,
+                              'status': volume['attach_status'],
+                              'volumeId': volume_id})
 
     def _convert_to_set(self, lst, label):
         if lst == None or lst == []:
@@ -425,7 +423,8 @@ class CloudController(object):
             i['key_name'] = instance.get('key_name', None)
             if context.user.is_admin():
                 i['key_name'] = '%s (%s, %s)' % (i['key_name'],
-                    instance.get('project_id', None), instance.get('node_name',''))
+                    instance.get('project_id', None),
+                    instance.get('node_name', ''))
             i['product_codes_set'] = self._convert_to_set(
                 instance.get('product_codes', None), 'product_code')
             i['instance_type'] = instance.get('instance_type', None)
@@ -442,7 +441,7 @@ class CloudController(object):
                 reservations[res_id] = r
             reservations[res_id]['instances_set'].append(i)
 
-        instance_response = {'reservationSet' : list(reservations.values()) }
+        instance_response = {'reservationSet': list(reservations.values())}
         return instance_response
 
     @rbac.allow('all')
@@ -457,7 +456,7 @@ class CloudController(object):
                 address['project_id'] == context.project.id):
                 address_rv = {
                     'public_ip': address['address'],
-                    'instance_id' : address.get('instance_id', 'free')
+                    'instance_id': address.get('instance_id', 'free')
                 }
                 if context.user.is_admin():
                     address_rv['instance_id'] = "%s (%s, %s)" % (
@@ -477,7 +476,7 @@ class CloudController(object):
                                       "args": {"user_id": context.user.id,
                                                "project_id": context.project.id}})
         public_ip = alloc_result['result']
-        defer.returnValue({'addressSet': [{'publicIp' : public_ip}]})
+        defer.returnValue({'addressSet': [{'publicIp': public_ip}]})
 
     @rbac.allow('netadmin')
     @defer.inlineCallbacks
@@ -591,7 +590,7 @@ class CloudController(object):
             inst.save()
             rpc.cast(FLAGS.compute_topic,
                  {"method": "run_instance",
-                  "args": {"instance_id" : inst.instance_id}})
+                  "args": {"instance_id": inst.instance_id}})
             logging.debug("Casting to node for %s's instance with IP of %s" %
                           (context.user.name, inst['private_dns_name']))
         # TODO: Make Network figure out the network name from ip.
@@ -646,7 +645,7 @@ class CloudController(object):
             instance = self._get_instance(context, i)
             rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']),
                      {"method": "reboot_instance",
-                      "args" : {"instance_id": i}})
+                      "args": {"instance_id": i}})
         return defer.succeed(True)
 
     @rbac.allow('projectmanager', 'sysadmin')
@@ -656,7 +655,7 @@ class CloudController(object):
         volume_node = volume['node_name']
         rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node),
                  {"method": "delete_volume",
-                  "args" : {"volume_id": volume_id}})
+                  "args": {"volume_id": volume_id}})
         return defer.succeed(True)
 
     @rbac.allow('all')
@@ -689,9 +688,9 @@ class CloudController(object):
             image = images.list(context, image_id)[0]
         except IndexError:
             raise exception.ApiError('invalid id: %s' % image_id)
-        result = { 'image_id': image_id, 'launchPermission': [] }
+        result = {'image_id': image_id, 'launchPermission': []}
         if image['isPublic']:
-            result['launchPermission'].append({ 'group': 'all' })
+            result['launchPermission'].append({'group': 'all'})
         return defer.succeed(result)
 
     @rbac.allow('projectmanager', 'sysadmin')
diff --git a/nova/network/model.py b/nova/network/model.py
index daac035e4..eada776c7 100644
--- a/nova/network/model.py
+++ b/nova/network/model.py
@@ -97,11 +97,11 @@ class Vlan(datastore.BasicModel):
     def dict_by_vlan(cls):
         """a hash of vlan:project"""
         set_name = cls._redis_set_name(cls.__name__)
-        rv = {}
-        h = datastore.Redis.instance().hgetall(set_name)
-        for v in h.keys():
-            rv[h[v]] = v
-        return rv
+        retvals = {}
+        hashset = datastore.Redis.instance().hgetall(set_name)
+        for val in hashset.keys():
+            retvals[hashset[val]] = val
+        return retvals
 
     @classmethod
     @datastore.absorb_connection_error
@@ -136,7 +136,8 @@ class Vlan(datastore.BasicModel):
 
 # CLEANUP:
 # TODO(ja): Save the IPs at the top of each subnet for cloudpipe vpn clients
-# TODO(ja): does vlanpool "keeper" need to know the min/max - shouldn't FLAGS always win?
+# TODO(ja): does vlanpool "keeper" need to know the min/max -
+#           shouldn't FLAGS always win?
 # TODO(joshua): Save the IPs at the top of each subnet for cloudpipe vpn clients
 
 class BaseNetwork(datastore.BasicModel):
@@ -217,7 +218,9 @@ class BaseNetwork(datastore.BasicModel):
     def available(self):
         # the .2 address is always CloudPipe
         # and the top <n> are for vpn clients
-        for idx in range(self.num_static_ips, len(self.network)-(1 + FLAGS.cnt_vpn_clients)):
+        num_ips = self.num_static_ips
+        num_clients = FLAGS.cnt_vpn_clients
+        for idx in range(num_ips, len(self.network)-(1 + num_clients)):
             address = str(self.network[idx])
             if not address in self.hosts.keys():
                 yield address
@@ -338,8 +341,9 @@ class DHCPNetwork(BridgedNetwork):
         private_ip = str(self.network[2])
         linux_net.confirm_rule("FORWARD -d %s -p udp --dport 1194 -j ACCEPT"
                                % (private_ip, ))
-        linux_net.confirm_rule("PREROUTING -t nat -d %s -p udp --dport %s -j DNAT --to %s:1194"
-                               % (self.project.vpn_ip, self.project.vpn_port, private_ip))
+        linux_net.confirm_rule(
+            "PREROUTING -t nat -d %s -p udp --dport %s -j DNAT --to %s:1194"
+            % (self.project.vpn_ip, self.project.vpn_port, private_ip))
 
     def deexpress(self, address=None):
         # if this is the last address, stop dns
@@ -374,13 +378,14 @@ class PublicAddress(datastore.BasicModel):
         return addr
 
 
-DEFAULT_PORTS = [("tcp",80), ("tcp",22), ("udp",1194), ("tcp",443)]
+DEFAULT_PORTS = [("tcp", 80), ("tcp", 22), ("udp", 1194), ("tcp", 443)]
 class PublicNetworkController(BaseNetwork):
     override_type = 'network'
 
     def __init__(self, *args, **kwargs):
         network_id = "public:default"
-        super(PublicNetworkController, self).__init__(network_id, FLAGS.public_range)
+        super(PublicNetworkController, self).__init__(network_id,
+                                                      FLAGS.public_range)
         self['user_id'] = "public"
         self['project_id'] = "public"
         self["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
@@ -415,7 +420,7 @@ class PublicNetworkController(BaseNetwork):
 
     def deallocate_ip(self, ip_str):
         # NOTE(vish): cleanup is now done on release by the parent class
-        self.release_ip(ip_str)
+        self.release_ip(ip_str)
 
     def associate_address(self, public_ip, private_ip, instance_id):
         if not public_ip in self.assigned:
@@ -461,8 +466,9 @@ class PublicNetworkController(BaseNetwork):
         linux_net.confirm_rule("FORWARD -d %s -p icmp -j ACCEPT"
                                % (private_ip))
         for (protocol, port) in DEFAULT_PORTS:
-            linux_net.confirm_rule("FORWARD -d %s -p %s --dport %s -j ACCEPT"
-                                   % (private_ip, protocol, port))
+            linux_net.confirm_rule(
+                "FORWARD -d %s -p %s --dport %s -j ACCEPT"
+                % (private_ip, protocol, port))
 
     def deexpress(self, address=None):
         addr = self.get_host(address)
diff --git a/tools/pip-requires b/tools/pip-requires
index 4eb47ca2b..24aefb25e 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -1,3 +1,5 @@
+pep8==0.5.0
+pylint==0.21.1
 IPy==0.70
 M2Crypto==0.20.2
 amqplib==0.6.1
```
