summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author: Josh Kearney <josh@jk0.org> 2012-03-06 16:18:55 -0600
committer: Josh Kearney <josh@jk0.org> 2012-03-06 17:14:26 -0600
commit: 0356f8c1decdc6a2a50e53d148f9a2d8884a4b6f (patch)
tree: c45c1937c19967f53d2104c50c47c6a0536f3c3b
parent: 67715309a1cda8cc56bf4e528774f75e652bf6d5 (diff)
download: nova-0356f8c1decdc6a2a50e53d148f9a2d8884a4b6f.tar.gz
download: nova-0356f8c1decdc6a2a50e53d148f9a2d8884a4b6f.tar.xz
download: nova-0356f8c1decdc6a2a50e53d148f9a2d8884a4b6f.zip
Cleans up a bunch of unused variables in XenAPI.
Also fixes a bug in XenAPI where xml.parsers.expat was not imported, but was being used to catch an exception.

Change-Id: Idfb3b94317ecc13210fa221396c6f2dc10be3ac9
-rw-r--r--nova/tests/test_xenapi.py4
-rw-r--r--nova/virt/xenapi/host.py4
-rw-r--r--nova/virt/xenapi/pool.py4
-rw-r--r--nova/virt/xenapi/vm_utils.py39
-rw-r--r--nova/virt/xenapi/vmops.py9
5 files changed, 28 insertions, 32 deletions
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 5c5a9d657..cc7dabd73 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -1364,7 +1364,7 @@ class XenAPIBWUsageTestCase(test.TestCase):
self.conn = xenapi_conn.get_connection(False)
@classmethod
- def _fake_compile_metrics(cls, session, start_time, stop_time=None):
+ def _fake_compile_metrics(cls, start_time, stop_time=None):
raise exception.CouldNotFetchMetrics()
def test_get_all_bw_usage_in_failure_case(self):
@@ -1790,7 +1790,7 @@ class XenAPIAggregateTestCase(test.TestCase):
def test_join_slave(self):
"""Ensure join_slave gets called when the request gets to master."""
- def fake_join_slave(id, compute_uuid, host, url, user, password):
+ def fake_join_slave(id, compute_uuid, url, user, password):
fake_join_slave.called = True
self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave)
diff --git a/nova/virt/xenapi/host.py b/nova/virt/xenapi/host.py
index 7e047da51..4069c484a 100644
--- a/nova/virt/xenapi/host.py
+++ b/nova/virt/xenapi/host.py
@@ -39,7 +39,7 @@ class Host(object):
self.XenAPI = session.get_imported_xenapi()
self._session = session
- def host_power_action(self, host, action):
+ def host_power_action(self, _host, action):
"""Reboots or shuts down the host."""
args = {"action": json.dumps(action)}
methods = {"reboot": "host_reboot", "shutdown": "host_shutdown"}
@@ -92,7 +92,7 @@ class Host(object):
else:
return 'off_maintenance'
- def set_host_enabled(self, host, enabled):
+ def set_host_enabled(self, _host, enabled):
"""Sets the specified host's ability to accept new instances."""
args = {"enabled": json.dumps(enabled)}
response = call_xenhost(self._session, "set_host_enabled", args)
diff --git a/nova/virt/xenapi/pool.py b/nova/virt/xenapi/pool.py
index 01db91e31..5661fa01a 100644
--- a/nova/virt/xenapi/pool.py
+++ b/nova/virt/xenapi/pool.py
@@ -76,7 +76,7 @@ class ResourcePool(object):
# this is the master -> do a pool-join
# To this aim, nova compute on the slave has to go down.
# NOTE: it is assumed that ONLY nova compute is running now
- self._join_slave(aggregate.id, host,
+ self._join_slave(aggregate.id,
kwargs.get('compute_uuid'),
kwargs.get('url'), kwargs.get('user'),
kwargs.get('passwd'))
@@ -127,7 +127,7 @@ class ResourcePool(object):
'from the pool; No master found')
% locals())
- def _join_slave(self, aggregate_id, host, compute_uuid, url, user, passwd):
+ def _join_slave(self, aggregate_id, compute_uuid, url, user, passwd):
"""Joins a slave into a XenServer resource pool."""
try:
args = {'compute_uuid': compute_uuid,
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 5b4d178f8..e117b3d0a 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -21,6 +21,7 @@ their attributes like VDIs, VIFs, as well as their lookup functions.
"""
import contextlib
+from decimal import Decimal, InvalidOperation
import json
import os
import pickle
@@ -29,8 +30,8 @@ import time
import urllib
import urlparse
import uuid
-from decimal import Decimal, InvalidOperation
from xml.dom import minidom
+from xml.parsers import expat
from eventlet import greenthread
@@ -437,7 +438,6 @@ class VMHelper(HelperBase):
# NOTE(jerdfelt): Would be nice to just set vdi_ref to read/write
sr_ref = cls.safe_find_sr(session)
copy_ref = session.call_xenapi('VDI.copy', vdi_ref, sr_ref)
- copy_uuid = session.call_xenapi('VDI.get_uuid', copy_ref)
try:
# Resize partition and filesystem down
@@ -481,7 +481,7 @@ class VMHelper(HelperBase):
if len(partitions) != 1:
return
- num, start, old_sectors, ptype = partitions[0]
+ _num, start, old_sectors, ptype = partitions[0]
if ptype in ('ext3', 'ext4'):
new_sectors = new_gb * 1024 * 1024 * 1024 / SECTOR_SIZE
_resize_part_and_fs(dev, start, old_sectors, new_sectors)
@@ -677,8 +677,8 @@ class VMHelper(HelperBase):
return vdi_return_list
@classmethod
- def fetch_image(cls, context, session, instance, image, user_id,
- project_id, image_type):
+ def fetch_image(cls, context, session, instance, image, _user_id,
+ _project_id, image_type):
"""Fetch image from glance based on image type.
Returns: A single filename if image_type is KERNEL or RAMDISK
@@ -721,7 +721,7 @@ class VMHelper(HelperBase):
result = session.wait_for_task(task, instance['uuid'])
return json.loads(result)
except cls.XenAPI.Failure as exc:
- _type, method, error = exc.details[:3]
+ _type, _method, error = exc.details[:3]
if error == 'RetryableError':
LOG.error(_('download_vhd failed: %r') %
(exc.details[3:],))
@@ -735,7 +735,7 @@ class VMHelper(HelperBase):
@classmethod
def _fetch_image_glance_vhd(cls, context, session, instance, image,
- image_type):
+ _image_type):
"""Tell glance to download an image and put the VHDs into the SR
Returns: A list of dictionaries that describe VDIs
@@ -769,7 +769,7 @@ class VMHelper(HelperBase):
return vdis
@classmethod
- def _get_vdi_chain_size(cls, context, session, vdi_uuid):
+ def _get_vdi_chain_size(cls, session, vdi_uuid):
"""Compute the total size of a VDI chain, starting with the specified
VDI UUID.
@@ -787,7 +787,7 @@ class VMHelper(HelperBase):
@classmethod
def _check_vdi_size(cls, context, session, instance, vdi_uuid):
- size_bytes = cls._get_vdi_chain_size(context, session, vdi_uuid)
+ size_bytes = cls._get_vdi_chain_size(session, vdi_uuid)
# FIXME(jk0): this was copied directly from compute.manager.py, let's
# refactor this to a common area
@@ -930,8 +930,7 @@ class VMHelper(HelperBase):
return image_type
@classmethod
- def determine_is_pv(cls, session, instance_id, vdi_ref, disk_image_type,
- os_type):
+ def determine_is_pv(cls, session, vdi_ref, disk_image_type, os_type):
"""
Determine whether the VM will use a paravirtualized kernel or if it
will use hardware virtualization.
@@ -1064,7 +1063,7 @@ class VMHelper(HelperBase):
'cpu_time': 0}
@classmethod
- def compile_diagnostics(cls, session, record):
+ def compile_diagnostics(cls, record):
"""Compile VM diagnostics data"""
try:
diags = {}
@@ -1086,7 +1085,7 @@ class VMHelper(HelperBase):
return {"Unable to retrieve diagnostics": e}
@classmethod
- def compile_metrics(cls, session, start_time, stop_time=None):
+ def compile_metrics(cls, start_time, stop_time=None):
"""Compile bandwidth usage, cpu, and disk metrics for all VMs on
this host"""
start_time = int(start_time)
@@ -1262,17 +1261,17 @@ def parse_rrd_update(doc, start, until=None):
meta = parse_rrd_meta(doc)
data = parse_rrd_data(doc)
for col, collabel in enumerate(meta['legend']):
- datatype, objtype, uuid, name = collabel.split(':')
+ _datatype, _objtype, uuid, name = collabel.split(':')
vm_data = sum_data.get(uuid, dict())
if name.startswith('vif'):
vm_data[name] = integrate_series(data, col, start, until)
else:
- vm_data[name] = average_series(data, col, start, until)
+ vm_data[name] = average_series(data, col, until)
sum_data[uuid] = vm_data
return sum_data
-def average_series(data, col, start, until=None):
+def average_series(data, col, until=None):
vals = [row['values'][col] for row in data
if (not until or (row['time'] <= until)) and
row['values'][col].is_finite()]
@@ -1350,7 +1349,7 @@ def get_vhd_parent_uuid(session, vdi_ref):
vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
ret = get_vhd_parent(session, vdi_rec)
if ret:
- parent_ref, parent_rec = ret
+ _parent_ref, parent_rec = ret
return parent_rec["uuid"]
else:
return None
@@ -1392,7 +1391,7 @@ def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref,
# in the active vm/instance vdi chain.
vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
parent_vdi_uuid = get_vhd_parent_uuid(session, vdi_ref)
- for ref, rec in _get_all_vdis_in_sr(session, sr_ref):
+ for _ref, rec in _get_all_vdis_in_sr(session, sr_ref):
if ((rec['uuid'] != vdi_uuid) and
(rec['uuid'] != parent_vdi_uuid) and
(rec['sm_config'].get('vhd-parent') == original_parent_uuid)):
@@ -1566,7 +1565,7 @@ def _is_vdi_pv(dev):
def _get_partitions(dev):
"""Return partition information (num, size, type) for a device."""
dev_path = utils.make_dev_path(dev)
- out, err = utils.execute('parted', '--script', '--machine',
+ out, _err = utils.execute('parted', '--script', '--machine',
dev_path, 'unit s', 'print',
run_as_root=True)
lines = [line for line in out.split('\n') if line]
@@ -1733,7 +1732,7 @@ def _copy_partition(session, src_ref, dst_ref, partition, virtual_size):
def _mount_filesystem(dev_path, dir):
"""mounts the device specified by dev_path in dir"""
try:
- out, err = utils.execute('mount',
+ _out, err = utils.execute('mount',
'-t', 'ext2,ext3,ext4,reiserfs',
dev_path, dir, run_as_root=True)
except exception.ProcessExecutionError as e:
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index e0900cc53..658a6da40 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -402,8 +402,7 @@ class VMOps(object):
vm_mode = 'hvm' # Normalize
else:
use_pv_kernel = VMHelper.determine_is_pv(self._session,
- instance.id, first_vdi_ref, disk_image_type,
- instance.os_type)
+ first_vdi_ref, disk_image_type, instance.os_type)
vm_mode = use_pv_kernel and 'pv' or 'hvm'
if instance.vm_mode != vm_mode:
@@ -1432,15 +1431,13 @@ class VMOps(object):
"""Return data about VM diagnostics."""
vm_ref = self._get_vm_opaque_ref(instance)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
- return VMHelper.compile_diagnostics(self._session, vm_rec)
+ return VMHelper.compile_diagnostics(vm_rec)
def get_all_bw_usage(self, start_time, stop_time=None):
"""Return bandwidth usage info for each interface on each
running VM"""
try:
- metrics = VMHelper.compile_metrics(self._session,
- start_time,
- stop_time)
+ metrics = VMHelper.compile_metrics(start_time, stop_time)
except exception.CouldNotFetchMetrics:
LOG.exception(_("Could not get bandwidth info."))
return {}