| field | value | date |
|---|---|---|
| author | Jenkins <jenkins@review.openstack.org> | 2012-12-04 19:33:15 +0000 |
| committer | Gerrit Code Review <review@openstack.org> | 2012-12-04 19:33:15 +0000 |
| commit | ad3176ee5318fccceff48cbdb6c8c910579c4698 (patch) | |
| tree | 053f2923fa65599ed43bdc5928f925494357ad96 /nova | |
| parent | 0e43b371db88711c30c70d5f1dec1f6ccc51e0ec (diff) | |
| parent | f51885a9732c84d3b91d049ae5fe995d9296795e (diff) | |
| download | nova-ad3176ee5318fccceff48cbdb6c8c910579c4698.tar.gz nova-ad3176ee5318fccceff48cbdb6c8c910579c4698.tar.xz nova-ad3176ee5318fccceff48cbdb6c8c910579c4698.zip | |
Merge "Implements volume usage metering."
Diffstat (limited to 'nova')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | nova/compute/manager.py | 88 |
| -rw-r--r-- | nova/compute/utils.py | 23 |
| -rw-r--r-- | nova/db/api.py | 18 |
| -rw-r--r-- | nova/db/sqlalchemy/api.py | 79 |
| -rw-r--r-- | nova/db/sqlalchemy/migrate_repo/versions/145_add_volume_usage_cache.py | 69 |
| -rw-r--r-- | nova/db/sqlalchemy/models.py | 18 |
| -rw-r--r-- | nova/tests/test_db_api.py | 86 |
| -rw-r--r-- | nova/tests/test_libvirt.py | 50 |
| -rw-r--r-- | nova/virt/driver.py | 5 |
| -rw-r--r-- | nova/virt/fake.py | 7 |
| -rw-r--r-- | nova/virt/libvirt/driver.py | 42 |
11 files changed, 483 insertions, 2 deletions
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index de3d3ddd4..16b7b19dc 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -122,6 +122,9 @@ interval_opts = [
     cfg.IntOpt('reclaim_instance_interval',
                default=0,
                help='Interval in seconds for reclaiming deleted instances'),
+    cfg.IntOpt('volume_usage_poll_interval',
+               default=0,
+               help='Interval in seconds for gathering volume usages'),
 ]

 timeout_opts = [
@@ -312,6 +315,7 @@ class ComputeManager(manager.SchedulerDependentManager):
             CONF.network_manager, host=kwargs.get('host', None))
         self._last_host_check = 0
         self._last_bw_usage_poll = 0
+        self._last_vol_usage_poll = 0
         self._last_info_cache_heal = 0
         self.compute_api = compute.API()
         self.compute_rpcapi = compute_rpcapi.ComputeAPI()
@@ -2389,6 +2393,24 @@ class ComputeManager(manager.SchedulerDependentManager):
         """Detach a volume from an instance."""
         bdm = self._get_instance_volume_bdm(context, instance['uuid'],
                                             volume_id)
+        if CONF.volume_usage_poll_interval > 0:
+            vol_stats = []
+            mp = bdm['device_name']
+            # Handle bootable volumes which will not contain /dev/
+            if '/dev/' in mp:
+                mp = mp[5:]
+            try:
+                vol_stats = self.driver.block_stats(instance['name'], mp)
+            except NotImplementedError:
+                pass
+
+            if vol_stats:
+                LOG.debug(_("Updating volume usage cache with totals"))
+                rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats
+                self.db.vol_usage_update(context, volume_id, rd_req, rd_bytes,
+                                         wr_req, wr_bytes, instance['id'],
+                                         update_totals=True)
+
         self._detach_volume(context, instance, bdm)
         volume = self.volume_api.get(context, volume_id)
         connector = self.driver.get_volume_connector(instance)
@@ -2954,6 +2976,72 @@ class ComputeManager(manager.SchedulerDependentManager):
                                               bw_ctr['bw_out'],
                                               last_refreshed=refreshed)

+    def _get_host_volume_bdms(self, context, host):
+        """Return all block device mappings on a compute host"""
+        compute_host_bdms = []
+        instances = self.db.instance_get_all_by_host(context, self.host)
+        for instance in instances:
+            instance_bdms = self._get_instance_volume_bdms(context,
+                                                           instance['uuid'])
+            compute_host_bdms.append(dict(instance=instance,
+                                          instance_bdms=instance_bdms))
+
+        return compute_host_bdms
+
+    def _update_volume_usage_cache(self, context, vol_usages, refreshed):
+        """Updates the volume usage cache table with a list of stats"""
+        for usage in vol_usages:
+            # Allow switching of greenthreads between queries.
+            greenthread.sleep(0)
+            self.db.vol_usage_update(context, usage['volume'],
+                                     usage['rd_req'], usage['rd_bytes'],
+                                     usage['wr_req'], usage['wr_bytes'],
+                                     usage['instance_id'],
+                                     last_refreshed=refreshed)
+
+    def _send_volume_usage_notifications(self, context, start_time):
+        """Queries vol usage cache table and sends a vol usage notification"""
+        # We might have had a quick attach/detach that we missed in
+        # the last run of get_all_volume_usage and this one
+        # but detach stats will be recorded in db and returned from
+        # vol_get_usage_by_time
+        vol_usages = self.db.vol_get_usage_by_time(context, start_time)
+        for vol_usage in vol_usages:
+            notifier.notify(context, 'volume.%s' % self.host, 'volume.usage',
+                            notifier.INFO,
+                            compute_utils.usage_volume_info(vol_usage))
+
+    @manager.periodic_task
+    def _poll_volume_usage(self, context, start_time=None):
+        if CONF.volume_usage_poll_interval == 0:
+            return
+        else:
+            if not start_time:
+                start_time = utils.last_completed_audit_period()[1]
+
+            curr_time = time.time()
+            if (curr_time - self._last_vol_usage_poll) < \
+                    CONF.volume_usage_poll_interval:
+                return
+            else:
+                self._last_vol_usage_poll = curr_time
+                compute_host_bdms = self._get_host_volume_bdms(context,
+                                                               self.host)
+                if not compute_host_bdms:
+                    return
+                else:
+                    LOG.debug(_("Updating volume usage cache"))
+                    try:
+                        vol_usages = self.driver.get_all_volume_usage(
+                                context, compute_host_bdms)
+                    except NotImplementedError:
+                        return
+
+                    refreshed = timeutils.utcnow()
+                    self._update_volume_usage_cache(context, vol_usages,
+                                                    refreshed)
+
+        self._send_volume_usage_notifications(context, start_time)
+
     @manager.periodic_task
     def _report_driver_status(self, context):
         curr_time = time.time()
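For readers following the metering flow: below is a rough sketch of the message that _send_volume_usage_notifications() puts on the notification bus. The publisher_id, event_type and priority mirror the notifier.notify() call above, and the payload keys come from compute_utils.usage_volume_info() in the next file; the host name, UUID and counter values are invented for illustration, and the exact envelope (message_id, timestamp, etc.) depends on the configured notifier driver.

    # Illustrative only -- approximate shape of one 'volume.usage' notification.
    sample_notification = {
        'publisher_id': 'volume.compute-node-1',    # 'volume.%s' % self.host
        'event_type': 'volume.usage',
        'priority': 'INFO',
        'payload': {
            'volume_id': 'b7d191f6-0000-0000-0000-000000000000',  # hypothetical
            'instance_id': 42,
            'last_refreshed': '2012-12-04 19:33:15',
            'reads': 169, 'read_bytes': 688640,
            'writes': 0, 'write_bytes': 0,
        },
    }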
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
index cc25ac6cf..a0dfbea8d 100644
--- a/nova/compute/utils.py
+++ b/nova/compute/utils.py
@@ -227,3 +227,26 @@ def start_instance_usage_audit(context, begin, end, host, num_instances):
 def finish_instance_usage_audit(context, begin, end, host, errors, message):
     db.task_log_end_task(context, "instance_usage_audit", begin, end, host,
                          errors, message)
+
+
+def usage_volume_info(vol_usage):
+    def null_safe_str(s):
+        return str(s) if s else ''
+
+    tot_refreshed = vol_usage['tot_last_refreshed']
+    curr_refreshed = vol_usage['curr_last_refreshed']
+    last_refreshed_time = (tot_refreshed if tot_refreshed > curr_refreshed
+                           else curr_refreshed)
+
+    usage_info = dict(
+          volume_id=vol_usage['volume_id'],
+          instance_id=vol_usage['instance_id'],
+          last_refreshed=null_safe_str(last_refreshed_time),
+          reads=vol_usage['tot_reads'] + vol_usage['curr_reads'],
+          read_bytes=vol_usage['tot_read_bytes'] +
+                     vol_usage['curr_read_bytes'],
+          writes=vol_usage['tot_writes'] + vol_usage['curr_writes'],
+          write_bytes=vol_usage['tot_write_bytes'] +
+                      vol_usage['curr_write_bytes'])
+
+    return usage_info
diff --git a/nova/db/api.py b/nova/db/api.py
index b496e4bd0..ad928f585 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -1433,6 +1433,24 @@ def instance_type_extra_specs_update_or_create(context, flavor_id,
 ###################


+def vol_get_usage_by_time(context, begin):
+    """Return volumes usage that have been updated after a specified time"""
+    return IMPL.vol_get_usage_by_time(context, begin)
+
+
+def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
+                     instance_id, last_refreshed=None, update_totals=False):
+    """Update cached volume usage for a volume
+       Creates new record if needed."""
+    return IMPL.vol_usage_update(context, id, rd_req, rd_bytes, wr_req,
+                                 wr_bytes, instance_id,
+                                 last_refreshed=last_refreshed,
+                                 update_totals=update_totals)
+
+
+###################
+
+
 def s3_image_get(context, image_id):
     """Find local s3 image represented by the provided id"""
     return IMPL.s3_image_get(context, image_id)
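To illustrate the intended calling convention of the new DB API (the context `ctxt`, volume id and counter values below are placeholders): a plain call overwrites the curr_* counters of the volume's cache row, while update_totals=True adds the passed counters into the tot_* columns and resets curr_* to zero, as the SQLAlchemy implementation in the next file shows.

    from nova import db

    # Periodic poll: overwrite the current counters for volume '1'.
    db.vol_usage_update(ctxt, '1', rd_req=100, rd_bytes=2000,
                        wr_req=50, wr_bytes=1000, instance_id=1)
    # Detach-time roll-up: add the passed counters to the totals and
    # zero out the current counters.
    db.vol_usage_update(ctxt, '1', rd_req=120, rd_bytes=2400,
                        wr_req=60, wr_bytes=1200, instance_id=1,
                        update_totals=True)
    # usage_volume_info() then reports reads = tot_reads + curr_reads, etc.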
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index c4558a84c..c1b6e66dd 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -4134,6 +4134,85 @@ def instance_type_extra_specs_update_or_create(context, flavor_id,
 ####################


+@require_context
+def vol_get_usage_by_time(context, begin):
+    """Return volumes usage that have been updated after a specified time"""
+    return model_query(context, models.VolumeUsage, read_deleted="yes").\
+        filter(or_(models.VolumeUsage.tot_last_refreshed == None,
+                   models.VolumeUsage.tot_last_refreshed > begin,
+                   models.VolumeUsage.curr_last_refreshed == None,
+                   models.VolumeUsage.curr_last_refreshed > begin,
+                   )).\
+        all()
+
+
+@require_context
+def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
+                     instance_id, last_refreshed=None, update_totals=False,
+                     session=None):
+    if not session:
+        session = get_session()
+
+    if last_refreshed is None:
+        last_refreshed = timeutils.utcnow()
+
+    with session.begin():
+        values = {}
+        # NOTE(dricco): We will be mostly updating current usage records vs
+        # updating total or creating records. Optimize accordingly.
+        if not update_totals:
+            values = {'curr_last_refreshed': last_refreshed,
+                      'curr_reads': rd_req,
+                      'curr_read_bytes': rd_bytes,
+                      'curr_writes': wr_req,
+                      'curr_write_bytes': wr_bytes,
+                      'instance_id': instance_id}
+        else:
+            values = {'tot_last_refreshed': last_refreshed,
+                      'tot_reads': models.VolumeUsage.tot_reads + rd_req,
+                      'tot_read_bytes': models.VolumeUsage.tot_read_bytes +
+                                        rd_bytes,
+                      'tot_writes': models.VolumeUsage.tot_writes + wr_req,
+                      'tot_write_bytes': models.VolumeUsage.tot_write_bytes +
+                                         wr_bytes,
+                      'curr_reads': 0,
+                      'curr_read_bytes': 0,
+                      'curr_writes': 0,
+                      'curr_write_bytes': 0,
+                      'instance_id': instance_id}
+
+        rows = model_query(context, models.VolumeUsage,
+                           session=session, read_deleted="yes").\
+            filter_by(volume_id=id).\
+            update(values, synchronize_session=False)
+
+        if rows:
+            return
+
+        vol_usage = models.VolumeUsage()
+        vol_usage.tot_last_refreshed = timeutils.utcnow()
+        vol_usage.curr_last_refreshed = timeutils.utcnow()
+        vol_usage.volume_id = id
+
+        if not update_totals:
+            vol_usage.curr_reads = rd_req
+            vol_usage.curr_read_bytes = rd_bytes
+            vol_usage.curr_writes = wr_req
+            vol_usage.curr_write_bytes = wr_bytes
+        else:
+            vol_usage.tot_reads = rd_req
+            vol_usage.tot_read_bytes = rd_bytes
+            vol_usage.tot_writes = wr_req
+            vol_usage.tot_write_bytes = wr_bytes
+
+        vol_usage.save(session=session)
+
+    return
+
+
+####################
+
+
 def s3_image_get(context, image_id):
     """Find local s3 image represented by the provided id"""
     result = model_query(context, models.S3Image, read_deleted="yes").\
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/145_add_volume_usage_cache.py b/nova/db/sqlalchemy/migrate_repo/versions/145_add_volume_usage_cache.py
new file mode 100644
index 000000000..7adbcb938
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/145_add_volume_usage_cache.py
@@ -0,0 +1,69 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, DateTime
+from sqlalchemy import Boolean, BigInteger, MetaData, Integer, String, Table
+
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    # Create new table
+    volume_usage_cache = Table('volume_usage_cache', meta,
+            Column('created_at', DateTime(timezone=False)),
+            Column('updated_at', DateTime(timezone=False)),
+            Column('deleted_at', DateTime(timezone=False)),
+            Column('deleted', Boolean(create_constraint=True, name=None)),
+            Column('id', Integer(), primary_key=True, nullable=False),
+            Column('volume_id', String(36), nullable=False),
+            Column("instance_id", Integer()),
+            Column('tot_last_refreshed', DateTime(timezone=False)),
+            Column('tot_reads', BigInteger(), default=0),
+            Column('tot_read_bytes', BigInteger(), default=0),
+            Column('tot_writes', BigInteger(), default=0),
+            Column('tot_write_bytes', BigInteger(), default=0),
+            Column('curr_last_refreshed', DateTime(timezone=False)),
+            Column('curr_reads', BigInteger(), default=0),
+            Column('curr_read_bytes', BigInteger(), default=0),
+            Column('curr_writes', BigInteger(), default=0),
+            Column('curr_write_bytes', BigInteger(), default=0),
+            mysql_engine='InnoDB',
+            mysql_charset='utf8'
+    )
+
+    try:
+        volume_usage_cache.create()
+    except Exception:
+        LOG.exception("Exception while creating table 'volume_usage_cache'")
+        meta.drop_all(tables=[volume_usage_cache])
+        raise
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    volume_usage_cache = Table('volume_usage_cache', meta, autoload=True)
+    try:
+        volume_usage_cache.drop()
+    except Exception:
+        LOG.error(_("volume_usage_cache table not dropped"))
+        raise
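The write path above uses an update-then-insert pattern rather than a true upsert; stripped of the nova helpers (`session`, `Model`, `values` and `vol_id` are stand-ins here), it reduces to:

    # Try the UPDATE first (the common case per the NOTE(dricco) comment);
    # only INSERT when no existing cache row matched the volume id.
    rows = session.query(Model).filter_by(volume_id=vol_id).\
        update(values, synchronize_session=False)
    if not rows:
        session.add(Model(volume_id=vol_id, **values))

Once a row exists, every subsequent poll therefore costs a single UPDATE against the volume_usage_cache table.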
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index a09517b67..a038b6745 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -938,6 +938,24 @@ class BandwidthUsage(BASE, NovaBase):
     last_ctr_out = Column(BigInteger)


+class VolumeUsage(BASE, NovaBase):
+    """Cache for volume usage data pulled from the hypervisor"""
+    __tablename__ = 'volume_usage_cache'
+    id = Column(Integer, primary_key=True, nullable=False)
+    volume_id = Column(String(36), nullable=False)
+    instance_id = Column(Integer)
+    tot_last_refreshed = Column(DateTime)
+    tot_reads = Column(BigInteger, default=0)
+    tot_read_bytes = Column(BigInteger, default=0)
+    tot_writes = Column(BigInteger, default=0)
+    tot_write_bytes = Column(BigInteger, default=0)
+    curr_last_refreshed = Column(DateTime)
+    curr_reads = Column(BigInteger, default=0)
+    curr_read_bytes = Column(BigInteger, default=0)
+    curr_writes = Column(BigInteger, default=0)
+    curr_write_bytes = Column(BigInteger, default=0)
+
+
 class S3Image(BASE, NovaBase):
     """Compatibility layer for the S3 image service talking to Glance"""
     __tablename__ = 's3_images'
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index 546aeaa97..f2124c021 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -1315,3 +1315,89 @@ class InstanceDestroyConstraints(test.TestCase):
                           ctx, instance['uuid'], constraint)
         instance = db.instance_get_by_uuid(ctx, instance['uuid'])
         self.assertFalse(instance['deleted'])
+
+
+class VolumeUsageDBApiTestCase(test.TestCase):
+    def setUp(self):
+        super(VolumeUsageDBApiTestCase, self).setUp()
+        self.user_id = 'fake'
+        self.project_id = 'fake'
+        self.context = context.RequestContext(self.user_id, self.project_id)
+
+    def test_vol_usage_update_no_totals_update(self):
+        ctxt = context.get_admin_context()
+        now = timeutils.utcnow()
+        timeutils.set_time_override(now)
+        start_time = now - datetime.timedelta(seconds=10)
+        refreshed_time = now - datetime.timedelta(seconds=5)
+
+        expected_vol_usages = [{'volume_id': u'1',
+                                'curr_reads': 1000,
+                                'curr_read_bytes': 2000,
+                                'curr_writes': 3000,
+                                'curr_write_bytes': 4000},
+                               {'volume_id': u'2',
+                                'curr_reads': 100,
+                                'curr_read_bytes': 200,
+                                'curr_writes': 300,
+                                'curr_write_bytes': 400}]
+
+        def _compare(vol_usage, expected):
+            for key, value in expected.items():
+                self.assertEqual(vol_usage[key], value)
+
+        vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
+        self.assertEqual(len(vol_usages), 0)
+
+        vol_usage = db.vol_usage_update(ctxt, 1, rd_req=10, rd_bytes=20,
+                                        wr_req=30, wr_bytes=40, instance_id=1)
+        vol_usage = db.vol_usage_update(ctxt, 2, rd_req=100, rd_bytes=200,
+                                        wr_req=300, wr_bytes=400,
+                                        instance_id=1)
+        vol_usage = db.vol_usage_update(ctxt, 1, rd_req=1000, rd_bytes=2000,
+                                        wr_req=3000, wr_bytes=4000,
+                                        instance_id=1,
+                                        last_refreshed=refreshed_time)
+
+        vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
+        self.assertEqual(len(vol_usages), 2)
+        _compare(vol_usages[0], expected_vol_usages[0])
+        _compare(vol_usages[1], expected_vol_usages[1])
+        timeutils.clear_time_override()
+
+    def test_vol_usage_update_totals_update(self):
+        ctxt = context.get_admin_context()
+        now = timeutils.utcnow()
+        timeutils.set_time_override(now)
+        start_time = now - datetime.timedelta(seconds=10)
+        expected_vol_usages = {'volume_id': u'1',
+                               'tot_reads': 600,
+                               'tot_read_bytes': 800,
+                               'tot_writes': 1000,
+                               'tot_write_bytes': 1200,
+                               'curr_reads': 0,
+                               'curr_read_bytes': 0,
+                               'curr_writes': 0,
+                               'curr_write_bytes': 0}
+
+        vol_usage = db.vol_usage_update(ctxt, 1, rd_req=100, rd_bytes=200,
+                                        wr_req=300, wr_bytes=400,
+                                        instance_id=1)
+        vol_usage = db.vol_usage_update(ctxt, 1, rd_req=200, rd_bytes=300,
+                                        wr_req=400, wr_bytes=500,
+                                        instance_id=1,
+                                        update_totals=True)
+        vol_usage = db.vol_usage_update(ctxt, 1, rd_req=300, rd_bytes=400,
+                                        wr_req=500, wr_bytes=600,
+                                        instance_id=1)
+        vol_usage = db.vol_usage_update(ctxt, 1, rd_req=400, rd_bytes=500,
+                                        wr_req=600, wr_bytes=700,
+                                        instance_id=1,
+                                        update_totals=True)
+
+        vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
+
+        self.assertEquals(1, len(vol_usages))
+        for key, value in expected_vol_usages.items():
+            self.assertEqual(vol_usages[0][key], value)
+        timeutils.clear_time_override()
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index 5c90ebcd1..5df7d920d 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -4213,6 +4213,56 @@ class LibvirtDriverTestCase(test.TestCase):
                           _fake_network_info(self.stubs, 1))


+class LibvirtVolumeUsageTestCase(test.TestCase):
+    """Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver
+       .get_all_volume_usage"""
+
+    def setUp(self):
+        super(LibvirtVolumeUsageTestCase, self).setUp()
+        self.conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+        self.c = context.get_admin_context()
+
+        # creating instance
+        inst = {}
+        inst['uuid'] = '875a8070-d0b9-4949-8b31-104d125c9a64'
+        self.ins_ref = db.instance_create(self.c, inst)
+
+        # verify bootable volume device path also
+        self.bdms = [{'volume_id': 1,
+                      'device_name': '/dev/vde'},
+                     {'volume_id': 2,
+                      'device_name': 'vda'}]
+
+    def test_get_all_volume_usage(self):
+        def fake_block_stats(instance_name, disk):
+            return (169L, 688640L, 0L, 0L, -1L)
+
+        self.stubs.Set(self.conn, 'block_stats', fake_block_stats)
+        vol_usage = self.conn.get_all_volume_usage(self.c,
+              [dict(instance=self.ins_ref, instance_bdms=self.bdms)])
+
+        expected_usage = [{'volume': 1,
+                           'instance_id': 1,
+                           'rd_bytes': 688640L, 'wr_req': 0L,
+                           'flush_operations': -1L, 'rd_req': 169L,
+                           'wr_bytes': 0L},
+                          {'volume': 2,
+                           'instance_id': 1,
+                           'rd_bytes': 688640L, 'wr_req': 0L,
+                           'flush_operations': -1L, 'rd_req': 169L,
+                           'wr_bytes': 0L}]
+        self.assertEqual(vol_usage, expected_usage)
+
+    def test_get_all_volume_usage_device_not_found(self):
+        def fake_lookup(instance_name):
+            raise libvirt.libvirtError('invalid path')
+
+        self.stubs.Set(self.conn, '_lookup_by_name', fake_lookup)
+        vol_usage = self.conn.get_all_volume_usage(self.c,
+              [dict(instance=self.ins_ref, instance_bdms=self.bdms)])
+        self.assertEqual(vol_usage, [])
+
+
 class LibvirtNonblockingTestCase(test.TestCase):
     """Test libvirt_nonblocking option"""
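The two device names in self.bdms above exercise the mountpoint normalisation performed by the compute manager and the libvirt driver: an attached volume's '/dev/vde' is trimmed to the short device name, while a bootable volume stored as a bare 'vda' passes through unchanged. In isolation (plain Python, no nova imports) that behaviour is simply:

    for device_name in ('/dev/vde', 'vda'):
        mountpoint = (device_name[5:] if device_name.startswith('/dev/')
                      else device_name)
        print(mountpoint)   # -> 'vde', then 'vda'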
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index ee775bc6b..005012c7f 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -251,6 +251,11 @@ class ComputeDriver(object):
            running VM"""
         raise NotImplementedError()

+    def get_all_volume_usage(self, context, compute_host_bdms):
+        """Return usage info for volumes attached to vms on
+           a given host"""
+        raise NotImplementedError()
+
     def get_host_ip_addr(self):
         """
         Retrieves the IP address of the dom0
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 9f5956b0d..fb1ed5558 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -248,6 +248,13 @@ class FakeDriver(driver.ComputeDriver):
         bw = []
         return bw

+    def get_all_volume_usage(self, context, instances, start_time,
+                             stop_time=None):
+        """Return usage info for volumes attached to vms on
+           a given host"""
+        volusage = []
+        return volusage
+
     def block_stats(self, instance_name, disk_id):
         return [0L, 0L, 0L, 0L, None]
""" - domain = self._lookup_by_name(instance_name) - return domain.blockStats(disk) + try: + domain = self._lookup_by_name(instance_name) + return domain.blockStats(disk) + except libvirt.libvirtError as e: + errcode = e.get_error_code() + LOG.info(_("Getting block stats failed, device might have " + "been detached. Code=%(errcode)s Error=%(e)s") + % locals()) + except exception.InstanceNotFound: + LOG.info(_("Could not find domain in libvirt for instance %s. " + "Cannot get block stats for device") % instance_name) def interface_stats(self, instance_name, interface): """ |
