author     Nicholas Bellinger <nab@linux-iscsi.org>  2012-05-09 12:42:09 -0700
committer  Nicholas Bellinger <nab@linux-iscsi.org>  2012-05-09 15:08:47 -0700
commit     11e764bd5ed4bb930e0ec5dd161df58307507347 (patch)
tree       fa753a950d19c2ef848c50c60f71019dcb912e74
parent     2301917044b96fda41f794011368e623a9b7a435 (diff)
target: Remove max_sectors device attribute for modern se_task less code
This patch removes the original usage of dev_attr->max_sectors in favor of dev_attr->hw_max_sectors, which is now enforced by target core from within transport_generic_cmd_sequencer() for SCF_SCSI_DATA_SG_IO_CDB ops.

After the recent se_task removal patches from hch, having this value for IBLOCK backends set via configfs by userspace from a saved max_sectors value has turned out to be problematic, so it makes sense to remove this now-legacy attribute altogether.

This patch also keeps se_dev_set_default_attribs() doing (sectors / block_size) alignment on the value that actually gets used by target_core_mod, to be safe here, following the same alignment currently applied to fabric_max_sectors.

Reported-by: Andy Grover <agrover@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Roland Dreier <roland@purestorage.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
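The alignment the message refers to is the rounding that se_dev_set_default_attribs() applies via se_dev_align_max_sectors() before storing hw_max_sectors (see the target_core_device.c hunk below). The body of that helper is not part of this diff; the following is a minimal, self-contained sketch of the rounding it is expected to perform (round the sector count down to a whole number of PAGE_SIZE-sized I/Os). The function name align_max_sectors and the hard-coded 4096 PAGE_SIZE are illustration-only assumptions, not the kernel implementation.

```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u   /* illustration only; the kernel uses the real page size */

/*
 * Sketch of the rounding: keep max_sectors a multiple of
 * (PAGE_SIZE / block_size) so each I/O maps to whole pages.
 */
static uint32_t align_max_sectors(uint32_t max_sectors, uint32_t block_size)
{
	uint32_t sectors_per_page = PAGE_SIZE / block_size;

	if (sectors_per_page == 0)      /* block_size > PAGE_SIZE: nothing to align */
		return max_sectors;

	return max_sectors - (max_sectors % sectors_per_page);
}

int main(void)
{
	/* e.g. a backend advertising max_hw_sectors = 2050 with 512-byte blocks */
	uint32_t hw_max = align_max_sectors(2050, 512);

	printf("aligned hw_max_sectors: %u\n", hw_max);  /* prints 2048 */
	return 0;
}
```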
-rw-r--r--  drivers/target/target_core_cdb.c         2
-rw-r--r--  drivers/target/target_core_configfs.c    4
-rw-r--r--  drivers/target/target_core_device.c     69
-rw-r--r--  drivers/target/target_core_transport.c   7
-rw-r--r--  include/target/target_core_base.h        1
5 files changed, 9 insertions(+), 74 deletions(-)
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 22cf44cf43ac..9888693a18fe 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -458,7 +458,7 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
* Set MAXIMUM TRANSFER LENGTH
*/
max_sectors = min(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors,
- dev->se_sub_dev->se_dev_attrib.max_sectors);
+ dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
put_unaligned_be32(max_sectors, &buf[8]);
/*
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index cbb66537d230..931521cc4e4f 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -683,9 +683,6 @@ SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB_RO(hw_max_sectors);
SE_DEV_ATTR_RO(hw_max_sectors);
-DEF_DEV_ATTRIB(max_sectors);
-SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR);
-
DEF_DEV_ATTRIB(fabric_max_sectors);
SE_DEV_ATTR(fabric_max_sectors, S_IRUGO | S_IWUSR);
@@ -727,7 +724,6 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
&target_core_dev_attrib_hw_block_size.attr,
&target_core_dev_attrib_block_size.attr,
&target_core_dev_attrib_hw_max_sectors.attr,
- &target_core_dev_attrib_max_sectors.attr,
&target_core_dev_attrib_fabric_max_sectors.attr,
&target_core_dev_attrib_optimal_sectors.attr,
&target_core_dev_attrib_hw_queue_depth.attr,
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index e621350feebc..5ad972856a8d 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -876,15 +876,12 @@ void se_dev_set_default_attribs(
dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
/*
- * max_sectors is based on subsystem plugin dependent requirements.
+ * Align max_hw_sectors down to PAGE_SIZE I/O transfers
*/
- dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
- /*
- * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
- */
- limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
+ limits->max_hw_sectors = se_dev_align_max_sectors(limits->max_hw_sectors,
limits->logical_block_size);
- dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
+ dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
+
/*
* Set fabric_max_sectors, which is reported in block limits
* VPD page (B0h).
@@ -1168,64 +1165,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
return 0;
}
-int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
-{
- int force = 0; /* Force setting for VDEVS */
-
- if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- pr_err("dev[%p]: Unable to change SE Device"
- " max_sectors while dev_export_obj: %d count exists\n",
- dev, atomic_read(&dev->dev_export_obj.obj_access_count));
- return -EINVAL;
- }
- if (!max_sectors) {
- pr_err("dev[%p]: Illegal ZERO value for"
- " max_sectors\n", dev);
- return -EINVAL;
- }
- if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
- pr_err("dev[%p]: Passed max_sectors: %u less than"
- " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
- DA_STATUS_MAX_SECTORS_MIN);
- return -EINVAL;
- }
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
- pr_err("dev[%p]: Passed max_sectors: %u"
- " greater than TCM/SE_Device max_sectors:"
- " %u\n", dev, max_sectors,
- dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
- return -EINVAL;
- }
- } else {
- if (!force && (max_sectors >
- dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
- pr_err("dev[%p]: Passed max_sectors: %u"
- " greater than TCM/SE_Device max_sectors"
- ": %u, use force=1 to override.\n", dev,
- max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
- return -EINVAL;
- }
- if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
- pr_err("dev[%p]: Passed max_sectors: %u"
- " greater than DA_STATUS_MAX_SECTORS_MAX:"
- " %u\n", dev, max_sectors,
- DA_STATUS_MAX_SECTORS_MAX);
- return -EINVAL;
- }
- }
- /*
- * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
- */
- max_sectors = se_dev_align_max_sectors(max_sectors,
- dev->se_sub_dev->se_dev_attrib.block_size);
-
- dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
- pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
- dev, max_sectors);
- return 0;
-}
-
int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
{
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 85f14e0fe5d3..59577568f935 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -870,8 +870,9 @@ void transport_dump_dev_state(
*bl += sprintf(b + *bl, " Execute/Max Queue Depth: %d/%d",
atomic_read(&dev->execute_tasks), dev->queue_depth);
- *bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
- dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
+ *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
+ dev->se_sub_dev->se_dev_attrib.block_size,
+ dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
*bl += sprintf(b + *bl, " ");
}
@@ -3498,7 +3499,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
BUG_ON(cmd->data_length % attr->block_size);
BUG_ON(DIV_ROUND_UP(cmd->data_length, attr->block_size) >
- attr->max_sectors);
+ attr->hw_max_sectors);
}
atomic_inc(&cmd->t_fe_count);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 927771aff1f7..1fe9111f4dc1 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -708,7 +708,6 @@ struct se_dev_attrib {
u32 hw_block_size;
u32 block_size;
u32 hw_max_sectors;
- u32 max_sectors;
u32 fabric_max_sectors;
u32 optimal_sectors;
u32 hw_queue_depth;
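Net effect for initiators: with max_sectors removed, the MAXIMUM TRANSFER LENGTH reported in the Block Limits VPD page (B0h) is the smaller of fabric_max_sectors and hw_max_sectors, as the target_core_cdb.c hunk above shows. A small stand-alone sketch of that computation, with made-up example limits (the struct and values here are illustrative, not the kernel's se_dev_attrib):

```c
#include <stdio.h>
#include <stdint.h>

/* Hypothetical example limits; real values live in se_dev_attrib. */
struct dev_limits_example {
	uint32_t fabric_max_sectors;
	uint32_t hw_max_sectors;
};

int main(void)
{
	struct dev_limits_example attrib = {
		.fabric_max_sectors = 8192,   /* fabric-imposed cap (example) */
		.hw_max_sectors     = 2048,   /* backend hardware cap (example) */
	};

	/* Same min() the EVPD B0h handler applies after this patch. */
	uint32_t max_xfer_len = attrib.fabric_max_sectors < attrib.hw_max_sectors ?
				attrib.fabric_max_sectors : attrib.hw_max_sectors;

	printf("MAXIMUM TRANSFER LENGTH (sectors): %u\n", max_xfer_len);  /* 2048 */
	return 0;
}
```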