author     Tejun Heo <tj@kernel.org>  2009-08-14 14:41:02 +0900
committer  Tejun Heo <tj@kernel.org>  2009-08-14 14:45:31 +0900
commit     384be2b18a5f9475eab9ca2bdfa95cc1a04ef59c (patch)
tree       04c93f391a1b65c8bf8d7ba8643c07d26c26590a /block
parent     a76761b621bcd8336065c4fe3a74f046858bc34c (diff)
parent     142d44b0dd6741a64a7bdbe029110e7c1dcf1d23 (diff)
Merge branch 'percpu-for-linus' into percpu-for-next
Conflicts:
	arch/sparc/kernel/smp_64.c
	arch/x86/kernel/cpu/perf_counter.c
	arch/x86/kernel/setup_percpu.c
	drivers/cpufreq/cpufreq_ondemand.c
	mm/percpu.c

Conflicts in the core and arch percpu code are mostly from commit ed78e1e078dd44249f88b1dd8c76dafb39567161, which substituted many num_possible_cpus() calls with nr_cpu_ids. As the for-next branch has moved all the first-chunk allocators into mm/percpu.c, those changes are moved from the arch code into mm/percpu.c.

Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'block')
-rw-r--r--	block/Kconfig		11
-rw-r--r--	block/blk-core.c	19
-rw-r--r--	block/blk-integrity.c	 1
-rw-r--r--	block/blk-merge.c	 6
-rw-r--r--	block/blk-settings.c	84
-rw-r--r--	block/blk-sysfs.c	11
-rw-r--r--	block/cfq-iosched.c	 2
-rw-r--r--	block/elevator.c	13
-rw-r--r--	block/scsi_ioctl.c	 1
9 files changed, 94 insertions, 54 deletions
diff --git a/block/Kconfig b/block/Kconfig
index 95a86adc33a..9be0b56eaee 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -48,9 +48,9 @@ config LBDAF
If unsure, say Y.
config BLK_DEV_BSG
- bool "Block layer SG support v4 (EXPERIMENTAL)"
- depends on EXPERIMENTAL
- ---help---
+ bool "Block layer SG support v4"
+ default y
+ help
Saying Y here will enable generic SG (SCSI generic) v4 support
for any block device.
@@ -60,7 +60,10 @@ config BLK_DEV_BSG
protocols (e.g. Task Management Functions and SMP in Serial
Attached SCSI).
- If unsure, say N.
+ This option is required by recent UDEV versions to properly
+ access device serial numbers, etc.
+
+ If unsure, say Y.
config BLK_DEV_INTEGRITY
bool "Block layer data integrity support"
diff --git a/block/blk-core.c b/block/blk-core.c
index 4b45435c6ea..e3299a77a0d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -575,13 +575,6 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
return NULL;
}
- /*
- * if caller didn't supply a lock, they get per-queue locking with
- * our embedded lock
- */
- if (!lock)
- lock = &q->__queue_lock;
-
q->request_fn = rfn;
q->prep_rq_fn = NULL;
q->unplug_fn = generic_unplug_device;
@@ -2143,7 +2136,7 @@ bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
return blk_end_bidi_request(rq, error, nr_bytes, 0);
}
-EXPORT_SYMBOL_GPL(blk_end_request);
+EXPORT_SYMBOL(blk_end_request);
/**
* blk_end_request_all - Helper function for drives to finish the request.
@@ -2164,7 +2157,7 @@ void blk_end_request_all(struct request *rq, int error)
pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
BUG_ON(pending);
}
-EXPORT_SYMBOL_GPL(blk_end_request_all);
+EXPORT_SYMBOL(blk_end_request_all);
/**
* blk_end_request_cur - Helper function to finish the current request chunk.
@@ -2182,7 +2175,7 @@ bool blk_end_request_cur(struct request *rq, int error)
{
return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
}
-EXPORT_SYMBOL_GPL(blk_end_request_cur);
+EXPORT_SYMBOL(blk_end_request_cur);
/**
* __blk_end_request - Helper function for drivers to complete the request.
@@ -2201,7 +2194,7 @@ bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
return __blk_end_bidi_request(rq, error, nr_bytes, 0);
}
-EXPORT_SYMBOL_GPL(__blk_end_request);
+EXPORT_SYMBOL(__blk_end_request);
/**
* __blk_end_request_all - Helper function for drives to finish the request.
@@ -2222,7 +2215,7 @@ void __blk_end_request_all(struct request *rq, int error)
pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
BUG_ON(pending);
}
-EXPORT_SYMBOL_GPL(__blk_end_request_all);
+EXPORT_SYMBOL(__blk_end_request_all);
/**
* __blk_end_request_cur - Helper function to finish the current request chunk.
@@ -2241,7 +2234,7 @@ bool __blk_end_request_cur(struct request *rq, int error)
{
return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
}
-EXPORT_SYMBOL_GPL(__blk_end_request_cur);
+EXPORT_SYMBOL(__blk_end_request_cur);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio)
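
[Note: the EXPORT_SYMBOL_GPL -> EXPORT_SYMBOL changes above relax the export license so that non-GPL driver modules can complete requests through these helpers. A minimal sketch of how a driver's completion path might call blk_end_request(); the mydrv_* name is hypothetical, not code from this patch:

#include <linux/blkdev.h>

static void mydrv_complete_io(struct request *rq, int error,
                              unsigned int bytes_done)
{
        /* blk_end_request() returns true while the request still has
         * bytes pending, false once it is fully completed */
        if (blk_end_request(rq, error, bytes_done))
                return;        /* partial completion, more to transfer */

        /* request fully completed; the block layer has released it */
}
]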
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 73e28d35568..15c630813b1 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -379,6 +379,7 @@ void blk_integrity_unregister(struct gendisk *disk)
kobject_uevent(&bi->kobj, KOBJ_REMOVE);
kobject_del(&bi->kobj);
+ kobject_put(&bi->kobj);
kmem_cache_free(integrity_cachep, bi);
disk->integrity = NULL;
}
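
[Note: the added kobject_put() pairs with the reference taken when the integrity kobject was registered. kobject_del() only unlinks the object from sysfs; it does not drop that last reference, so without the put the release path never runs. A minimal sketch of the pattern, using illustrative example_* names rather than the actual blk-integrity types:

#include <linux/kobject.h>
#include <linux/slab.h>

struct example_obj {
        struct kobject kobj;
};

static void example_release(struct kobject *kobj)
{
        kfree(container_of(kobj, struct example_obj, kobj));
}

static struct kobj_type example_ktype = {
        .release = example_release,
};

static int example_register(struct example_obj *obj, struct kobject *parent)
{
        return kobject_init_and_add(&obj->kobj, &example_ktype, parent,
                                    "example");
}

static void example_unregister(struct example_obj *obj)
{
        kobject_del(&obj->kobj);        /* unlink from sysfs */
        kobject_put(&obj->kobj);        /* drop the initial ref -> release() */
}
]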
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 39ce64432ba..e1999679a4d 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -350,6 +350,12 @@ static int attempt_merge(struct request_queue *q, struct request *req,
if (blk_integrity_rq(req) != blk_integrity_rq(next))
return 0;
+ /* don't merge requests of different failfast settings */
+ if (blk_failfast_dev(req) != blk_failfast_dev(next) ||
+ blk_failfast_transport(req) != blk_failfast_transport(next) ||
+ blk_failfast_driver(req) != blk_failfast_driver(next))
+ return 0;
+
/*
* If we are allowed to merge, then append bio list
* from next to rq and release next. merge_requests_fn
diff --git a/block/blk-settings.c b/block/blk-settings.c
index bd582a7f531..476d8706507 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -7,6 +7,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
+#include <linux/gcd.h>
#include "blk.h"
@@ -165,6 +166,13 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
blk_set_default_limits(&q->limits);
/*
+ * If the caller didn't supply a lock, fall back to our embedded
+ * per-queue locks
+ */
+ if (!q->queue_lock)
+ q->queue_lock = &q->__queue_lock;
+
+ /*
* by default assume old behaviour and bounce for any highmem page
*/
blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
@@ -377,8 +385,8 @@ void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
EXPORT_SYMBOL(blk_queue_alignment_offset);
/**
- * blk_queue_io_min - set minimum request size for the queue
- * @q: the request queue for the device
+ * blk_limits_io_min - set minimum request size for a device
+ * @limits: the queue limits
* @min: smallest I/O size in bytes
*
* Description:
@@ -387,15 +395,35 @@ EXPORT_SYMBOL(blk_queue_alignment_offset);
* smallest I/O the device can perform without incurring a performance
* penalty.
*/
-void blk_queue_io_min(struct request_queue *q, unsigned int min)
+void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
- q->limits.io_min = min;
+ limits->io_min = min;
- if (q->limits.io_min < q->limits.logical_block_size)
- q->limits.io_min = q->limits.logical_block_size;
+ if (limits->io_min < limits->logical_block_size)
+ limits->io_min = limits->logical_block_size;
- if (q->limits.io_min < q->limits.physical_block_size)
- q->limits.io_min = q->limits.physical_block_size;
+ if (limits->io_min < limits->physical_block_size)
+ limits->io_min = limits->physical_block_size;
+}
+EXPORT_SYMBOL(blk_limits_io_min);
+
+/**
+ * blk_queue_io_min - set minimum request size for the queue
+ * @q: the request queue for the device
+ * @min: smallest I/O size in bytes
+ *
+ * Description:
+ * Storage devices may report a granularity or preferred minimum I/O
+ * size which is the smallest request the device can perform without
+ * incurring a performance penalty. For disk drives this is often the
+ * physical block size. For RAID arrays it is often the stripe chunk
+ * size. A properly aligned multiple of minimum_io_size is the
+ * preferred request size for workloads where a high number of I/O
+ * operations is desired.
+ */
+void blk_queue_io_min(struct request_queue *q, unsigned int min)
+{
+ blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);
@@ -405,8 +433,12 @@ EXPORT_SYMBOL(blk_queue_io_min);
* @opt: optimal request size in bytes
*
* Description:
- * Drivers can call this function to set the preferred I/O request
- * size for devices that report such a value.
+ * Storage devices may report an optimal I/O size, which is the
+ * device's preferred unit for sustained I/O. This is rarely reported
+ * for disk drives. For RAID arrays it is usually the stripe width or
+ * the internal track size. A properly aligned multiple of
+ * optimal_io_size is the preferred request size for workloads where
+ * sustained throughput is desired.
*/
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
@@ -426,27 +458,7 @@ EXPORT_SYMBOL(blk_queue_io_opt);
**/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
- /* zero is "infinity" */
- t->limits.max_sectors = min_not_zero(queue_max_sectors(t),
- queue_max_sectors(b));
-
- t->limits.max_hw_sectors = min_not_zero(queue_max_hw_sectors(t),
- queue_max_hw_sectors(b));
-
- t->limits.seg_boundary_mask = min_not_zero(queue_segment_boundary(t),
- queue_segment_boundary(b));
-
- t->limits.max_phys_segments = min_not_zero(queue_max_phys_segments(t),
- queue_max_phys_segments(b));
-
- t->limits.max_hw_segments = min_not_zero(queue_max_hw_segments(t),
- queue_max_hw_segments(b));
-
- t->limits.max_segment_size = min_not_zero(queue_max_segment_size(t),
- queue_max_segment_size(b));
-
- t->limits.logical_block_size = max(queue_logical_block_size(t),
- queue_logical_block_size(b));
+ blk_stack_limits(&t->limits, &b->limits, 0);
if (!t->queue_lock)
WARN_ON_ONCE(1);
@@ -516,6 +528,16 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
return -1;
}
+ /* Find lcm() of optimal I/O size */
+ if (t->io_opt && b->io_opt)
+ t->io_opt = (t->io_opt * b->io_opt) / gcd(t->io_opt, b->io_opt);
+ else if (b->io_opt)
+ t->io_opt = b->io_opt;
+
+ /* Verify that optimal I/O size is a multiple of io_min */
+ if (t->io_min && t->io_opt % t->io_min)
+ return -1;
+
return 0;
}
EXPORT_SYMBOL(blk_stack_limits);
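
[Note: the io_opt stacking above computes lcm(top, bottom) as top * bottom / gcd(top, bottom), using gcd() from the newly included <linux/gcd.h>. A standalone illustration of the arithmetic with made-up io_opt values; dividing by the gcd before multiplying avoids intermediate overflow, whereas the kernel version multiplies first:

#include <stdio.h>

static unsigned int gcd(unsigned int a, unsigned int b)
{
        /* Euclid's algorithm, as <linux/gcd.h> provides in-kernel */
        while (b) {
                unsigned int t = a % b;
                a = b;
                b = t;
        }
        return a;
}

int main(void)
{
        unsigned int top = 65536, bottom = 49152;  /* hypothetical io_opt */

        /* the stacked optimal I/O size must be a multiple of both */
        printf("lcm = %u\n", top / gcd(top, bottom) * bottom);
        return 0;
}
]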
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index b1cd04087d6..418d6361968 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -16,9 +16,9 @@ struct queue_sysfs_entry {
};
static ssize_t
-queue_var_show(unsigned int var, char *page)
+queue_var_show(unsigned long var, char *page)
{
- return sprintf(page, "%d\n", var);
+ return sprintf(page, "%lu\n", var);
}
static ssize_t
@@ -77,7 +77,8 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
- int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
+ unsigned long ra_kb = q->backing_dev_info.ra_pages <<
+ (PAGE_CACHE_SHIFT - 10);
return queue_var_show(ra_kb, (page));
}
@@ -189,9 +190,9 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
- unsigned int set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
+ bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
- return queue_var_show(set != 0, page);
+ return queue_var_show(set, page);
}
static ssize_t
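
[Note: the queue_var_show() change above widens the parameter to unsigned long and switches the conversion to %lu; ra_pages shifted into kilobytes can exceed what an int holds on 64-bit builds. A userspace illustration of the truncation the old %d conversion risked, with a made-up value:

#include <stdio.h>

int main(void)
{
        /* larger than UINT_MAX; fits in unsigned long on an LP64 build */
        unsigned long ra_kb = 5UL * 1024 * 1024 * 1024;

        printf("truncated: %d\n", (int)ra_kb);  /* high bits are lost */
        printf("correct:   %lu\n", ra_kb);
        return 0;
}
]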
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 85208dd1d05..1b2d12cda43 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2311,7 +2311,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
goto queue_fail;
cfqq = cic_to_cfqq(cic, is_sync);
- if (!cfqq) {
+ if (!cfqq || cfqq == &cfqd->oom_cfqq) {
cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
cic_set_cfqq(cic, cfqq, is_sync);
}
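
[Note: the cfq change above keeps a task from being stuck on the shared oom_cfqq forever: if an earlier allocation failure cached the fallback queue, the lookup now retries a real allocation. The shape of that pattern in a self-contained sketch with illustrative names, not the actual cfq types:

#include <stdlib.h>

struct thing {
        int id;
};

struct ctx {
        struct thing fallback;  /* shared, statically embedded OOM fallback */
        struct thing *cached;
};

static struct thing *get_thing(struct ctx *c)
{
        struct thing *t = c->cached;

        /*
         * If an earlier allocation failure left the shared fallback
         * cached, retry the real allocation instead of using the
         * fallback forever.
         */
        if (!t || t == &c->fallback) {
                t = malloc(sizeof(*t));
                if (!t)
                        t = &c->fallback;  /* degrade gracefully again */
                c->cached = t;
        }
        return t;
}

int main(void)
{
        struct ctx c = { .cached = NULL };

        /* returns 0 once a real allocation has replaced the fallback */
        return get_thing(&c) == &c.fallback;
}
]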
diff --git a/block/elevator.c b/block/elevator.c
index ca861927ba4..2d511f9105e 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -100,6 +100,19 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
if (bio_integrity(bio) != blk_integrity_rq(rq))
return 0;
+ /*
+ * Don't merge if failfast settings don't match.
+ *
+ * FIXME: The negation in front of each condition is necessary
+ * because bio and request flags use different bit positions
+ * and the accessors return those bits directly. This
+ * ugliness will soon go away.
+ */
+ if (!bio_failfast_dev(bio) != !blk_failfast_dev(rq) ||
+ !bio_failfast_transport(bio) != !blk_failfast_transport(rq) ||
+ !bio_failfast_driver(bio) != !blk_failfast_driver(rq))
+ return 0;
+
if (!elv_iosched_allow_merge(rq, bio))
return 0;
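
[Note: the "!a != !b" form in the new elevator check exists because, as the FIXME above says, the bio and request failfast accessors return the raw flag bits, which sit at different positions; negating each side collapses it to 0 or 1 before comparing. A small standalone demonstration with made-up bit positions:

#include <stdio.h>

int main(void)
{
        unsigned int bio_flag = 1u << 3;  /* made-up bio failfast bit */
        unsigned int rq_flag  = 1u << 7;  /* same setting, different bit */

        /* comparing the raw bits wrongly reports a mismatch */
        printf("raw:        %s\n", bio_flag != rq_flag ? "differ" : "match");

        /* "!" collapses each side to 0 or 1 before comparing */
        printf("normalized: %s\n", !bio_flag != !rq_flag ? "differ" : "match");
        return 0;
}
]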
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index f0e0ce0a607..e5b10017a50 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -680,3 +680,4 @@ int __init blk_scsi_ioctl_init(void)
blk_set_cmd_filter_defaults(&blk_default_cmd_filter);
return 0;
}
+fs_initcall(blk_scsi_ioctl_init);
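
[Note: the added fs_initcall() registers blk_scsi_ioctl_init() to run automatically during boot at the fs initcall level, so blk_default_cmd_filter is set up before block devices can receive SG_IO commands. The registration idiom in its minimal form, with a hypothetical function name:

#include <linux/init.h>

static int __init example_init(void)
{
        /* one-time setup, invoked once at the fs initcall level */
        return 0;
}
fs_initcall(example_init);
]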