Diffstat (limited to 'block')
-rw-r--r--  block/blk-merge.c    94
-rw-r--r--  block/blk-timeout.c   9
-rw-r--r--  block/blktrace.c      2
-rw-r--r--  block/bsg.c          17
-rw-r--r--  block/genhd.c        24
5 files changed, 96 insertions(+), 50 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b92f5b0866b..a104593e70c 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -38,72 +38,84 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
}
}
-void blk_recalc_rq_segments(struct request *rq)
+static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
+ struct bio *bio,
+ unsigned int *seg_size_ptr)
{
- int nr_phys_segs;
unsigned int phys_size;
struct bio_vec *bv, *bvprv = NULL;
- int seg_size;
- int cluster;
- struct req_iterator iter;
- int high, highprv = 1;
- struct request_queue *q = rq->q;
+ int cluster, i, high, highprv = 1;
+ unsigned int seg_size, nr_phys_segs;
+ struct bio *fbio;
- if (!rq->bio)
- return;
+ if (!bio)
+ return 0;
+ fbio = bio;
cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
seg_size = 0;
phys_size = nr_phys_segs = 0;
- rq_for_each_segment(bv, rq, iter) {
- /*
- * the trick here is making sure that a high page is never
- * considered part of another segment, since that might
- * change with the bounce page.
- */
- high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
- if (high || highprv)
- goto new_segment;
- if (cluster) {
- if (seg_size + bv->bv_len > q->max_segment_size)
- goto new_segment;
- if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
- goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+ for_each_bio(bio) {
+ bio_for_each_segment(bv, bio, i) {
+ /*
+ * the trick here is making sure that a high page is
+ * never considered part of another segment, since that
+ * might change with the bounce page.
+ */
+ high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
+ if (high || highprv)
goto new_segment;
+ if (cluster) {
+ if (seg_size + bv->bv_len > q->max_segment_size)
+ goto new_segment;
+ if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
+ goto new_segment;
+ if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+ goto new_segment;
+
+ seg_size += bv->bv_len;
+ bvprv = bv;
+ continue;
+ }
+new_segment:
+ if (nr_phys_segs == 1 && seg_size >
+ fbio->bi_seg_front_size)
+ fbio->bi_seg_front_size = seg_size;
- seg_size += bv->bv_len;
+ nr_phys_segs++;
bvprv = bv;
- continue;
+ seg_size = bv->bv_len;
+ highprv = high;
}
-new_segment:
- if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
- rq->bio->bi_seg_front_size = seg_size;
-
- nr_phys_segs++;
- bvprv = bv;
- seg_size = bv->bv_len;
- highprv = high;
}
- if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
+ if (seg_size_ptr)
+ *seg_size_ptr = seg_size;
+
+ return nr_phys_segs;
+}
+
+void blk_recalc_rq_segments(struct request *rq)
+{
+ unsigned int seg_size = 0, phys_segs;
+
+ phys_segs = __blk_recalc_rq_segments(rq->q, rq->bio, &seg_size);
+
+ if (phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
rq->bio->bi_seg_front_size = seg_size;
if (seg_size > rq->biotail->bi_seg_back_size)
rq->biotail->bi_seg_back_size = seg_size;
- rq->nr_phys_segments = nr_phys_segs;
+ rq->nr_phys_segments = phys_segs;
}
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
- struct request rq;
struct bio *nxt = bio->bi_next;
- rq.q = q;
- rq.bio = rq.biotail = bio;
+
bio->bi_next = NULL;
- blk_recalc_rq_segments(&rq);
+ bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, NULL);
bio->bi_next = nxt;
- bio->bi_phys_segments = rq.nr_phys_segments;
bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);
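
The refactor above pulls the per-bio_vec walk out of blk_recalc_rq_segments() into __blk_recalc_rq_segments(), so blk_recount_segments() no longer has to fake up a struct request on the stack just to count one bio's segments. For readers new to the loop, here is a minimal userspace-style sketch of the clustering idea; struct vec, count_segments() and the plain contiguity test are invented for illustration, and the real loop additionally checks BIOVEC_PHYS_MERGEABLE(), the segment boundary mask and the high/bounce-page condition.

/*
 * Sketch only, not kernel code: count "physical segments" by merging
 * adjacent buffers while the merged size stays under a limit.
 */
#include <stddef.h>

struct vec {
	unsigned long addr;	/* physical address of the buffer */
	unsigned int len;	/* length in bytes */
};

static unsigned int count_segments(const struct vec *v, size_t n,
				   unsigned int max_segment_size)
{
	unsigned int nr_segs = 0, seg_size = 0;
	const struct vec *prev = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		/* extend the current segment if contiguous and within the limit */
		if (prev && prev->addr + prev->len == v[i].addr &&
		    seg_size + v[i].len <= max_segment_size) {
			seg_size += v[i].len;
		} else {
			nr_segs++;		/* start a new segment */
			seg_size = v[i].len;
		}
		prev = &v[i];
	}
	return nr_segs;
}

An empty input yields zero segments, matching the "if (!bio) return 0" early exit above.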
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index a09535377a9..bbbdc4b8ccf 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -209,12 +209,19 @@ void blk_abort_queue(struct request_queue *q)
{
unsigned long flags;
struct request *rq, *tmp;
+ LIST_HEAD(list);
spin_lock_irqsave(q->queue_lock, flags);
elv_abort_queue(q);
- list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
+ /*
+ * Splice entries to local list, to avoid deadlocking if entries
+ * get readded to the timeout list by error handling
+ */
+ list_splice_init(&q->timeout_list, &list);
+
+ list_for_each_entry_safe(rq, tmp, &list, timeout_list)
blk_abort_request(rq);
spin_unlock_irqrestore(q->queue_lock, flags);
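
The deadlock fix relies on a common pattern: detach the whole timeout list onto a local list head before walking it, so that error handling which re-adds requests to q->timeout_list cannot feed the loop forever while the queue lock is held. A standalone sketch of that pattern follows; the types and the handler callback are invented, and a plain singly linked list stands in for the kernel's list_head plus list_splice_init()/list_for_each_entry_safe().

/*
 * Sketch only: detach a list before walking it, so a callback that re-adds
 * entries to the original list cannot make the walk run forever.
 */
#include <stddef.h>

struct node {
	struct node *next;
};

struct queue {
	struct node *pending;	/* stands in for q->timeout_list */
};

typedef void (*handler_fn)(struct queue *q, struct node *n);

static void abort_all(struct queue *q, handler_fn handle)
{
	/* "splice" everything onto a local list and empty the original */
	struct node *local = q->pending;

	q->pending = NULL;

	while (local) {
		struct node *next = local->next;	/* grab next before the callback */

		handle(q, local);	/* may legally re-add the node to q->pending */
		local = next;
	}
}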
diff --git a/block/blktrace.c b/block/blktrace.c
index 39cc3bfe56e..7cf9d1ff45a 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -142,7 +142,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
what |= ddir_act[rw & WRITE];
what |= MASK_TC_BIT(rw, BARRIER);
- what |= MASK_TC_BIT(rw, SYNC);
+ what |= MASK_TC_BIT(rw, SYNCIO);
what |= MASK_TC_BIT(rw, AHEAD);
what |= MASK_TC_BIT(rw, META);
what |= MASK_TC_BIT(rw, DISCARD);
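
The SYNC to SYNCIO change follows a rename of the bio flag (BIO_RW_SYNC became BIO_RW_SYNCIO in this series): MASK_TC_BIT() builds both the BIO_RW_ and __BLK_TC_ identifiers from its second argument by token pasting, so the name passed in has to match the bio flag name. The definition below mirrors that pattern but is reproduced from memory, so treat it as an approximation rather than the exact macro.

/* Approximate shape of the macro: shift the bio rw flag bit into the
 * position of the corresponding blktrace category bit. */
#define MASK_TC_BIT(rw, __name) \
	(((rw) & (1 << BIO_RW_ ## __name)) << (__BLK_TC_ ## __name - BIO_RW_ ## __name))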
diff --git a/block/bsg.c b/block/bsg.c
index d414bb5607e..0ce8806dd0c 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -244,7 +244,8 @@ bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
* map sg_io_v4 to a request.
*/
static struct request *
-bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm)
+bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
+ u8 *sense)
{
struct request_queue *q = bd->queue;
struct request *rq, *next_rq = NULL;
@@ -306,6 +307,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm)
if (ret)
goto out;
}
+
+ rq->sense = sense;
+ rq->sense_len = 0;
+
return rq;
out:
if (rq->cmd != rq->__cmd)
@@ -348,9 +353,6 @@ static void bsg_rq_end_io(struct request *rq, int uptodate)
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
struct bsg_command *bc, struct request *rq)
{
- rq->sense = bc->sense;
- rq->sense_len = 0;
-
/*
* add bc command to busy queue and submit rq for io
*/
@@ -419,7 +421,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
{
int ret = 0;
- dprintk("rq %p bio %p %u\n", rq, bio, rq->errors);
+ dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors);
/*
* fill in all the output members
*/
@@ -635,7 +637,7 @@ static int __bsg_write(struct bsg_device *bd, const char __user *buf,
/*
* get a request, fill in the blanks, and add to request queue
*/
- rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm);
+ rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
rq = NULL;
@@ -922,11 +924,12 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct request *rq;
struct bio *bio, *bidi_bio = NULL;
struct sg_io_v4 hdr;
+ u8 sense[SCSI_SENSE_BUFFERSIZE];
if (copy_from_user(&hdr, uarg, sizeof(hdr)))
return -EFAULT;
- rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE);
+ rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense);
if (IS_ERR(rq))
return PTR_ERR(rq);
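
With this change the sense buffer is supplied by the caller of bsg_map_hdr() instead of being attached later in bsg_add_command(): the asynchronous write path passes bc->sense, and the synchronous SG_IO ioctl can use an on-stack array. A hedged, self-contained sketch of that calling convention follows; struct req, setup_request() and do_sync_io() are invented for the example, and only the caller-owns-the-buffer idea is taken from the patch.

/*
 * Sketch only: the caller owns the sense buffer and hands it over when the
 * request is set up. SCSI_SENSE_BUFFERSIZE is 96 bytes in this kernel series.
 */
#include <stdint.h>
#include <string.h>

#define SENSE_BUFFERSIZE 96

struct req {
	uint8_t *sense;		/* filled in on completion if the command failed */
	unsigned int sense_len;
};

static void setup_request(struct req *rq, uint8_t *sense)
{
	rq->sense = sense;	/* caller-owned, must stay valid until completion */
	rq->sense_len = 0;
}

static int do_sync_io(void)
{
	uint8_t sense[SENSE_BUFFERSIZE];	/* lives on the caller's stack */
	struct req rq;

	memset(sense, 0, sizeof(sense));
	setup_request(&rq, sense);
	/* ... submit the request and wait; sense[] holds any sense data ... */
	return 0;
}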
diff --git a/block/genhd.c b/block/genhd.c
index 397960cf26a..a9ec910974c 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -256,6 +256,22 @@ void blkdev_show(struct seq_file *seqf, off_t offset)
}
#endif /* CONFIG_PROC_FS */
+/**
+ * register_blkdev - register a new block device
+ *
+ * @major: the requested major device number [1..255]. If @major=0, try to
+ * allocate any unused major number.
+ * @name: the name of the new block device as a zero terminated string
+ *
+ * The @name must be unique within the system.
+ *
+ * The return value depends on the @major input parameter.
+ * - if a major device number was requested in range [1..255] then the
+ * function returns zero on success, or a negative error code
+ * - if any unused major number was requested with @major=0 parameter
+ * then the return value is the allocated major number in range
+ * [1..255] or a negative error code otherwise
+ */
int register_blkdev(unsigned int major, const char *name)
{
struct blk_major_name **n, *p;
@@ -1087,6 +1103,14 @@ dev_t blk_lookup_devt(const char *name, int partno)
if (strcmp(dev_name(dev), name))
continue;
+ if (partno < disk->minors) {
+ /* We need to return the right devno, even
+ * if the partition doesn't exist yet.
+ */
+ devt = MKDEV(MAJOR(dev->devt),
+ MINOR(dev->devt) + partno);
+ break;
+ }
part = disk_get_part(disk, partno);
if (part) {
devt = part_devt(part);
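
The new kernel-doc for register_blkdev() spells out its dual return convention: zero on success when a specific major was requested, or the allocated major number when called with @major=0. A short sketch of how a driver typically uses that convention; the "mydrv" name and the module boilerplate are made up for the example, and only register_blkdev()/unregister_blkdev() are real API.

/* Sketch only: dynamic major allocation via register_blkdev(0, ...). */
#include <linux/fs.h>
#include <linux/module.h>

static int mydrv_major;

static int __init mydrv_init(void)
{
	/* major 0: ask for any free major; the allocated number is returned */
	mydrv_major = register_blkdev(0, "mydrv");
	if (mydrv_major < 0)
		return mydrv_major;	/* negative error code on failure */

	printk(KERN_INFO "mydrv: registered with major %d\n", mydrv_major);
	return 0;
}

static void __exit mydrv_exit(void)
{
	unregister_blkdev(mydrv_major, "mydrv");
}

module_init(mydrv_init);
module_exit(mydrv_exit);
MODULE_LICENSE("GPL");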