Commit ae03bf63 authored by Martin K. Petersen, committed by Jens Axboe

block: Use accessor functions for queue limits

Convert all external users of queue limits to using wrapper functions
instead of poking the request queue variables directly.
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent e1defc4f
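
The queue_*() accessors used throughout the diff below are introduced in include/linux/blkdev.h as part of this change; that hunk is not included in this excerpt, so the following is a sketch of the assumed shape of the wrappers (trivial inlines returning the corresponding limit fields), not the verbatim patch text:

/* Sketch (assumed): inline accessors wrapping the raw queue limit fields. */
static inline unsigned int queue_max_sectors(struct request_queue *q)
{
        return q->max_sectors;          /* soft cap on request size, in sectors */
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
        return q->max_hw_sectors;       /* hard cap imposed by the hardware */
}

static inline unsigned short queue_max_hw_segments(struct request_queue *q)
{
        return q->max_hw_segments;
}

static inline unsigned short queue_max_phys_segments(struct request_queue *q)
{
        return q->max_phys_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
        return q->max_segment_size;
}

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
        return q->seg_boundary_mask;
}

static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
        return q->bounce_pfn;
}

Centralizing reads behind these helpers lets the limits be relocated later (e.g. into a dedicated queue_limits structure) without touching every caller again.
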
@@ -388,10 +388,10 @@ int blkdev_issue_discard(struct block_device *bdev,
                 bio->bi_sector = sector;

-                if (nr_sects > q->max_hw_sectors) {
-                        bio->bi_size = q->max_hw_sectors << 9;
-                        nr_sects -= q->max_hw_sectors;
-                        sector += q->max_hw_sectors;
+                if (nr_sects > queue_max_hw_sectors(q)) {
+                        bio->bi_size = queue_max_hw_sectors(q) << 9;
+                        nr_sects -= queue_max_hw_sectors(q);
+                        sector += queue_max_hw_sectors(q);
                 } else {
                         bio->bi_size = nr_sects << 9;
                         nr_sects = 0;
......
@@ -1437,11 +1437,11 @@ static inline void __generic_make_request(struct bio *bio)
                 goto end_io;
         }

-        if (unlikely(nr_sectors > q->max_hw_sectors)) {
+        if (unlikely(nr_sectors > queue_max_hw_sectors(q))) {
                 printk(KERN_ERR "bio too big device %s (%u > %u)\n",
                        bdevname(bio->bi_bdev, b),
                        bio_sectors(bio),
-                       q->max_hw_sectors);
+                       queue_max_hw_sectors(q));
                 goto end_io;
         }
@@ -1608,8 +1608,8 @@ EXPORT_SYMBOL(submit_bio);
  */
 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 {
-        if (blk_rq_sectors(rq) > q->max_sectors ||
-            blk_rq_bytes(rq) > q->max_hw_sectors << 9) {
+        if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
+            blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
                 printk(KERN_ERR "%s: over max size limit.\n", __func__);
                 return -EIO;
         }
@@ -1621,8 +1621,8 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
          * limitation.
          */
         blk_recalc_rq_segments(rq);
-        if (rq->nr_phys_segments > q->max_phys_segments ||
-            rq->nr_phys_segments > q->max_hw_segments) {
+        if (rq->nr_phys_segments > queue_max_phys_segments(q) ||
+            rq->nr_phys_segments > queue_max_hw_segments(q)) {
                 printk(KERN_ERR "%s: over max segments limit.\n", __func__);
                 return -EIO;
         }
......
@@ -115,7 +115,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
         struct bio *bio = NULL;
         int ret;

-        if (len > (q->max_hw_sectors << 9))
+        if (len > (queue_max_hw_sectors(q) << 9))
                 return -EINVAL;
         if (!len)
                 return -EINVAL;
@@ -292,7 +292,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
         struct bio *bio;
         int ret;

-        if (len > (q->max_hw_sectors << 9))
+        if (len > (queue_max_hw_sectors(q) << 9))
                 return -EINVAL;
         if (!len || !kbuf)
                 return -EINVAL;
......
@@ -32,11 +32,12 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                  * never considered part of another segment, since that
                  * might change with the bounce page.
                  */
-                high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
+                high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
                 if (high || highprv)
                         goto new_segment;
                 if (cluster) {
-                        if (seg_size + bv->bv_len > q->max_segment_size)
+                        if (seg_size + bv->bv_len
+                            > queue_max_segment_size(q))
                                 goto new_segment;
                         if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
                                 goto new_segment;
@@ -91,7 +92,7 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                 return 0;

         if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
-            q->max_segment_size)
+            queue_max_segment_size(q))
                 return 0;

         if (!bio_has_data(bio))
@@ -134,7 +135,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                         int nbytes = bvec->bv_len;

                         if (bvprv && cluster) {
-                                if (sg->length + nbytes > q->max_segment_size)
+                                if (sg->length + nbytes > queue_max_segment_size(q))
                                         goto new_segment;

                                 if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
@@ -205,8 +206,8 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 {
         int nr_phys_segs = bio_phys_segments(q, bio);

-        if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
-            || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
+        if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
+            req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
                 req->cmd_flags |= REQ_NOMERGE;
                 if (req == q->last_merge)
                         q->last_merge = NULL;
@@ -227,9 +228,9 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
         unsigned short max_sectors;

         if (unlikely(blk_pc_request(req)))
-                max_sectors = q->max_hw_sectors;
+                max_sectors = queue_max_hw_sectors(q);
         else
-                max_sectors = q->max_sectors;
+                max_sectors = queue_max_sectors(q);

         if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
                 req->cmd_flags |= REQ_NOMERGE;
@@ -251,9 +252,9 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
         unsigned short max_sectors;

         if (unlikely(blk_pc_request(req)))
-                max_sectors = q->max_hw_sectors;
+                max_sectors = queue_max_hw_sectors(q);
         else
-                max_sectors = q->max_sectors;
+                max_sectors = queue_max_sectors(q);

         if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
@@ -287,7 +288,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
         /*
          * Will it become too large?
          */
-        if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > q->max_sectors)
+        if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
                 return 0;

         total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@@ -299,10 +300,10 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                 total_phys_segments--;
         }

-        if (total_phys_segments > q->max_phys_segments)
+        if (total_phys_segments > queue_max_phys_segments(q))
                 return 0;

-        if (total_phys_segments > q->max_hw_segments)
+        if (total_phys_segments > queue_max_hw_segments(q))
                 return 0;

         /* Merge is OK... */
......
@@ -219,6 +219,15 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 }
 EXPORT_SYMBOL(blk_queue_max_sectors);

+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
+{
+        if (BLK_DEF_MAX_SECTORS > max_sectors)
+                q->max_hw_sectors = BLK_DEF_MAX_SECTORS;
+        else
+                q->max_hw_sectors = max_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_hw_sectors);
+
 /**
  * blk_queue_max_phys_segments - set max phys segments for a request for this queue
  * @q:  the request queue for the device
@@ -395,11 +404,11 @@ int blk_queue_dma_drain(struct request_queue *q,
                                dma_drain_needed_fn *dma_drain_needed,
                                void *buf, unsigned int size)
 {
-        if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
+        if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
                 return -EINVAL;
         /* make room for appending the drain */
-        --q->max_hw_segments;
-        --q->max_phys_segments;
+        blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
+        blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
         q->dma_drain_needed = dma_drain_needed;
         q->dma_drain_buffer = buf;
         q->dma_drain_size = size;
......
@@ -95,7 +95,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)

 static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
 {
-        int max_sectors_kb = q->max_sectors >> 1;
+        int max_sectors_kb = queue_max_sectors(q) >> 1;

         return queue_var_show(max_sectors_kb, (page));
 }
@@ -109,7 +109,7 @@ static ssize_t
 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 {
         unsigned long max_sectors_kb,
-                max_hw_sectors_kb = q->max_hw_sectors >> 1,
+                max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
                         page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
         ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
@@ -117,7 +117,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
                 return -EINVAL;

         spin_lock_irq(q->queue_lock);
-        q->max_sectors = max_sectors_kb << 1;
+        blk_queue_max_sectors(q, max_sectors_kb << 1);
         spin_unlock_irq(q->queue_lock);

         return ret;
@@ -125,7 +125,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)

 static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
 {
-        int max_hw_sectors_kb = q->max_hw_sectors >> 1;
+        int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

         return queue_var_show(max_hw_sectors_kb, (page));
 }
......
@@ -766,7 +766,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
                 return compat_put_int(arg, bdev_logical_block_size(bdev));
         case BLKSECTGET:
                 return compat_put_ushort(arg,
-                                         bdev_get_queue(bdev)->max_sectors);
+                                         queue_max_sectors(bdev_get_queue(bdev)));
         case BLKRASET: /* compatible, but no compat_ptr (!) */
         case BLKFRASET:
                 if (!capable(CAP_SYS_ADMIN))
......
@@ -152,10 +152,10 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
                 bio->bi_private = &wait;
                 bio->bi_sector = start;

-                if (len > q->max_hw_sectors) {
-                        bio->bi_size = q->max_hw_sectors << 9;
-                        len -= q->max_hw_sectors;
-                        start += q->max_hw_sectors;
+                if (len > queue_max_hw_sectors(q)) {
+                        bio->bi_size = queue_max_hw_sectors(q) << 9;
+                        len -= queue_max_hw_sectors(q);
+                        start += queue_max_hw_sectors(q);
                 } else {
                         bio->bi_size = len << 9;
                         len = 0;
@@ -313,7 +313,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
         case BLKSSZGET: /* get block device hardware sector size */
                 return put_int(arg, bdev_logical_block_size(bdev));
         case BLKSECTGET:
-                return put_ushort(arg, bdev_get_queue(bdev)->max_sectors);
+                return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev)));
         case BLKRASET:
         case BLKFRASET:
                 if(!capable(CAP_SYS_ADMIN))
......
@@ -75,7 +75,7 @@ static int sg_set_timeout(struct request_queue *q, int __user *p)

 static int sg_get_reserved_size(struct request_queue *q, int __user *p)
 {
-        unsigned val = min(q->sg_reserved_size, q->max_sectors << 9);
+        unsigned val = min(q->sg_reserved_size, queue_max_sectors(q) << 9);

         return put_user(val, p);
 }
@@ -89,8 +89,8 @@ static int sg_set_reserved_size(struct request_queue *q, int __user *p)
         if (size < 0)
                 return -EINVAL;
-        if (size > (q->max_sectors << 9))
-                size = q->max_sectors << 9;
+        if (size > (queue_max_sectors(q) << 9))
+                size = queue_max_sectors(q) << 9;

         q->sg_reserved_size = size;
         return 0;
@@ -264,7 +264,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
         if (hdr->cmd_len > BLK_MAX_CDB)
                 return -EINVAL;
-        if (hdr->dxfer_len > (q->max_hw_sectors << 9))
+        if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
                 return -EIO;

         if (hdr->dxfer_len)
......
@@ -991,13 +991,15 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
  */
 static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
 {
-        if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) {
+        if ((pd->settings.size << 9) / CD_FRAMESIZE
+            <= queue_max_phys_segments(q)) {
                 /*
                  * The cdrom device can handle one segment/frame
                  */
                 clear_bit(PACKET_MERGE_SEGS, &pd->flags);
                 return 0;
-        } else if ((pd->settings.size << 9) / PAGE_SIZE <= q->max_phys_segments) {
+        } else if ((pd->settings.size << 9) / PAGE_SIZE
+                   <= queue_max_phys_segments(q)) {
                 /*
                  * We can handle this case at the expense of some extra memory
                  * copies during write operations
......
@@ -2101,8 +2101,8 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
         nr = nframes;
         if (cdi->cdda_method == CDDA_BPC_SINGLE)
                 nr = 1;
-        if (nr * CD_FRAMESIZE_RAW > (q->max_sectors << 9))
-                nr = (q->max_sectors << 9) / CD_FRAMESIZE_RAW;
+        if (nr * CD_FRAMESIZE_RAW > (queue_max_sectors(q) << 9))
+                nr = (queue_max_sectors(q) << 9) / CD_FRAMESIZE_RAW;

         len = nr * CD_FRAMESIZE_RAW;
......
@@ -510,7 +510,7 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
          * combine_restrictions_low()
          */
         rs->max_sectors =
-                min_not_zero(rs->max_sectors, q->max_sectors);
+                min_not_zero(rs->max_sectors, queue_max_sectors(q));

         /*
          * Check if merge fn is supported.
@@ -525,25 +525,25 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)

         rs->max_phys_segments =
                 min_not_zero(rs->max_phys_segments,
-                             q->max_phys_segments);
+                             queue_max_phys_segments(q));

         rs->max_hw_segments =
-                min_not_zero(rs->max_hw_segments, q->max_hw_segments);
+                min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q));

         rs->logical_block_size = max(rs->logical_block_size,
                                      queue_logical_block_size(q));

         rs->max_segment_size =
-                min_not_zero(rs->max_segment_size, q->max_segment_size);
+                min_not_zero(rs->max_segment_size, queue_max_segment_size(q));

         rs->max_hw_sectors =
-                min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);
+                min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q));

         rs->seg_boundary_mask =
                 min_not_zero(rs->seg_boundary_mask,
-                             q->seg_boundary_mask);
+                             queue_segment_boundary(q));

-        rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
+        rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q));

         rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 }
@@ -914,13 +914,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
          * restrictions.
          */
         blk_queue_max_sectors(q, t->limits.max_sectors);
-        q->max_phys_segments = t->limits.max_phys_segments;
-        q->max_hw_segments = t->limits.max_hw_segments;
-        q->logical_block_size = t->limits.logical_block_size;
-        q->max_segment_size = t->limits.max_segment_size;
-        q->max_hw_sectors = t->limits.max_hw_sectors;
-        q->seg_boundary_mask = t->limits.seg_boundary_mask;
-        q->bounce_pfn = t->limits.bounce_pfn;
+        blk_queue_max_phys_segments(q, t->limits.max_phys_segments);
+        blk_queue_max_hw_segments(q, t->limits.max_hw_segments);
+        blk_queue_logical_block_size(q, t->limits.logical_block_size);
+        blk_queue_max_segment_size(q, t->limits.max_segment_size);
+        blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
+        blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
+        blk_queue_bounce_limit(q, t->limits.bounce_pfn);

         if (t->limits.no_cluster)
                 queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
......
@@ -146,7 +146,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
                  * a one page request is never in violation.
                  */
                 if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-                    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+                    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
                         blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

                 disk->num_sectors = rdev->sectors;
......
@@ -303,7 +303,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
                          * merge_bvec_fn will be involved in multipath.)
                          */
                         if (q->merge_bvec_fn &&
-                            mddev->queue->max_sectors > (PAGE_SIZE>>9))
+                            queue_max_sectors(q) > (PAGE_SIZE>>9))
                                 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

                         conf->working_disks++;
@@ -467,7 +467,7 @@ static int multipath_run (mddev_t *mddev)
                  * violating it, not that we ever expect a device with
                  * a merge_bvec_fn to be involved in multipath */
                 if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-                    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+                    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
                         blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

                 if (!test_bit(Faulty, &rdev->flags))
......
@@ -144,7 +144,7 @@ static int create_strip_zones (mddev_t *mddev)
                  */
                 if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
-                    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+                    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
                         blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

                 if (!smallest || (rdev1->sectors < smallest->sectors))
......
@@ -1130,7 +1130,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
                          * a one page request is never in violation.
                          */
                         if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-                            mddev->queue->max_sectors > (PAGE_SIZE>>9))
+                            queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
                                 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

                         p->head_position = 0;
@@ -1996,7 +1996,7 @@ static int run(mddev_t *mddev)
                  * a one page request is never in violation.
                  */
                 if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-                    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+                    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
                         blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

                 disk->head_position = 0;
......
@@ -1158,8 +1158,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
                          * a one page request is never in violation.
                          */
                         if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-                            mddev->queue->max_sectors > (PAGE_SIZE>>9))
-                                mddev->queue->max_sectors = (PAGE_SIZE>>9);
+                            queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+                                blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

                         p->head_position = 0;
                         rdev->raid_disk = mirror;
@@ -2145,8 +2145,8 @@ static int run(mddev_t *mddev)
                  * a one page request is never in violation.
                  */
                 if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-                    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-                        mddev->queue->max_sectors = (PAGE_SIZE>>9);
+                    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+                        blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

                 disk->head_position = 0;
         }
......
@@ -3463,10 +3463,10 @@ static int bio_fits_rdev(struct bio *bi)
 {
         struct request_queue *q = bdev_get_queue(bi->bi_bdev);

-        if ((bi->bi_size>>9) > q->max_sectors)
+        if ((bi->bi_size>>9) > queue_max_sectors(q))
                 return 0;
         blk_recount_segments(q, bi);
-        if (bi->bi_phys_segments > q->max_phys_segments)
+        if (bi->bi_phys_segments > queue_max_phys_segments(q))
                 return 0;

         if (q->merge_bvec_fn)
......
@@ -289,8 +289,8 @@ sg_open(struct inode *inode, struct file *filp)
         if (list_empty(&sdp->sfds)) { /* no existing opens on this device */
                 sdp->sgdebug = 0;
                 q = sdp->device->request_queue;
-                sdp->sg_tablesize = min(q->max_hw_segments,
-                                        q->max_phys_segments);
+                sdp->sg_tablesize = min(queue_max_hw_segments(q),
+                                        queue_max_phys_segments(q));
         }
         if ((sfp = sg_add_sfp(sdp, dev)))
                 filp->private_data = sfp;
@@ -909,7 +909,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
                 if (val < 0)
                         return -EINVAL;