Commit 7a85f889 authored by FUJITA Tomonori, committed by Jens Axboe

block: restore the meaning of rq->data_len to the true data length

The meaning of rq->data_len was changed to the length of an allocated
buffer from the true data length. It breaks SG_IO friends and
bsg. This patch restores the meaning of rq->data_len to the true data
length and adds rq->extra_len to store an extended length (due to
drain buffer and padding).

This patch also removes the code to update bio in blk_rq_map_user
introduced by the commit 40b01b9b.
The commit adjusts bio according to memory alignment
(queue_dma_alignment). However, memory alignment is NOT padding
alignment. This adjustment also breaks SG_IO friends and bsg. Padding
alignment needs to be fixed in a proper way (by a separate patch).
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Jens Axboe <axboe@carl.home.kernel.dk>
parent 89b6e743
...@@ -127,7 +127,6 @@ void rq_init(struct request_queue *q, struct request *rq) ...@@ -127,7 +127,6 @@ void rq_init(struct request_queue *q, struct request *rq)
rq->nr_hw_segments = 0; rq->nr_hw_segments = 0;
rq->ioprio = 0; rq->ioprio = 0;
rq->special = NULL; rq->special = NULL;
rq->raw_data_len = 0;
rq->buffer = NULL; rq->buffer = NULL;
rq->tag = -1; rq->tag = -1;
rq->errors = 0; rq->errors = 0;
...@@ -135,6 +134,7 @@ void rq_init(struct request_queue *q, struct request *rq) ...@@ -135,6 +134,7 @@ void rq_init(struct request_queue *q, struct request *rq)
rq->cmd_len = 0; rq->cmd_len = 0;
memset(rq->cmd, 0, sizeof(rq->cmd)); memset(rq->cmd, 0, sizeof(rq->cmd));
rq->data_len = 0; rq->data_len = 0;
rq->extra_len = 0;
rq->sense_len = 0; rq->sense_len = 0;
rq->data = NULL; rq->data = NULL;
rq->sense = NULL; rq->sense = NULL;
...@@ -2018,7 +2018,6 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, ...@@ -2018,7 +2018,6 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
rq->hard_cur_sectors = rq->current_nr_sectors; rq->hard_cur_sectors = rq->current_nr_sectors;
rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio); rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
rq->buffer = bio_data(bio); rq->buffer = bio_data(bio);
rq->raw_data_len = bio->bi_size;
rq->data_len = bio->bi_size; rq->data_len = bio->bi_size;
rq->bio = rq->biotail = bio; rq->bio = rq->biotail = bio;
......
...@@ -19,7 +19,6 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq, ...@@ -19,7 +19,6 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
rq->biotail->bi_next = bio; rq->biotail->bi_next = bio;
rq->biotail = bio; rq->biotail = bio;
rq->raw_data_len += bio->bi_size;
rq->data_len += bio->bi_size; rq->data_len += bio->bi_size;
} }
return 0; return 0;
...@@ -151,11 +150,8 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq, ...@@ -151,11 +150,8 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
*/ */
if (len & queue_dma_alignment(q)) { if (len & queue_dma_alignment(q)) {
unsigned int pad_len = (queue_dma_alignment(q) & ~len) + 1; unsigned int pad_len = (queue_dma_alignment(q) & ~len) + 1;
struct bio *bio = rq->biotail;
bio->bi_io_vec[bio->bi_vcnt - 1].bv_len += pad_len; rq->extra_len += pad_len;
bio->bi_size += pad_len;
rq->data_len += pad_len;
} }
rq->buffer = rq->data = NULL; rq->buffer = rq->data = NULL;
......
...@@ -231,7 +231,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, ...@@ -231,7 +231,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
((unsigned long)q->dma_drain_buffer) & ((unsigned long)q->dma_drain_buffer) &
(PAGE_SIZE - 1)); (PAGE_SIZE - 1));
nsegs++; nsegs++;
rq->data_len += q->dma_drain_size; rq->extra_len += q->dma_drain_size;
} }
if (sg) if (sg)
......
...@@ -437,14 +437,14 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, ...@@ -437,14 +437,14 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
} }
if (rq->next_rq) { if (rq->next_rq) {
hdr->dout_resid = rq->raw_data_len; hdr->dout_resid = rq->data_len;
hdr->din_resid = rq->next_rq->raw_data_len; hdr->din_resid = rq->next_rq->data_len;
blk_rq_unmap_user(bidi_bio); blk_rq_unmap_user(bidi_bio);
blk_put_request(rq->next_rq); blk_put_request(rq->next_rq);
} else if (rq_data_dir(rq) == READ) } else if (rq_data_dir(rq) == READ)
hdr->din_resid = rq->raw_data_len; hdr->din_resid = rq->data_len;
else else
hdr->dout_resid = rq->raw_data_len; hdr->dout_resid = rq->data_len;
/* /*
* If the request generated a negative error number, return it * If the request generated a negative error number, return it
......
...@@ -266,7 +266,7 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr, ...@@ -266,7 +266,7 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
hdr->info = 0; hdr->info = 0;
if (hdr->masked_status || hdr->host_status || hdr->driver_status) if (hdr->masked_status || hdr->host_status || hdr->driver_status)
hdr->info |= SG_INFO_CHECK; hdr->info |= SG_INFO_CHECK;
hdr->resid = rq->raw_data_len; hdr->resid = rq->data_len;
hdr->sb_len_wr = 0; hdr->sb_len_wr = 0;
if (rq->sense_len && hdr->sbp) { if (rq->sense_len && hdr->sbp) {
...@@ -528,8 +528,8 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk, ...@@ -528,8 +528,8 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
rq = blk_get_request(q, WRITE, __GFP_WAIT); rq = blk_get_request(q, WRITE, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_BLOCK_PC; rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->data = NULL; rq->data = NULL;
rq->raw_data_len = 0;
rq->data_len = 0; rq->data_len = 0;
rq->extra_len = 0;
rq->timeout = BLK_DEFAULT_SG_TIMEOUT; rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
memset(rq->cmd, 0, sizeof(rq->cmd)); memset(rq->cmd, 0, sizeof(rq->cmd));
rq->cmd[0] = cmd; rq->cmd[0] = cmd;
......
...@@ -2538,7 +2538,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) ...@@ -2538,7 +2538,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
} }
qc->tf.command = ATA_CMD_PACKET; qc->tf.command = ATA_CMD_PACKET;
qc->nbytes = scsi_bufflen(scmd); qc->nbytes = scsi_bufflen(scmd) + scmd->request->extra_len;
/* check whether ATAPI DMA is safe */ /* check whether ATAPI DMA is safe */
if (!using_pio && ata_check_atapi_dma(qc)) if (!using_pio && ata_check_atapi_dma(qc))
...@@ -2549,7 +2549,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) ...@@ -2549,7 +2549,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
* want to set it properly, and for DMA where it is * want to set it properly, and for DMA where it is
* effectively meaningless. * effectively meaningless.
*/ */
nbytes = min(scmd->request->raw_data_len, (unsigned int)63 * 1024); nbytes = min(scmd->request->data_len, (unsigned int)63 * 1024);
/* Most ATAPI devices which honor transfer chunk size don't /* Most ATAPI devices which honor transfer chunk size don't
* behave according to the spec when odd chunk size which * behave according to the spec when odd chunk size which
...@@ -2875,7 +2875,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) ...@@ -2875,7 +2875,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
* TODO: find out if we need to do more here to * TODO: find out if we need to do more here to
* cover scatter/gather case. * cover scatter/gather case.
*/ */
qc->nbytes = scsi_bufflen(scmd); qc->nbytes = scsi_bufflen(scmd) + scmd->request->extra_len;
/* request result TF and be quiet about device error */ /* request result TF and be quiet about device error */
qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET;
......
...@@ -216,8 +216,8 @@ struct request { ...@@ -216,8 +216,8 @@ struct request {
unsigned int cmd_len; unsigned int cmd_len;
unsigned char cmd[BLK_MAX_CDB]; unsigned char cmd[BLK_MAX_CDB];
unsigned int raw_data_len;
unsigned int data_len; unsigned int data_len;
unsigned int extra_len; /* length of alignment and padding */
unsigned int sense_len; unsigned int sense_len;
void *data; void *data;
void *sense; void *sense;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment