Commit 4246a0b6 authored by Christoph Hellwig, committed by Jens Axboe

block: add a bi_error field to struct bio

Currently we have two different ways to signal an I/O error on a BIO:

 (1) by clearing the BIO_UPTODATE flag
 (2) by returning a Linux errno value to the bi_end_io callback

The first one has the drawback of only communicating a single possible
error (-EIO), and the second one has the drawback of not being persistent
when bios are queued up and of not being passed along from child to parent
bio in the ever more popular chaining scenario.  Having both mechanisms
available has the additional drawback of utterly confusing driver authors
and introducing bugs where various I/O submitters only deal with one of
them, while others have to add boilerplate code to deal with both kinds
of error returns.

So add a new bi_error field to store an errno value directly in struct
bio and remove the existing mechanisms to clean all this up.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: NeilBrown <neilb@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 0034af03
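
To illustrate the interface this commit converges on, here is a minimal sketch of a hypothetical driver's completion and failure paths against the post-patch definitions. It is not part of the patch below; the function names (mydrv_end_io, mydrv_fail_bio) and the log message are made up for illustration. The point is that the errno now lives in bio->bi_error rather than being passed to ->bi_end_io or encoded in BIO_UPTODATE.

#include <linux/bio.h>
#include <linux/printk.h>

/* Completion handler: the errno is read from the bio itself. */
static void mydrv_end_io(struct bio *bio)
{
	int error = bio->bi_error;	/* 0 on success, negative errno on failure */

	if (error)
		pr_err("mydrv: I/O failed with error %d\n", error);

	bio_put(bio);			/* drop the reference taken at submission */
}

/* Failing a bio: store the errno, then complete it. */
static void mydrv_fail_bio(struct bio *bio, int error)
{
	bio->bi_error = error;
	bio_endio(bio);			/* bio_io_error(bio) is the -EIO shorthand */
}

The same pattern recurs throughout the hunks below: callers that used to pass an errno to bio_endio() now assign bio->bi_error before completing the bio, and chained bios copy the value to their parent before ending it.
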
......@@ -1109,7 +1109,7 @@ it will loop and handle as many sectors (on a bio-segment granularity)
as specified.
Now bh->b_end_io is replaced by bio->bi_end_io, but most of the time the
right thing to use is bio_endio(bio, uptodate) instead.
right thing to use is bio_endio(bio) instead.
If the driver is dropping the io_request_lock from its request_fn strategy,
then it just needs to replace that with q->queue_lock instead.
......
......@@ -76,7 +76,7 @@ static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
bvec_to_phys(&bvec));
sec += len;
}
bio_endio(bio, 0);
bio_endio(bio);
}
static int nfhd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
......
......@@ -132,7 +132,7 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
phys_mem += vec.bv_len;
transfered += vec.bv_len;
}
bio_endio(bio, 0);
bio_endio(bio);
}
/**
......
......@@ -101,8 +101,9 @@ static void simdisk_transfer(struct simdisk *dev, unsigned long sector,
spin_unlock(&dev->lock);
}
static int simdisk_xfer_bio(struct simdisk *dev, struct bio *bio)
static void simdisk_make_request(struct request_queue *q, struct bio *bio)
{
struct simdisk *dev = q->queuedata;
struct bio_vec bvec;
struct bvec_iter iter;
sector_t sector = bio->bi_iter.bi_sector;
......@@ -116,17 +117,10 @@ static int simdisk_xfer_bio(struct simdisk *dev, struct bio *bio)
sector += len;
__bio_kunmap_atomic(buffer);
}
return 0;
}
static void simdisk_make_request(struct request_queue *q, struct bio *bio)
{
struct simdisk *dev = q->queuedata;
int status = simdisk_xfer_bio(dev, bio);
bio_endio(bio, status);
bio_endio(bio);
}
static int simdisk_open(struct block_device *bdev, fmode_t mode)
{
struct simdisk *dev = bdev->bd_disk->private_data;
......
......@@ -355,13 +355,12 @@ static void bio_integrity_verify_fn(struct work_struct *work)
container_of(work, struct bio_integrity_payload, bip_work);
struct bio *bio = bip->bip_bio;
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
int error;
error = bio_integrity_process(bio, bi->verify_fn);
bio->bi_error = bio_integrity_process(bio, bi->verify_fn);
/* Restore original bio completion handler */
bio->bi_end_io = bip->bip_end_io;
bio_endio(bio, error);
bio_endio(bio);
}
/**
......@@ -376,7 +375,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
* in process context. This function postpones completion
* accordingly.
*/
void bio_integrity_endio(struct bio *bio, int error)
void bio_integrity_endio(struct bio *bio)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
......@@ -386,9 +385,9 @@ void bio_integrity_endio(struct bio *bio, int error)
* integrity metadata. Restore original bio end_io handler
* and run it.
*/
if (error) {
if (bio->bi_error) {
bio->bi_end_io = bip->bip_end_io;
bio_endio(bio, error);
bio_endio(bio);
return;
}
......
......@@ -269,7 +269,6 @@ static void bio_free(struct bio *bio)
void bio_init(struct bio *bio)
{
memset(bio, 0, sizeof(*bio));
bio->bi_flags = 1 << BIO_UPTODATE;
atomic_set(&bio->__bi_remaining, 1);
atomic_set(&bio->__bi_cnt, 1);
}
......@@ -292,14 +291,17 @@ void bio_reset(struct bio *bio)
__bio_free(bio);
memset(bio, 0, BIO_RESET_BYTES);
bio->bi_flags = flags | (1 << BIO_UPTODATE);
bio->bi_flags = flags;
atomic_set(&bio->__bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);
static void bio_chain_endio(struct bio *bio, int error)
static void bio_chain_endio(struct bio *bio)
{
bio_endio(bio->bi_private, error);
struct bio *parent = bio->bi_private;
parent->bi_error = bio->bi_error;
bio_endio(parent);
bio_put(bio);
}
......@@ -896,11 +898,11 @@ struct submit_bio_ret {
int error;
};
static void submit_bio_wait_endio(struct bio *bio, int error)
static void submit_bio_wait_endio(struct bio *bio)
{
struct submit_bio_ret *ret = bio->bi_private;
ret->error = error;
ret->error = bio->bi_error;
complete(&ret->event);
}
......@@ -1445,7 +1447,7 @@ void bio_unmap_user(struct bio *bio)
}
EXPORT_SYMBOL(bio_unmap_user);
static void bio_map_kern_endio(struct bio *bio, int err)
static void bio_map_kern_endio(struct bio *bio)
{
bio_put(bio);
}
......@@ -1501,13 +1503,13 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
}
EXPORT_SYMBOL(bio_map_kern);
static void bio_copy_kern_endio(struct bio *bio, int err)
static void bio_copy_kern_endio(struct bio *bio)
{
bio_free_pages(bio);
bio_put(bio);
}
static void bio_copy_kern_endio_read(struct bio *bio, int err)
static void bio_copy_kern_endio_read(struct bio *bio)
{
char *p = bio->bi_private;
struct bio_vec *bvec;
......@@ -1518,7 +1520,7 @@ static void bio_copy_kern_endio_read(struct bio *bio, int err)
p += bvec->bv_len;
}
bio_copy_kern_endio(bio, err);
bio_copy_kern_endio(bio);
}
/**
......@@ -1778,25 +1780,15 @@ static inline bool bio_remaining_done(struct bio *bio)
/**
* bio_endio - end I/O on a bio
* @bio: bio
* @error: error, if any
*
* Description:
* bio_endio() will end I/O on the whole bio. bio_endio() is the
* preferred way to end I/O on a bio, it takes care of clearing
* BIO_UPTODATE on error. @error is 0 on success, and and one of the
* established -Exxxx (-EIO, for instance) error values in case
* something went wrong. No one should call bi_end_io() directly on a
* bio unless they own it and thus know that it has an end_io
* function.
* bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
* way to end I/O on a bio. No one should call bi_end_io() directly on a
* bio unless they own it and thus know that it has an end_io function.
**/
void bio_endio(struct bio *bio, int error)
void bio_endio(struct bio *bio)
{
while (bio) {
if (error)
clear_bit(BIO_UPTODATE, &bio->bi_flags);
else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
error = -EIO;
if (unlikely(!bio_remaining_done(bio)))
break;
......@@ -1810,11 +1802,12 @@ void bio_endio(struct bio *bio, int error)
*/
if (bio->bi_end_io == bio_chain_endio) {
struct bio *parent = bio->bi_private;
parent->bi_error = bio->bi_error;
bio_put(bio);
bio = parent;
} else {
if (bio->bi_end_io)
bio->bi_end_io(bio, error);
bio->bi_end_io(bio);
bio = NULL;
}
}
......
......@@ -143,9 +143,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
unsigned int nbytes, int error)
{
if (error)
clear_bit(BIO_UPTODATE, &bio->bi_flags);
else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
error = -EIO;
bio->bi_error = error;
if (unlikely(rq->cmd_flags & REQ_QUIET))
set_bit(BIO_QUIET, &bio->bi_flags);
......@@ -154,7 +152,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
/* don't actually finish bio if it's part of flush sequence */
if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
bio_endio(bio, error);
bio_endio(bio);
}
void blk_dump_rq_flags(struct request *rq, char *msg)
......@@ -1620,7 +1618,8 @@ static void blk_queue_bio(struct request_queue *q, struct bio *bio)
blk_queue_bounce(q, &bio);
if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
bio_endio(bio, -EIO);
bio->bi_error = -EIO;
bio_endio(bio);
return;
}
......@@ -1673,7 +1672,8 @@ static void blk_queue_bio(struct request_queue *q, struct bio *bio)
*/
req = get_request(q, rw_flags, bio, GFP_NOIO);
if (IS_ERR(req)) {
bio_endio(bio, PTR_ERR(req)); /* @q is dead */
bio->bi_error = PTR_ERR(req);
bio_endio(bio);
goto out_unlock;
}
......@@ -1896,7 +1896,8 @@ generic_make_request_checks(struct bio *bio)
return true;
end_io:
bio_endio(bio, err);
bio->bi_error = err;
bio_endio(bio);
return false;
}
......
......@@ -11,16 +11,16 @@
struct bio_batch {
atomic_t done;
unsigned long flags;
int error;
struct completion *wait;
};
static void bio_batch_end_io(struct bio *bio, int err)
static void bio_batch_end_io(struct bio *bio)
{
struct bio_batch *bb = bio->bi_private;
if (err && (err != -EOPNOTSUPP))
clear_bit(BIO_UPTODATE, &bb->flags);
if (bio->bi_error && bio->bi_error != -EOPNOTSUPP)
bb->error = bio->bi_error;
if (atomic_dec_and_test(&bb->done))
complete(bb->wait);
bio_put(bio);
......@@ -78,7 +78,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
}
atomic_set(&bb.done, 1);
bb.flags = 1 << BIO_UPTODATE;
bb.error = 0;
bb.wait = &wait;
blk_start_plug(&plug);
......@@ -134,9 +134,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
if (!atomic_dec_and_test(&bb.done))
wait_for_completion_io(&wait);
if (!test_bit(BIO_UPTODATE, &bb.flags))
ret = -EIO;
if (bb.error)
return bb.error;
return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
......@@ -172,7 +171,7 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
return -EOPNOTSUPP;
atomic_set(&bb.done, 1);
bb.flags = 1 << BIO_UPTODATE;
bb.error = 0;
bb.wait = &wait;
while (nr_sects) {
......@@ -208,9 +207,8 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
if (!atomic_dec_and_test(&bb.done))
wait_for_completion_io(&wait);
if (!test_bit(BIO_UPTODATE, &bb.flags))
ret = -ENOTSUPP;
if (bb.error)
return bb.error;
return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
......@@ -236,7 +234,7 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
DECLARE_COMPLETION_ONSTACK(wait);
atomic_set(&bb.done, 1);
bb.flags = 1 << BIO_UPTODATE;
bb.error = 0;
bb.wait = &wait;
ret = 0;
......@@ -270,10 +268,8 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
if (!atomic_dec_and_test(&bb.done))
wait_for_completion_io(&wait);
if (!test_bit(BIO_UPTODATE, &bb.flags))
/* One of bios in the batch was completed with error.*/
ret = -EIO;
if (bb.error)
return bb.error;
return ret;
}
......
......@@ -103,7 +103,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
* normal IO completion path
*/
bio_get(bio);
bio_endio(bio, 0);
bio_endio(bio);
__blk_rq_unmap_user(bio);
return -EINVAL;
}
......
......@@ -1199,7 +1199,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
struct blk_mq_alloc_data alloc_data;
if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
bio_endio(bio, -EIO);
bio_io_error(bio);
return NULL;
}
......@@ -1283,7 +1283,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_queue_bounce(q, &bio);
if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
bio_endio(bio, -EIO);
bio_io_error(bio);
return;
}
......@@ -1368,7 +1368,7 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
blk_queue_bounce(q, &bio);
if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
bio_endio(bio, -EIO);
bio_io_error(bio);
return;
}
......
......@@ -123,7 +123,7 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
}
}
static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
static void bounce_end_io(struct bio *bio, mempool_t *pool)
{
struct bio *bio_orig = bio->bi_private;
struct bio_vec *bvec, *org_vec;
......@@ -141,39 +141,40 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
mempool_free(bvec->bv_page, pool);
}
bio_endio(bio_orig, err);
bio_orig->bi_error = bio->bi_error;
bio_endio(bio_orig);
bio_put(bio);
}
static void bounce_end_io_write(struct bio *bio, int err)
static void bounce_end_io_write(struct bio *bio)
{
bounce_end_io(bio, page_pool, err);
bounce_end_io(bio, page_pool);
}
static void bounce_end_io_write_isa(struct bio *bio, int err)
static void bounce_end_io_write_isa(struct bio *bio)
{
bounce_end_io(bio, isa_page_pool, err);
bounce_end_io(bio, isa_page_pool);
}
static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
{
struct bio *bio_orig = bio->bi_private;
if (test_bit(BIO_UPTODATE, &bio->bi_flags))
if (!bio->bi_error)
copy_to_high_bio_irq(bio_orig, bio);
bounce_end_io(bio, pool, err);
bounce_end_io(bio, pool);
}
static void bounce_end_io_read(struct bio *bio, int err)
static void bounce_end_io_read(struct bio *bio)
{
__bounce_end_io_read(bio, page_pool, err);
__bounce_end_io_read(bio, page_pool);
}
static void bounce_end_io_read_isa(struct bio *bio, int err)
static void bounce_end_io_read_isa(struct bio *bio)
{
__bounce_end_io_read(bio, isa_page_pool, err);
__bounce_end_io_read(bio, isa_page_pool);
}
#ifdef CONFIG_NEED_BOUNCE_POOL
......
......@@ -1110,7 +1110,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
d->ip.rq = NULL;
do {
bio = rq->bio;
bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags);
bok = !fastfail && !bio->bi_error;
} while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size));
/* cf. http://lkml.org/lkml/2006/10/31/28 */
......@@ -1172,7 +1172,7 @@ ktiocomplete(struct frame *f)
ahout->cmdstat, ahin->cmdstat,
d->aoemajor, d->aoeminor);
noskb: if (buf)
clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
buf->bio->bi_error = -EIO;
goto out;
}
......@@ -1185,7 +1185,7 @@ noskb: if (buf)
"aoe: runt data size in read from",
(long) d->aoemajor, d->aoeminor,
skb->len, n);
clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
buf->bio->bi_error = -EIO;
break;
}
if (n > f->iter.bi_size) {
......@@ -1193,7 +1193,7 @@ noskb: if (buf)
"aoe: too-large data size in read from",
(long) d->aoemajor, d->aoeminor,
n, f->iter.bi_size);
clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
buf->bio->bi_error = -EIO;
break;
}
bvcpy(skb, f->buf->bio, f->iter, n);
......@@ -1695,7 +1695,7 @@ aoe_failbuf(struct aoedev *d, struct buf *buf)
if (buf == NULL)
return;
buf->iter.bi_size = 0;
clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
buf->bio->bi_error = -EIO;
if (buf->nframesout == 0)
aoe_end_buf(d, buf);
}
......
......@@ -170,7 +170,7 @@ aoe_failip(struct aoedev *d)
if (rq == NULL)
return;
while ((bio = d->ip.nxbio)) {
clear_bit(BIO_UPTODATE, &bio->bi_flags);
bio->bi_error = -EIO;
d->ip.nxbio = bio->bi_next;
n = (unsigned long) rq->special;
rq->special = (void *) --n;
......
......@@ -331,14 +331,12 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
struct bio_vec bvec;
sector_t sector;
struct bvec_iter iter;
int err = -EIO;
sector = bio->bi_iter.bi_sector;
if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
goto out;
goto io_error;
if (unlikely(bio->bi_rw & REQ_DISCARD)) {
err = 0;
discard_from_brd(brd, sector, bio->bi_iter.bi_size);
goto out;
}
......@@ -349,15 +347,20 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
bio_for_each_segment(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
int err;
err = brd_do_bvec(brd, bvec.bv_page, len,
bvec.bv_offset, rw, sector);
if (err)
break;
goto io_error;
sector += len >> SECTOR_SHIFT;
}
out:
bio_endio(bio, err);
bio_endio(bio);
return;
io_error:
bio_io_error(bio);
}
static int brd_rw_page(struct block_device *bdev, sector_t sector,
......
......@@ -175,11 +175,11 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
atomic_inc(&device->md_io.in_use); /* drbd_md_put_buffer() is in the completion handler */
device->md_io.submit_jif = jiffies;
if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
bio_endio(bio, -EIO);
bio_io_error(bio);
else
submit_bio(rw, bio);
wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
if (bio_flagged(bio, BIO_UPTODATE))
if (!bio->bi_error)
err = device->md_io.error;
out:
......
......@@ -941,36 +941,27 @@ static void drbd_bm_aio_ctx_destroy(struct kref *kref)
}
/* bv_page may be a copy, or may be the original */
static void drbd_bm_endio(struct bio *bio, int error)
static void drbd_bm_endio(struct bio *bio)
{
struct drbd_bm_aio_ctx *ctx = bio->bi_private;
struct drbd_device *device = ctx->device;
struct drbd_bitmap *b = device->bitmap;
unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
int uptodate = bio_flagged(bio, BIO_UPTODATE);
/* strange behavior of some lower level drivers...
* fail the request by clearing the uptodate flag,
* but do not return any error?!
* do we want to WARN() on this? */
if (!error && !uptodate)
error = -EIO;
if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
!bm_test_page_unchanged(b->bm_pages[idx]))
drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx);
if (error) {
if (bio->bi_error) {
/* ctx error will hold the completed-last non-zero error code,
* in case error codes differ. */
ctx->error = error;
ctx->error = bio->bi_error;
bm_set_page_io_err(b->bm_pages[idx]);
/* Not identical to on disk version of it.
* Is BM_PAGE_IO_ERROR enough? */
if (__ratelimit(&drbd_ratelimit_state))
drbd_err(device, "IO ERROR %d on bitmap page idx %u\n",
error, idx);
bio->bi_error, idx);
} else {
bm_clear_page_io_err(b->bm_pages[idx]);
dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx);
......@@ -1031,7 +1022,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
bio->bi_rw |= rw;
bio_endio(bio, -EIO);
bio_io_error(bio);
} else {
submit_bio(rw, bio);
/* this should not count as user activity and cause the
......
......@@ -1481,9 +1481,9 @@ extern int drbd_khelper(struct drbd_device *device, char *cmd);
/* drbd_worker.c */
/* bi_end_io handlers */
extern void drbd_md_endio(struct bio *bio, int error);
extern void drbd_peer_request_endio(struct bio *bio, int error);
extern void drbd_request_endio(struct bio *bio, int error);
extern void drbd_md_endio(struct bio *bio);
extern void drbd_peer_request_endio(struct bio *bio);
extern void drbd_request_endio(struct bio *bio);
extern int drbd_worker(struct drbd_thread *thi);
enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
void drbd_resync_after_changed(struct drbd_device *device);
......@@ -1604,12 +1604,13 @@ static inline void drbd_generic_make_request(struct drbd_device *device,
__release(local);
if (!bio->bi_bdev) {
drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n");
bio_endio(bio, -ENODEV);
bio->bi_error = -ENODEV;
bio_endio(bio);
return;
}
if (drbd_insert_fault(device, fault_type))
bio_endio(bio, -EIO);
bio_io_error(bio);
else
generic_make_request(bio);
}
......
......@@ -201,7 +201,8 @@ void start_new_tl_epoch(struct drbd_connection *connection)
void complete_master_bio(struct drbd_device *device,
struct bio_and_error *m)
{
bio_endio(m->bio, m->error);
m->bio->bi_error = m->error;
bio_endio(m->bio);
dec_ap_bio(device);
}
......@@ -1153,12 +1154,12 @@ drbd_submit_req_private_bio(struct drbd_request *req)
rw == WRITE ? DRBD_FAULT_DT_WR
: rw == READ ? DRBD_FAULT_DT_RD
: DRBD_FAULT_DT_RA))
bio_endio(bio, -EIO);
bio_io_error(bio);
else
generic_make_request(bio);
put_ldev(device);
} else
bio_endio(bio, -EIO);
bio_io_error(bio);
}
static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req)
......@@ -1191,7 +1192,8 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
/* only pass the error to the upper layers.
* if user cannot handle io errors, that's not our business. */
drbd_err(device, "could not kmalloc() req\n");
bio_endio(bio, -ENOMEM);
bio->bi_error = -ENOMEM;
bio_endio(bio);
return ERR_PTR(-ENOMEM);
}
req->start_jif = start_jif;
......
......@@ -65,12 +65,12 @@ rwlock_t global_state_lock;
/* used for synchronous meta data and bitmap IO
* submitted by drbd_md_sync_page_io()
*/
void drbd_md_endio(struct bio *bio, int error)
void drbd_md_endio(struct bio *bio)
{
struct drbd_device *device;
device = bio->bi_private;
device->md_io.error = error;
device->md_io.error = bio->bi_error;
/* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
* to timeout on the lower level device, and eventually detach from it.
......@@ -170,31 +170,20 @@ void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(l
/* writes on behalf of the partner, or resync writes,
* "submitted" by the receiver.
*/
void drbd_peer_request_endio(struct bio *bio, int error)
void drbd_peer_request_endio(struct bio *bio)
{
struct drbd_peer_request *peer_req = bio->bi_private;
struct drbd_device *device = peer_req->peer_device->device;
int uptodate = bio_flagged(bio, BIO_UPTODATE);
int is_write = bio_data_dir(bio) == WRITE;
int is_discard = !!(bio->bi_rw & REQ_DISCARD);