dm: implement REQ_FLUSH/FUA support for bio-based dm
This patch converts bio-based dm to support REQ_FLUSH/FUA instead of
the now deprecated REQ_HARDBARRIER.

* -EOPNOTSUPP handling logic dropped.

* Preflush is handled as before but postflush is dropped and replaced
  with passing down REQ_FUA to member request_queues.  This replaces
  one array-wide cache flush with member-specific FUA writes.

* __split_and_process_bio() now calls __clone_and_map_flush() directly
  for flushes and guarantees all FLUSH bio's going to targets are zero
  length.

* It's now guaranteed that all FLUSH bio's which are passed onto dm
  targets are zero length.  bio_empty_barrier() tests are replaced
  with REQ_FLUSH tests.

* Empty WRITE_BARRIERs are replaced with WRITE_FLUSHes.

* Dropped unlikely() around REQ_FLUSH tests.  Flushes are not unlikely
  enough to be marked with unlikely().

* The block layer now filters out REQ_FLUSH/FUA bio's if the
  request_queue doesn't support cache flushing.  Advertise the
  REQ_FLUSH | REQ_FUA capability.

* Request-based dm isn't converted yet.  dm_init_request_based_queue()
  resets flush support to 0 for now.  To avoid disturbing request-based
  dm code, dm->flush_error is added for bio-based dm while
  request-based dm continues to use dm->barrier_error.

Lightly tested linear, stripe, raid1, snap and crypt targets.  Please
proceed with caution as I'm not familiar with the code base.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: dm-devel@redhat.com
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
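For orientation before the dm-raid1.c hunks below, a minimal sketch of the
bio_empty_barrier() -> REQ_FLUSH test conversion described in the changelog.
The helper name is hypothetical and not part of the patch; flag names follow
the 2.6.36-era block headers:

	#include <linux/bio.h>
	#include <linux/blk_types.h>

	/* Hypothetical helper, not in the patch: the post-conversion flush test. */
	static inline bool dm_bio_is_flush(struct bio *bio)
	{
		/*
		 * Before: bio_empty_barrier(bio), which tested the barrier
		 * flag and that the bio carried no data.  After this patch,
		 * dm guarantees every FLUSH bio reaching a target is zero
		 * length, so the flag test alone suffices, and it is no
		 * longer wrapped in unlikely() since flushes are not rare.
		 */
		return bio->bi_rw & REQ_FLUSH;
	}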
@@ -259,7 +259,7 @@ static int mirror_flush(struct dm_target *ti)
 	struct dm_io_region io[ms->nr_mirrors];
 	struct mirror *m;
 	struct dm_io_request io_req = {
-		.bi_rw = WRITE_BARRIER,
+		.bi_rw = WRITE_FLUSH,
 		.mem.type = DM_IO_KMEM,
 		.mem.ptr.bvec = NULL,
 		.client = ms->io_client,
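WRITE_FLUSH is a drop-in replacement here because mirror_flush() issues a
zero-length write to every mirror leg; only the flag composition changes.
Roughly, with auxiliary sync/unplug bits elided (so treat the expansions as
approximate, not the exact header definitions):

	/* Approximate flag composition at the time of this series:        */
	/*   WRITE_BARRIER   ~ WRITE | ... | REQ_HARDBARRIER  (ordered)    */
	/*   WRITE_FLUSH     ~ WRITE | ... | REQ_FLUSH  (cache flush only) */
	/*   WRITE_FLUSH_FUA ~ WRITE | ... | REQ_FLUSH | REQ_FUA           */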
@@ -629,7 +629,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
 	struct dm_io_region io[ms->nr_mirrors], *dest = io;
 	struct mirror *m;
 	struct dm_io_request io_req = {
-		.bi_rw = WRITE | (bio->bi_rw & WRITE_BARRIER),
+		.bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
 		.mem.type = DM_IO_BVEC,
 		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
 		.notify.fn = write_callback,
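This mask change is what implements the "pass down REQ_FUA to member
request_queues" bullet from the changelog.  A sketch of the effect, assuming
the approximate flag composition noted above (illustrative values only, not
patch code):

	#include <linux/blk_types.h>
	#include <linux/fs.h>

	/* Illustrative only: what each mask preserves from an incoming bio. */
	static void mask_effect_sketch(unsigned long in)  /* e.g. WRITE | REQ_FUA */
	{
		/* Old: WRITE_BARRIER contains no REQ_FUA bit, so FUA was
		 * dropped and durability relied on the array-wide postflush. */
		unsigned long old_rw = WRITE | (in & WRITE_BARRIER);

		/* New: REQ_FLUSH and REQ_FUA survive the mask, so each member
		 * request_queue sees the preflush/FUA intent directly. */
		unsigned long new_rw = WRITE | (in & WRITE_FLUSH_FUA);

		(void)old_rw;
		(void)new_rw;
	}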
@@ -670,7 +670,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
 	bio_list_init(&requeue);
 
 	while ((bio = bio_list_pop(writes))) {
-		if (unlikely(bio_empty_barrier(bio))) {
+		if (bio->bi_rw & REQ_FLUSH) {
 			bio_list_add(&sync, bio);
 			continue;
 		}
@@ -1203,7 +1203,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
 	 * We need to dec pending if this was a write.
 	 */
 	if (rw == WRITE) {
-		if (likely(!bio_empty_barrier(bio)))
+		if (!(bio->bi_rw & REQ_FLUSH))
 			dm_rh_dec(ms->rh, map_context->ll);
 		return error;
 	}