<div dir="ltr"><div class="gmail_extra"><div class="gmail_quote">On Fri, Apr 15, 2016 at 5:39 AM, <span dir="ltr"><<a href="mailto:mchristi@redhat.com" target="_blank">mchristi@redhat.com</a>></span> wrote:<br><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left-width:1px;border-left-style:solid;border-left-color:rgb(204,204,204);padding-left:1ex">From: Mike Christie <<a href="mailto:mchristi@redhat.com">mchristi@redhat.com</a>><br>
<br>
This patch has md use bio->bi_op for REQ_OPs and bio->bi_rw<br>
for rq_flag_bits.<br>
<br>
Signed-off-by: Mike Christie <<a href="mailto:mchristi@redhat.com">mchristi@redhat.com</a>><br>
Reviewed-by: Christoph Hellwig <<a href="mailto:hch@lst.de">hch@lst.de</a>><br>
Reviewed-by: Hannes Reinecke <<a href="mailto:hare@suse.com">hare@suse.com</a>><br>
---<br>
drivers/md/bitmap.c | 2 +-<br>
drivers/md/dm-raid.c | 5 +++--<br>
drivers/md/md.c | 11 +++++++----<br>
drivers/md/md.h | 3 ++-<br>
drivers/md/raid1.c | 34 ++++++++++++++++----------------<br>
drivers/md/raid10.c | 50 ++++++++++++++++++++++++++----------------------<br>
drivers/md/raid5-cache.c | 25 +++++++++++++++---------<br>
drivers/md/raid5.c | 48 ++++++++++++++++++++++++++--------------------<br>
8 files changed, 101 insertions(+), 77 deletions(-)<br>
<br></blockquote><div><br></div><div>Sorry, I thought this would thread properly: </div><div><a href="https://lkml.kernel.org/r/1461452709-6702-1-git-send-email-shaun@tancheff.com">https://lkml.kernel.org/r/1461452709-6702-1-git-send-email-shaun@tancheff.com</a><br></div><div><br></div><div>In raid0.c, raid10.c and raid5.c</div><div><br></div><div><div>A couple of checks for REQ_PREFLUSH flag should also check for</div><div>bi_op matching REQ_OP_FLUSH.</div><div><br></div><div>In raid1.c [r1_sync_page_io()] and raid10.c [r10_sync_page_io()]</div><div><br></div><div>Wrappers for sync_page_io() are passed READ/WRITE but need to</div><div>be passed REQ_OP_READ and REQ_OP_WRITE.</div></div><div><br></div><div>Anyway, my raid testing was getting weird hangs and corruption</div><div>without the patch.</div><div><br></div><div>Thanks!</div><div><br></div><div> </div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left-width:1px;border-left-style:solid;border-left-color:rgb(204,204,204);padding-left:1ex">
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c<br>
index 8b2e16f..9e8019e 100644<br>
--- a/drivers/md/bitmap.c<br>
+++ b/drivers/md/bitmap.c<br>
@@ -159,7 +159,7 @@ static int read_sb_page(struct mddev *mddev, loff_t offset,<br>
<br>
if (sync_page_io(rdev, target,<br>
roundup(size, bdev_logical_block_size(rdev->bdev)),<br>
- page, READ, true)) {<br>
+ page, REQ_OP_READ, 0, true)) {<br>
page->index = index;<br>
return 0;<br>
}<br>
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c<br>
index a090121..43a749c 100644<br>
--- a/drivers/md/dm-raid.c<br>
+++ b/drivers/md/dm-raid.c<br>
@@ -792,7 +792,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size)<br>
if (rdev->sb_loaded)<br>
return 0;<br>
<br>
- if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) {<br>
+ if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, 1)) {<br>
DMERR("Failed to read superblock of device at position %d",<br>
rdev->raid_disk);<br>
md_error(rdev->mddev, rdev);<br>
@@ -1646,7 +1646,8 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs)<br>
for (i = 0; i < rs->md.raid_disks; i++) {<br>
r = &rs->dev[i].rdev;<br>
if (test_bit(Faulty, &r->flags) && r->sb_page &&<br>
- sync_page_io(r, 0, r->sb_size, r->sb_page, READ, 1)) {<br>
+ sync_page_io(r, 0, r->sb_size, r->sb_page, REQ_OP_READ, 0,<br>
+ 1)) {<br>
DMINFO("Faulty %s device #%d has readable super block."<br>
" Attempting to revive it.",<br>
rs->raid_type->name, i);<br>
diff --git a/drivers/md/md.c b/drivers/md/md.c<br>
index ec3c98d..9c40368 100644<br>
--- a/drivers/md/md.c<br>
+++ b/drivers/md/md.c<br>
@@ -392,6 +392,7 @@ static void submit_flushes(struct work_struct *ws)<br>
bi->bi_end_io = md_end_flush;<br>
bi->bi_private = rdev;<br>
bi->bi_bdev = rdev->bdev;<br>
+ bi->bi_op = REQ_OP_WRITE;<br>
bi->bi_rw = WRITE_FLUSH;<br>
atomic_inc(&mddev->flush_pending);<br>
submit_bio(bi);<br>
@@ -741,6 +742,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,<br>
bio_add_page(bio, page, size, 0);<br>
bio->bi_private = rdev;<br>
bio->bi_end_io = super_written;<br>
+ bio->bi_op = REQ_OP_WRITE;<br>
bio->bi_rw = WRITE_FLUSH_FUA;<br>
<br>
atomic_inc(&mddev->pending_writes);<br>
@@ -754,14 +756,15 @@ void md_super_wait(struct mddev *mddev)<br>
}<br>
<br>
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,<br>
- struct page *page, int rw, bool metadata_op)<br>
+ struct page *page, int op, int op_flags, bool metadata_op)<br>
{<br>
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);<br>
int ret;<br>
<br>
bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?<br>
rdev->meta_bdev : rdev->bdev;<br>
- bio->bi_rw = rw;<br>
+ bio->bi_op = op;<br>
+ bio->bi_rw = op_flags;<br>
if (metadata_op)<br>
bio->bi_iter.bi_sector = sector + rdev->sb_start;<br>
else if (rdev->mddev->reshape_position != MaxSector &&<br>
@@ -787,7 +790,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size)<br>
if (rdev->sb_loaded)<br>
return 0;<br>
<br>
- if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))<br>
+ if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))<br>
goto fail;<br>
rdev->sb_loaded = 1;<br>
return 0;<br>
@@ -1473,7 +1476,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_<br>
return -EINVAL;<br>
bb_sector = (long long)offset;<br>
if (!sync_page_io(rdev, bb_sector, sectors << 9,<br>
- rdev->bb_page, READ, true))<br>
+ rdev->bb_page, REQ_OP_READ, 0, true))<br>
return -EIO;<br>
bbp = (u64 *)page_address(rdev->bb_page);<br>
rdev->badblocks.shift = sb->bblog_shift;<br>
diff --git a/drivers/md/md.h b/drivers/md/md.h<br>
index b5c4be7..2e0918f 100644<br>
--- a/drivers/md/md.h<br>
+++ b/drivers/md/md.h<br>
@@ -618,7 +618,8 @@ extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,<br>
sector_t sector, int size, struct page *page);<br>
extern void md_super_wait(struct mddev *mddev);<br>
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,<br>
- struct page *page, int rw, bool metadata_op);<br>
+ struct page *page, int op, int op_flags,<br>
+ bool metadata_op);<br>
extern void md_do_sync(struct md_thread *thread);<br>
extern void md_new_event(struct mddev *mddev);<br>
extern int md_allow_write(struct mddev *mddev);<br>
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c<br>
index 424df7e..c7abd2d 100644<br>
--- a/drivers/md/raid1.c<br>
+++ b/drivers/md/raid1.c<br>
@@ -759,7 +759,7 @@ static void flush_pending_writes(struct r1conf *conf)<br>
while (bio) { /* submit pending writes */<br>
struct bio *next = bio->bi_next;<br>
bio->bi_next = NULL;<br>
- if (unlikely((bio->bi_rw & REQ_DISCARD) &&<br>
+ if (unlikely((bio->bi_op == REQ_OP_DISCARD) &&<br>
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))<br>
/* Just ignore it */<br>
bio_endio(bio);<br>
@@ -1033,7 +1033,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)<br>
while (bio) { /* submit pending writes */<br>
struct bio *next = bio->bi_next;<br>
bio->bi_next = NULL;<br>
- if (unlikely((bio->bi_rw & REQ_DISCARD) &&<br>
+ if (unlikely((bio->bi_op == REQ_OP_DISCARD) &&<br>
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))<br>
/* Just ignore it */<br>
bio_endio(bio);<br>
@@ -1053,12 +1053,11 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)<br>
int i, disks;<br>
struct bitmap *bitmap;<br>
unsigned long flags;<br>
+ const int op = bio->bi_op;<br>
const int rw = bio_data_dir(bio);<br>
const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);<br>
const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));<br>
- const unsigned long do_discard = (bio->bi_rw<br>
- & (REQ_DISCARD | REQ_SECURE));<br>
- const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);<br>
+ const unsigned long do_sec = (bio->bi_rw & REQ_SECURE);<br>
struct md_rdev *blocked_rdev;<br>
struct blk_plug_cb *cb;<br>
struct raid1_plug_cb *plug = NULL;<br>
@@ -1166,7 +1165,8 @@ read_again:<br>
mirror->rdev->data_offset;<br>
read_bio->bi_bdev = mirror->rdev->bdev;<br>
read_bio->bi_end_io = raid1_end_read_request;<br>
- read_bio->bi_rw = READ | do_sync;<br>
+ read_bio->bi_op = op;<br>
+ read_bio->bi_rw = do_sync;<br>
read_bio->bi_private = r1_bio;<br>
<br>
if (max_sectors < r1_bio->sectors) {<br>
@@ -1376,8 +1376,9 @@ read_again:<br>
conf->mirrors[i].rdev->data_offset);<br>
mbio->bi_bdev = conf->mirrors[i].rdev->bdev;<br>
mbio->bi_end_io = raid1_end_write_request;<br>
+ mbio->bi_op = op;<br>
mbio->bi_rw =<br>
- WRITE | do_flush_fua | do_sync | do_discard | do_same;<br>
+ do_flush_fua | do_sync | do_sec;<br>
mbio->bi_private = r1_bio;<br>
<br>
atomic_inc(&r1_bio->remaining);<br>
@@ -1771,7 +1772,7 @@ static void end_sync_write(struct bio *bio)<br>
static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,<br>
int sectors, struct page *page, int rw)<br>
{<br>
- if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))<br>
+ if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))<br>
/* success */<br>
return 1;<br>
if (rw == WRITE) {<br>
@@ -1825,7 +1826,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)<br>
rdev = conf->mirrors[d].rdev;<br>
if (sync_page_io(rdev, sect, s<<9,<br>
bio->bi_io_vec[idx].bv_page,<br>
- READ, false)) {<br>
+ REQ_OP_READ, 0, false)) {<br>
success = 1;<br>
break;<br>
}<br>
@@ -2030,7 +2031,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)<br>
!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))<br>
continue;<br>
<br>
- wbio->bi_rw = WRITE;<br>
+ wbio->bi_op = REQ_OP_WRITE;<br>
wbio->bi_end_io = end_sync_write;<br>
atomic_inc(&r1_bio->remaining);<br>
md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));<br>
@@ -2090,7 +2091,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,<br>
is_badblock(rdev, sect, s,<br>
&first_bad, &bad_sectors) == 0 &&<br>
sync_page_io(rdev, sect, s<<9,<br>
- conf->tmppage, READ, false))<br>
+ conf->tmppage, REQ_OP_READ, 0, false))<br>
success = 1;<br>
else {<br>
d++;<br>
@@ -2201,7 +2202,7 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)<br>
wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);<br>
}<br>
<br>
- wbio->bi_rw = WRITE;<br>
+ wbio->bi_op = REQ_OP_WRITE;<br>
wbio->bi_iter.bi_sector = r1_bio->sector;<br>
wbio->bi_iter.bi_size = r1_bio->sectors << 9;<br>
<br>
@@ -2344,7 +2345,8 @@ read_more:<br>
bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;<br>
bio->bi_bdev = rdev->bdev;<br>
bio->bi_end_io = raid1_end_read_request;<br>
- bio->bi_rw = READ | do_sync;<br>
+ bio->bi_op = REQ_OP_READ;<br>
+ bio->bi_rw = do_sync;<br>
bio->bi_private = r1_bio;<br>
if (max_sectors < r1_bio->sectors) {<br>
/* Drat - have to split this up more */<br>
@@ -2572,7 +2574,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,<br>
if (i < conf->raid_disks)<br>
still_degraded = 1;<br>
} else if (!test_bit(In_sync, &rdev->flags)) {<br>
- bio->bi_rw = WRITE;<br>
+ bio->bi_op = REQ_OP_WRITE;<br>
bio->bi_end_io = end_sync_write;<br>
write_targets ++;<br>
} else {<br>
@@ -2599,7 +2601,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,<br>
if (disk < 0)<br>
disk = i;<br>
}<br>
- bio->bi_rw = READ;<br>
+ bio->bi_op = REQ_OP_READ;<br>
bio->bi_end_io = end_sync_read;<br>
read_targets++;<br>
} else if (!test_bit(WriteErrorSeen, &rdev->flags) &&<br>
@@ -2611,7 +2613,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,<br>
* if we are doing resync or repair. Otherwise, leave<br>
* this device alone for this sync request.<br>
*/<br>
- bio->bi_rw = WRITE;<br>
+ bio->bi_op = REQ_OP_WRITE;<br>
bio->bi_end_io = end_sync_write;<br>
write_targets++;<br>
}<br>
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c<br>
index 4736be8..63cd985 100644<br>
--- a/drivers/md/raid10.c<br>
+++ b/drivers/md/raid10.c<br>
@@ -865,7 +865,7 @@ static void flush_pending_writes(struct r10conf *conf)<br>
while (bio) { /* submit pending writes */<br>
struct bio *next = bio->bi_next;<br>
bio->bi_next = NULL;<br>
- if (unlikely((bio->bi_rw & REQ_DISCARD) &&<br>
+ if (unlikely((bio->bi_op == REQ_OP_DISCARD) &&<br>
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))<br>
/* Just ignore it */<br>
bio_endio(bio);<br>
@@ -1041,7 +1041,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)<br>
while (bio) { /* submit pending writes */<br>
struct bio *next = bio->bi_next;<br>
bio->bi_next = NULL;<br>
- if (unlikely((bio->bi_rw & REQ_DISCARD) &&<br>
+ if (unlikely((bio->bi_op == REQ_OP_DISCARD) &&<br>
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))<br>
/* Just ignore it */<br>
bio_endio(bio);<br>
@@ -1058,12 +1058,11 @@ static void __make_request(struct mddev *mddev, struct bio *bio)<br>
struct r10bio *r10_bio;<br>
struct bio *read_bio;<br>
int i;<br>
+ const int op = bio->bi_op;<br>
const int rw = bio_data_dir(bio);<br>
const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);<br>
const unsigned long do_fua = (bio->bi_rw & REQ_FUA);<br>
- const unsigned long do_discard = (bio->bi_rw<br>
- & (REQ_DISCARD | REQ_SECURE));<br>
- const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);<br>
+ const unsigned long do_sec = (bio->bi_rw & REQ_SECURE);<br>
unsigned long flags;<br>
struct md_rdev *blocked_rdev;<br>
struct blk_plug_cb *cb;<br>
@@ -1156,7 +1155,8 @@ read_again:<br>
choose_data_offset(r10_bio, rdev);<br>
read_bio->bi_bdev = rdev->bdev;<br>
read_bio->bi_end_io = raid10_end_read_request;<br>
- read_bio->bi_rw = READ | do_sync;<br>
+ read_bio->bi_op = op;<br>
+ read_bio->bi_rw = do_sync;<br>
read_bio->bi_private = r10_bio;<br>
<br>
if (max_sectors < r10_bio->sectors) {<br>
@@ -1363,8 +1363,9 @@ retry_write:<br>
rdev));<br>
mbio->bi_bdev = rdev->bdev;<br>
mbio->bi_end_io = raid10_end_write_request;<br>
+ mbio->bi_op = op;<br>
mbio->bi_rw =<br>
- WRITE | do_sync | do_fua | do_discard | do_same;<br>
+ do_sync | do_fua | do_sec;<br>
mbio->bi_private = r10_bio;<br>
<br>
atomic_inc(&r10_bio->remaining);<br>
@@ -1406,8 +1407,9 @@ retry_write:<br>
r10_bio, rdev));<br>
mbio->bi_bdev = rdev->bdev;<br>
mbio->bi_end_io = raid10_end_write_request;<br>
+ mbio->bi_op = op;<br>
mbio->bi_rw =<br>
- WRITE | do_sync | do_fua | do_discard | do_same;<br>
+ do_sync | do_fua | do_sec;<br>
mbio->bi_private = r10_bio;<br>
<br>
atomic_inc(&r10_bio->remaining);<br>
@@ -1992,7 +1994,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)<br>
<br>
tbio->bi_vcnt = vcnt;<br>
tbio->bi_iter.bi_size = fbio->bi_iter.bi_size;<br>
- tbio->bi_rw = WRITE;<br>
+ tbio->bi_op = REQ_OP_WRITE;<br>
tbio->bi_private = r10_bio;<br>
tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;<br>
tbio->bi_end_io = end_sync_write;<br>
@@ -2078,7 +2080,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)<br>
addr,<br>
s << 9,<br>
bio->bi_io_vec[idx].bv_page,<br>
- READ, false);<br>
+ REQ_OP_READ, 0, false);<br>
if (ok) {<br>
rdev = conf->mirrors[dw].rdev;<br>
addr = r10_bio->devs[1].addr + sect;<br>
@@ -2086,7 +2088,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)<br>
addr,<br>
s << 9,<br>
bio->bi_io_vec[idx].bv_page,<br>
- WRITE, false);<br>
+ REQ_OP_WRITE, 0, false);<br>
if (!ok) {<br>
set_bit(WriteErrorSeen, &rdev->flags);<br>
if (!test_and_set_bit(WantReplacement,<br>
@@ -2213,7 +2215,7 @@ static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,<br>
if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)<br>
&& (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))<br>
return -1;<br>
- if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))<br>
+ if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))<br>
/* success */<br>
return 1;<br>
if (rw == WRITE) {<br>
@@ -2299,7 +2301,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10<br>
r10_bio->devs[sl].addr +<br>
sect,<br>
s<<9,<br>
- conf->tmppage, READ, false);<br>
+ conf->tmppage,<br>
+ REQ_OP_READ, 0, false);<br>
rdev_dec_pending(rdev, mddev);<br>
rcu_read_lock();<br>
if (success)<br>
@@ -2474,7 +2477,7 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)<br>
choose_data_offset(r10_bio, rdev) +<br>
(sector - r10_bio->sector));<br>
wbio->bi_bdev = rdev->bdev;<br>
- wbio->bi_rw = WRITE;<br>
+ wbio->bi_op = REQ_OP_WRITE;<br>
<br>
if (submit_bio_wait(wbio) < 0)<br>
/* Failure! */<br>
@@ -2550,7 +2553,8 @@ read_more:<br>
bio->bi_iter.bi_sector = r10_bio->devs[slot].addr<br>
+ choose_data_offset(r10_bio, rdev);<br>
bio->bi_bdev = rdev->bdev;<br>
- bio->bi_rw = READ | do_sync;<br>
+ bio->bi_op = REQ_OP_READ;<br>
+ bio->bi_rw = do_sync;<br>
bio->bi_private = r10_bio;<br>
bio->bi_end_io = raid10_end_read_request;<br>
if (max_sectors < r10_bio->sectors) {<br>
@@ -3040,7 +3044,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,<br>
biolist = bio;<br>
bio->bi_private = r10_bio;<br>
bio->bi_end_io = end_sync_read;<br>
- bio->bi_rw = READ;<br>
+ bio->bi_op = REQ_OP_READ;<br>
from_addr = r10_bio->devs[j].addr;<br>
bio->bi_iter.bi_sector = from_addr +<br>
rdev->data_offset;<br>
@@ -3066,7 +3070,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,<br>
biolist = bio;<br>
bio->bi_private = r10_bio;<br>
bio->bi_end_io = end_sync_write;<br>
- bio->bi_rw = WRITE;<br>
+ bio->bi_op = REQ_OP_WRITE;<br>
bio->bi_iter.bi_sector = to_addr<br>
+ rdev->data_offset;<br>
bio->bi_bdev = rdev->bdev;<br>
@@ -3095,7 +3099,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,<br>
biolist = bio;<br>
bio->bi_private = r10_bio;<br>
bio->bi_end_io = end_sync_write;<br>
- bio->bi_rw = WRITE;<br>
+ bio->bi_op = REQ_OP_WRITE;<br>
bio->bi_iter.bi_sector = to_addr +<br>
rdev->data_offset;<br>
bio->bi_bdev = rdev->bdev;<br>
@@ -3215,7 +3219,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,<br>
biolist = bio;<br>
bio->bi_private = r10_bio;<br>
bio->bi_end_io = end_sync_read;<br>
- bio->bi_rw = READ;<br>
+ bio->bi_op = REQ_OP_READ;<br>
bio->bi_iter.bi_sector = sector +<br>
conf->mirrors[d].rdev->data_offset;<br>
bio->bi_bdev = conf->mirrors[d].rdev->bdev;<br>
@@ -3237,7 +3241,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,<br>
biolist = bio;<br>
bio->bi_private = r10_bio;<br>
bio->bi_end_io = end_sync_write;<br>
- bio->bi_rw = WRITE;<br>
+ bio->bi_op = REQ_OP_WRITE;<br>
bio->bi_iter.bi_sector = sector +<br>
conf->mirrors[d].replacement->data_offset;<br>
bio->bi_bdev = conf->mirrors[d].replacement->bdev;<br>
@@ -4320,7 +4324,7 @@ read_more:<br>
+ rdev->data_offset);<br>
read_bio->bi_private = r10_bio;<br>
read_bio->bi_end_io = end_sync_read;<br>
- read_bio->bi_rw = READ;<br>
+ read_bio->bi_op = REQ_OP_READ;<br>
read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);<br>
read_bio->bi_error = 0;<br>
read_bio->bi_vcnt = 0;<br>
@@ -4354,7 +4358,7 @@ read_more:<br>
rdev2->new_data_offset;<br>
b->bi_private = r10_bio;<br>
b->bi_end_io = end_reshape_write;<br>
- b->bi_rw = WRITE;<br>
+ b->bi_op = REQ_OP_WRITE;<br>
b->bi_next = blist;<br>
blist = b;<br>
}<br>
@@ -4522,7 +4526,7 @@ static int handle_reshape_read_error(struct mddev *mddev,<br>
addr,<br>
s << 9,<br>
bvec[idx].bv_page,<br>
- READ, false);<br>
+ REQ_OP_READ, 0, false);<br>
if (success)<br>
break;<br>
failed:<br>
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c<br>
index 90c2618..56b20c3 100644<br>
--- a/drivers/md/raid5-cache.c<br>
+++ b/drivers/md/raid5-cache.c<br>
@@ -261,7 +261,7 @@ static struct bio *r5l_bio_alloc(struct r5l_log *log)<br>
{<br>
struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs);<br>
<br>
- bio->bi_rw = WRITE;<br>
+ bio->bi_op = REQ_OP_WRITE;<br>
bio->bi_bdev = log->rdev->bdev;<br>
bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;<br>
<br>
@@ -686,6 +686,7 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log)<br>
bio_reset(&log->flush_bio);<br>
log->flush_bio.bi_bdev = log->rdev->bdev;<br>
log->flush_bio.bi_end_io = r5l_log_flush_endio;<br>
+ log->flush_bio.bi_op = REQ_OP_WRITE;<br>
log->flush_bio.bi_rw = WRITE_FLUSH;<br>
submit_bio(&log->flush_bio);<br>
}<br>
@@ -882,7 +883,8 @@ static int r5l_read_meta_block(struct r5l_log *log,<br>
struct r5l_meta_block *mb;<br>
u32 crc, stored_crc;<br>
<br>
- if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, READ, false))<br>
+ if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, REQ_OP_READ, 0,<br>
+ false))<br>
return -EIO;<br>
<br>
mb = page_address(page);<br>
@@ -927,7 +929,8 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log,<br>
&disk_index, sh);<br>
<br>
sync_page_io(log->rdev, *log_offset, PAGE_SIZE,<br>
- sh->dev[disk_index].page, READ, false);<br>
+ sh->dev[disk_index].page, REQ_OP_READ, 0,<br>
+ false);<br>
sh->dev[disk_index].log_checksum =<br>
le32_to_cpu(payload->checksum[0]);<br>
set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);<br>
@@ -935,7 +938,8 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log,<br>
} else {<br>
disk_index = sh->pd_idx;<br>
sync_page_io(log->rdev, *log_offset, PAGE_SIZE,<br>
- sh->dev[disk_index].page, READ, false);<br>
+ sh->dev[disk_index].page, REQ_OP_READ, 0,<br>
+ false);<br>
sh->dev[disk_index].log_checksum =<br>
le32_to_cpu(payload->checksum[0]);<br>
set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);<br>
@@ -945,7 +949,7 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log,<br>
sync_page_io(log->rdev,<br>
r5l_ring_add(log, *log_offset, BLOCK_SECTORS),<br>
PAGE_SIZE, sh->dev[disk_index].page,<br>
- READ, false);<br>
+ REQ_OP_READ, 0, false);<br>
sh->dev[disk_index].log_checksum =<br>
le32_to_cpu(payload->checksum[1]);<br>
set_bit(R5_Wantwrite,<br>
@@ -987,11 +991,13 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log,<br>
rdev = rcu_dereference(conf->disks[disk_index].rdev);<br>
if (rdev)<br>
sync_page_io(rdev, stripe_sect, PAGE_SIZE,<br>
- sh->dev[disk_index].page, WRITE, false);<br>
+ sh->dev[disk_index].page, REQ_OP_WRITE, 0,<br>
+ false);<br>
rrdev = rcu_dereference(conf->disks[disk_index].replacement);<br>
if (rrdev)<br>
sync_page_io(rrdev, stripe_sect, PAGE_SIZE,<br>
- sh->dev[disk_index].page, WRITE, false);<br>
+ sh->dev[disk_index].page, REQ_OP_WRITE, 0,<br>
+ false);<br>
}<br>
raid5_release_stripe(sh);<br>
return 0;<br>
@@ -1063,7 +1069,8 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,<br>
crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);<br>
mb->checksum = cpu_to_le32(crc);<br>
<br>
- if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) {<br>
+ if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,<br>
+ WRITE_FUA, false)) {<br>
__free_page(page);<br>
return -EIO;<br>
}<br>
@@ -1138,7 +1145,7 @@ static int r5l_load_log(struct r5l_log *log)<br>
if (!page)<br>
return -ENOMEM;<br>
<br>
- if (!sync_page_io(rdev, cp, PAGE_SIZE, page, READ, false)) {<br>
+ if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) {<br>
ret = -EIO;<br>
goto ioerr;<br>
}<br>
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c<br>
index 8ab8b65..c36b817 100644<br>
--- a/drivers/md/raid5.c<br>
+++ b/drivers/md/raid5.c<br>
@@ -806,7 +806,8 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh<br>
dd_idx = 0;<br>
while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)<br>
dd_idx++;<br>
- if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw)<br>
+ if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw ||<br>
+ head->dev[dd_idx].towrite->bi_op != sh->dev[dd_idx].towrite->bi_op)<br>
goto unlock_out;<br>
<br>
if (head->batch_head) {<br>
@@ -891,29 +892,32 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)<br>
if (r5l_write_stripe(conf->log, sh) == 0)<br>
return;<br>
for (i = disks; i--; ) {<br>
- int rw;<br>
+ int op;<br>
+ int op_flags = 0;<br>
int replace_only = 0;<br>
struct bio *bi, *rbi;<br>
struct md_rdev *rdev, *rrdev = NULL;<br>
<br>
sh = head_sh;<br>
if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {<br>
- if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))<br>
- rw = WRITE_FUA;<br>
- else<br>
- rw = WRITE;<br>
+ if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) {<br>
+ op = REQ_OP_WRITE;<br>
+ op_flags = WRITE_FUA;<br>
+ } else {<br>
+ op = REQ_OP_WRITE;<br>
+ }<br>
if (test_bit(R5_Discard, &sh->dev[i].flags))<br>
- rw |= REQ_DISCARD;<br>
+ op = REQ_OP_DISCARD;<br>
} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))<br>
- rw = READ;<br>
+ op = REQ_OP_READ;<br>
else if (test_and_clear_bit(R5_WantReplace,<br>
&sh->dev[i].flags)) {<br>
- rw = WRITE;<br>
+ op = REQ_OP_WRITE;<br>
replace_only = 1;<br>
} else<br>
continue;<br>
if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))<br>
- rw |= REQ_SYNC;<br>
+ op_flags |= REQ_SYNC;<br>
<br>
again:<br>
bi = &sh->dev[i].req;<br>
@@ -927,7 +931,7 @@ again:<br>
rdev = rrdev;<br>
rrdev = NULL;<br>
}<br>
- if (rw & WRITE) {<br>
+ if (op_is_write(op)) {<br>
if (replace_only)<br>
rdev = NULL;<br>
if (rdev == rrdev)<br>
@@ -953,7 +957,7 @@ again:<br>
* need to check for writes. We never accept write errors<br>
* on the replacement, so we don't to check rrdev.<br>
*/<br>
- while ((rw & WRITE) && rdev &&<br>
+ while (op_is_write(op) && rdev &&<br>
test_bit(WriteErrorSeen, &rdev->flags)) {<br>
sector_t first_bad;<br>
int bad_sectors;<br>
@@ -995,8 +999,9 @@ again:<br>
<br>
bio_reset(bi);<br>
bi->bi_bdev = rdev->bdev;<br>
- bi->bi_rw = rw;<br>
- bi->bi_end_io = (rw & WRITE)<br>
+ bi->bi_op = op;<br>
+ bi->bi_rw = op_flags;<br>
+ bi->bi_end_io = op_is_write(op)<br>
? raid5_end_write_request<br>
: raid5_end_read_request;<br>
bi->bi_private = sh;<br>
@@ -1027,7 +1032,7 @@ again:<br>
* If this is discard request, set bi_vcnt 0. We don't<br>
* want to confuse SCSI because SCSI will replace payload<br>
*/<br>
- if (rw & REQ_DISCARD)<br>
+ if (op == REQ_OP_DISCARD)<br>
bi->bi_vcnt = 0;<br>
if (rrdev)<br>
set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);<br>
@@ -1047,8 +1052,9 @@ again:<br>
<br>
bio_reset(rbi);<br>
rbi->bi_bdev = rrdev->bdev;<br>
- rbi->bi_rw = rw;<br>
- BUG_ON(!(rw & WRITE));<br>
+ rbi->bi_op = op;<br>
+ rbi->bi_rw = op_flags;<br>
+ BUG_ON(!op_is_write(op));<br>
rbi->bi_end_io = raid5_end_write_request;<br>
rbi->bi_private = sh;<br>
<br>
@@ -1076,7 +1082,7 @@ again:<br>
* If this is discard request, set bi_vcnt 0. We don't<br>
* want to confuse SCSI because SCSI will replace payload<br>
*/<br>
- if (rw & REQ_DISCARD)<br>
+ if (op == REQ_OP_DISCARD)<br>
rbi->bi_vcnt = 0;<br>
if (conf->mddev->gendisk)<br>
trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),<br>
@@ -1085,7 +1091,7 @@ again:<br>
generic_make_request(rbi);<br>
}<br>
if (!rdev && !rrdev) {<br>
- if (rw & WRITE)<br>
+ if (op_is_write(op))<br>
set_bit(STRIPE_DEGRADED, &sh->state);<br>
pr_debug("skip op %ld on disc %d for sector %llu\n",<br>
bi->bi_rw, i, (unsigned long long)sh->sector);<br>
@@ -1623,7 +1629,7 @@ again:<br>
set_bit(R5_WantFUA, &dev->flags);<br>
if (wbi->bi_rw & REQ_SYNC)<br>
set_bit(R5_SyncIO, &dev->flags);<br>
- if (wbi->bi_rw & REQ_DISCARD)<br>
+ if (wbi->bi_op == REQ_OP_DISCARD)<br>
set_bit(R5_Discard, &dev->flags);<br>
else {<br>
tx = async_copy_data(1, wbi, &dev->page,<br>
@@ -5178,7 +5184,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)<br>
return;<br>
}<br>
<br>
- if (unlikely(bi->bi_rw & REQ_DISCARD)) {<br>
+ if (unlikely(bi->bi_op == REQ_OP_DISCARD)) {<br>
make_discard_request(mddev, bi);<br>
return;<br>
}<br>
<span class=""><font color="#888888">--<br>
2.7.2<br>
<br>
--<br>
dm-devel mailing list<br>
<a href="mailto:dm-devel@redhat.com">dm-devel@redhat.com</a><br>
<a href="https://www.redhat.com/mailman/listinfo/dm-devel" rel="noreferrer" target="_blank">https://www.redhat.com/mailman/listinfo/dm-devel</a><br>
</font></span></blockquote></div><br><br clear="all"><div><br></div>-- <br><div class="gmail_signature"><div dir="ltr"><div><div dir="ltr"><div>Shaun Tancheff</div><div><br></div></div></div></div></div>
</div></div>