<div dir="ltr">NACK,<div>loop conditionals have to be on rs->raid_disks, as we add legs to raid1 or stripes to raid4/5/6/10 in which case rs->raid_disks will be larger than rs->md.raid_disks.<div>As a result, the new legs wouldn't be processed during RAID validation or device iterations.<div><br><div>Will rework the patch and share here after running it through the lvm test suite raid tests...</div></div></div></div></div><br><div class="gmail_quote"><div dir="ltr" class="gmail_attr">On Mon, Jun 27, 2022 at 3:00 PM Mikulas Patocka <<a href="mailto:mpatocka@redhat.com">mpatocka@redhat.com</a>> wrote:<br></div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex">dm-raid allocates the array of devices with rs->raid_disks entries and<br>
> then accesses it in a loop for rs->md.raid_disks. During reshaping,
> rs->md.raid_disks may be greater than rs->raid_disks, so it accesses
> entries beyond the end of the array.
>
> We fix this bug by limiting the iteration to rs->raid_disks.
>
> Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
> Cc: stable@vger.kernel.org
>
> ---
> drivers/md/dm-raid.c | 12 ++++++------
> 1 file changed, 6 insertions(+), 6 deletions(-)
>
> Index: linux-2.6/drivers/md/dm-raid.c
> ===================================================================
> --- linux-2.6.orig/drivers/md/dm-raid.c 2022-06-27 14:45:30.000000000 +0200
> +++ linux-2.6/drivers/md/dm-raid.c 2022-06-27 14:54:02.000000000 +0200
> @@ -1004,7 +1004,7 @@ static int validate_raid_redundancy(stru
> unsigned int rebuilds_per_group = 0, copies;
> unsigned int group_size, last_group_start;
>
> - for (i = 0; i < rs->md.raid_disks; i++)
> + for (i = 0; i < rs->md.raid_disks && i < rs->raid_disks; i++)
> if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
> !rs->dev[i].rdev.sb_page)
> rebuild_cnt++;
> @@ -1047,7 +1047,7 @@ static int validate_raid_redundancy(stru
> * C D D E E
> */
> if (__is_raid10_near(rs->md.new_layout)) {
> - for (i = 0; i < rs->md.raid_disks; i++) {
> + for (i = 0; i < rs->md.raid_disks && i < rs->raid_disks; i++) {
> if (!(i % copies))
> rebuilds_per_group = 0;
> if ((!rs->dev[i].rdev.sb_page ||
> @@ -1073,7 +1073,7 @@ static int validate_raid_redundancy(stru
> group_size = (rs->md.raid_disks / copies);
> last_group_start = (rs->md.raid_disks / group_size) - 1;
> last_group_start *= group_size;
> - for (i = 0; i < rs->md.raid_disks; i++) {
> + for (i = 0; i < rs->md.raid_disks && i < rs->raid_disks; i++) {
> if (!(i % copies) && !(i > last_group_start))
> rebuilds_per_group = 0;
> if ((!rs->dev[i].rdev.sb_page ||
> @@ -1588,7 +1588,7 @@ static sector_t __rdev_sectors(struct ra
> {
> int i;
>
> - for (i = 0; i < rs->md.raid_disks; i++) {
> + for (i = 0; i < rs->md.raid_disks && i < rs->raid_disks; i++) {
> struct md_rdev *rdev = &rs->dev[i].rdev;
>
> if (!test_bit(Journal, &rdev->flags) &&
> @@ -3766,7 +3766,7 @@ static int raid_iterate_devices(struct d
> unsigned int i;
> int r = 0;
>
> - for (i = 0; !r && i < rs->md.raid_disks; i++)
> + for (i = 0; !r && i < rs->md.raid_disks && i < rs->raid_disks; i++)
> if (rs->dev[i].data_dev)
> r = fn(ti,
> rs->dev[i].data_dev,
> @@ -3817,7 +3817,7 @@ static void attempt_restore_of_faulty_de
>
> memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));
>
> - for (i = 0; i < mddev->raid_disks; i++) {
> + for (i = 0; i < mddev->raid_disks && i < rs->raid_disks; i++) {
> r = &rs->dev[i].rdev;
> /* HM FIXME: enhance journal device recovery processing */
> if (test_bit(Journal, &r->flags))
>
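To illustrate the point above (a sketch only, not the reworked patch): since rs->dev[] is allocated with rs->raid_disks entries, bounding the per-device loops by rs->raid_disks alone keeps the accesses within the allocation and still visits legs/stripes added for a takeover or reshape. Shown here on __rdev_sectors(); the function body outside the quoted hunk context is reconstructed from memory and may differ from the current tree:

/*
 * Sketch: iterate over rs->raid_disks, the size of the rs->dev[]
 * allocation, rather than rs->md.raid_disks.  This cannot run past
 * the end of the array and still covers legs added for a reshape,
 * where rs->raid_disks > rs->md.raid_disks.
 */
static sector_t __rdev_sectors(struct raid_set *rs)
{
	int i;

	for (i = 0; i < rs->raid_disks; i++) {
		struct md_rdev *rdev = &rs->dev[i].rdev;

		if (!test_bit(Journal, &rdev->flags) &&
		    rdev->bdev && rdev->sectors)
			return rdev->sectors;
	}

	return 0;
}

The same bound would apply to the other loops touched by the patch above, i.e. in validate_raid_redundancy(), raid_iterate_devices() and attempt_restore_of_faulty_devices().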