<div dir="ltr">From 852b91efc183e9b87ac50f15fb4df86f26f73532 Mon Sep 17 00:00:00 2001<br>From: Heinz Mauelshagen <<a href="mailto:heinzm@redhat.com">heinzm@redhat.com</a>><br>Date: Mon, 27 Jun 2022 23:54:24 +0200<br>Subject: [PATCH] dm-raid: fix array out of bounds (OOB)<br><br>Supersedes "[PATCH] dm-raid: fix out of memory accesses in dm-raid"<br><br><br>On RAID mapping construction, dm-raid allocates an array rs->devs[rs->raid_disks]<br>for the raid device members.  rs->raid_disks is defined by the number of raid<br>metadata and image tupples passed into the target's constructor.<br><br>That number can be different from the current number of members for existing raid<br>sets as defined in their superblocks in case of layout changes requested:<br><br>- raid1 legs being added/removed<br><br>- raid4/5/6/10 number of stripes changed (stripe reshaping)<br><br>- takeover to higher raid level (e.g. raid5 -> raid6)<br><br>Accessing array members, rs->raid_disks has to be used in loops instead of<br>potentially larger rs->md.raid_disks causing OOB access on the devices array.<br><br>Patch changes instances prone to OOB.<br>Also ensures added devices are validated in validate_raid_redundancy().<br><br>Initially discovered by KASAN.<br><br>Passes all LVM2 RAID tests (KASAN enabled).<br><br>Signed-off-by: Heinz Mauelshagen <<a href="mailto:heinzm@redhat.com">heinzm@redhat.com</a>><br>Tested-by: Heinz Mauelshagen <<a href="mailto:heinzm@redhat.com">heinzm@redhat.com</a>><br>---<br> drivers/md/dm-raid.c | 22 ++++++++++++----------<br> 1 file changed, 12 insertions(+), 10 deletions(-)<br><br>diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c<br>index 2b26435a6946..bcec0e1a049d 100644<br>--- a/drivers/md/dm-raid.c<br>+++ b/drivers/md/dm-raid.c<br>@@ -1001,12 +1001,13 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)<br> static int validate_raid_redundancy(struct raid_set *rs)<br> {<br>        unsigned int i, rebuild_cnt = 0;<br>-       unsigned int rebuilds_per_group = 0, copies;<br>+       unsigned int rebuilds_per_group = 0, copies, raid_disks;<br>        unsigned int group_size, last_group_start;<br> <br>-       for (i = 0; i < rs->md.raid_disks; i++)<br>-               if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||<br>-                   !rs->dev[i].rdev.sb_page)<br>+       for (i = 0; i < rs->raid_disks; i++)<br>+               if (!test_bit(FirstUse, &rs->dev[i].rdev.flags) &&<br>+                   ((!test_bit(In_sync, &rs->dev[i].rdev.flags) ||<br>+                     !rs->dev[i].rdev.sb_page)))<br>                        rebuild_cnt++;<br> <br>        switch (rs->md.level) {<br>@@ -1046,8 +1047,9 @@ static int validate_raid_redundancy(struct raid_set *rs)<br>                 *          A    A    B    B    C<br>                 *          C    D    D    E    E<br>                 */<br>+               raid_disks = min(rs->raid_disks, rs->md.raid_disks);<br>                if (__is_raid10_near(rs->md.new_layout)) {<br>-                       for (i = 0; i < rs->md.raid_disks; i++) {<br>+                       for (i = 0; i < raid_disks; i++) {<br>                                if (!(i % copies))<br>                                        rebuilds_per_group = 0;<br>                                if ((!rs->dev[i].rdev.sb_page ||<br>@@ -1070,10 +1072,10 @@ static int validate_raid_redundancy(struct raid_set *rs)<br>                 * results in the need to treat the last (potentially larger)<br>                 * set differently.<br>                 
 drivers/md/dm-raid.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 2b26435a6946..bcec0e1a049d 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1001,12 +1001,13 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
 static int validate_raid_redundancy(struct raid_set *rs)
 {
        unsigned int i, rebuild_cnt = 0;
-       unsigned int rebuilds_per_group = 0, copies;
+       unsigned int rebuilds_per_group = 0, copies, raid_disks;
        unsigned int group_size, last_group_start;
 
-       for (i = 0; i < rs->md.raid_disks; i++)
-               if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
-                   !rs->dev[i].rdev.sb_page)
+       for (i = 0; i < rs->raid_disks; i++)
+               if (!test_bit(FirstUse, &rs->dev[i].rdev.flags) &&
+                   ((!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
+                     !rs->dev[i].rdev.sb_page)))
                        rebuild_cnt++;
 
        switch (rs->md.level) {
@@ -1046,8 +1047,9 @@ static int validate_raid_redundancy(struct raid_set *rs)
                 *          A    A    B    B    C
                 *          C    D    D    E    E
                 */
+               raid_disks = min(rs->raid_disks, rs->md.raid_disks);
                if (__is_raid10_near(rs->md.new_layout)) {
-                       for (i = 0; i < rs->md.raid_disks; i++) {
+                       for (i = 0; i < raid_disks; i++) {
                                if (!(i % copies))
                                        rebuilds_per_group = 0;
                                if ((!rs->dev[i].rdev.sb_page ||
@@ -1070,10 +1072,10 @@ static int validate_raid_redundancy(struct raid_set *rs)
                 * results in the need to treat the last (potentially larger)
                 * set differently.
                 */
-               group_size = (rs->md.raid_disks / copies);
-               last_group_start = (rs->md.raid_disks / group_size) - 1;
+               group_size = (raid_disks / copies);
+               last_group_start = (raid_disks / group_size) - 1;
                last_group_start *= group_size;
-               for (i = 0; i < rs->md.raid_disks; i++) {
+               for (i = 0; i < raid_disks; i++) {
                        if (!(i % copies) && !(i > last_group_start))
                                rebuilds_per_group = 0;
                        if ((!rs->dev[i].rdev.sb_page ||
@@ -1588,7 +1590,7 @@ static sector_t __rdev_sectors(struct raid_set *rs)
 {
        int i;
 
-       for (i = 0; i < rs->md.raid_disks; i++) {
+       for (i = 0; i < rs->raid_disks; i++) {
                struct md_rdev *rdev = &rs->dev[i].rdev;
 
                if (!test_bit(Journal, &rdev->flags) &&
@@ -3771,7 +3773,7 @@ static int raid_iterate_devices(struct dm_target *ti,
        unsigned int i;
        int r = 0;
 
-       for (i = 0; !r && i < rs->md.raid_disks; i++)
+       for (i = 0; !r && i < rs->raid_disks; i++)
                if (rs->dev[i].data_dev)
                        r = fn(ti,
                                 rs->dev[i].data_dev,
-- 
2.36.1
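[Editorial note: a worked illustration of the raid10 far/offset grouping
arithmetic used in the validate_raid_redundancy() hunk above.  This is a
standalone sketch with example numbers, not dm-raid code.  With 5 devices
and 2 copies, group_size = 5 / 2 = 2 and last_group_start = (5 / 2 - 1)
* 2 = 2, so the devices split into group {0, 1} and the larger last
group {2, 3, 4}; each group tolerates at most copies - 1 rebuilding
members.]

/* Standalone sketch of the grouping used by the raid10 far checks. */
#include <stdio.h>

int main(void)
{
	unsigned int raid_disks = 5, copies = 2;
	unsigned int group_size = raid_disks / copies;		/* 2 */
	unsigned int last_group_start, i;

	last_group_start = (raid_disks / group_size) - 1;	/* 1 */
	last_group_start *= group_size;				/* 2 */

	for (i = 0; i < raid_disks; i++)
		printf("dev %u -> group starting at %u\n", i,
		       i < last_group_start ?
		       (i / group_size) * group_size : last_group_start);
	return 0;
}

[This is why the loop above resets rebuilds_per_group at each multiple
of copies only up to last_group_start: everything past that point is
treated as one larger final group.]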
On Mon, Jun 27, 2022 at 3:56 PM Mikulas Patocka <mpatocka@redhat.com> wrote:
> dm-raid allocates the array of devices with rs->raid_disks entries and
> then accesses it in a loop for rs->md.raid_disks. During reshaping,
> rs->md.raid_disks may be greater than rs->raid_disks, so it accesses
> entries beyond the end of the array.
> 
> We fix this bug by limiting the iteration to rs->raid_disks.
> 
> The bug is triggered when running lvm test shell/lvconvert-raid.sh and the
> kernel is compiled with kasan.
> 
> Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
> Cc: stable@vger.kernel.org
> 
> ---
>  drivers/md/dm-raid.c |   12 ++++++------
>  1 file changed, 6 insertions(+), 6 deletions(-)
> 
> Index: linux-2.6/drivers/md/dm-raid.c
> ===================================================================
> --- linux-2.6.orig/drivers/md/dm-raid.c 2022-06-27 15:44:12.000000000 +0200
> +++ linux-2.6/drivers/md/dm-raid.c      2022-06-27 15:44:12.000000000 +0200
> @@ -1004,7 +1004,7 @@ static int validate_raid_redundancy(stru
>         unsigned int rebuilds_per_group = 0, copies;
>         unsigned int group_size, last_group_start;
> 
> -       for (i = 0; i < rs->md.raid_disks; i++)
> +       for (i = 0; i < rs->raid_disks; i++)
>                 if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
>                     !rs->dev[i].rdev.sb_page)
>                         rebuild_cnt++;
> @@ -1047,7 +1047,7 @@ static int validate_raid_redundancy(stru
>                  *          C    D    D    E    E
>                  */
>                 if (__is_raid10_near(rs->md.new_layout)) {
> -                       for (i = 0; i < rs->md.raid_disks; i++) {
> +                       for (i = 0; i < rs->raid_disks; i++) {
>                                 if (!(i % copies))
>                                         rebuilds_per_group = 0;
>                                 if ((!rs->dev[i].rdev.sb_page ||
> @@ -1073,7 +1073,7 @@ static int validate_raid_redundancy(stru
>                 group_size = (rs->md.raid_disks / copies);
>                 last_group_start = (rs->md.raid_disks / group_size) - 1;
>                 last_group_start *= group_size;
> -               for (i = 0; i < rs->md.raid_disks; i++) {
> +               for (i = 0; i < rs->raid_disks; i++) {
>                         if (!(i % copies) && !(i > last_group_start))
>                                 rebuilds_per_group = 0;
>                         if ((!rs->dev[i].rdev.sb_page ||
> @@ -1588,7 +1588,7 @@ static sector_t __rdev_sectors(struct ra
>  {
>         int i;
> 
> -       for (i = 0; i < rs->md.raid_disks; i++) {
> +       for (i = 0; i < rs->raid_disks; i++) {
>                 struct md_rdev *rdev = &rs->dev[i].rdev;
> 
>                 if (!test_bit(Journal, &rdev->flags) &&
> @@ -3766,7 +3766,7 @@ static int raid_iterate_devices(struct d
>         unsigned int i;
>         int r = 0;
> 
> -       for (i = 0; !r && i < rs->md.raid_disks; i++)
> +       for (i = 0; !r && i < rs->raid_disks; i++)
>                 if (rs->dev[i].data_dev)
>                         r = fn(ti,
>                                  rs->dev[i].data_dev,
> @@ -3817,7 +3817,7 @@ static void attempt_restore_of_faulty_de
> 
>         memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));
> 
> -       for (i = 0; i < mddev->raid_disks; i++) {
> +       for (i = 0; i < rs->raid_disks; i++) {
>                 r = &rs->dev[i].rdev;
>                 /* HM FIXME: enhance journal device recovery processing */
>                 if (test_bit(Journal, &r->flags))
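[Editorial note: both patches enforce the same invariant, namely that
rs->dev[] only ever holds rs->raid_disks entries.  The v2 above
additionally clamps the raid10 checks with min(rs->raid_disks,
rs->md.raid_disks) and skips FirstUse devices when counting rebuilds,
because the two counts can diverge in either direction during a layout
change.  A sketch of both directions, with a hypothetical helper and
example numbers, not dm-raid code:]

/* Hypothetical helper mirroring min(rs->raid_disks, rs->md.raid_disks). */
#include <stdio.h>

static unsigned int safe_bound(unsigned int table, unsigned int sb)
{
	return table < sb ? table : sb;
}

int main(void)
{
	/* Shrink (raid1 3 -> 2 legs): the superblock still says 3, but
	 * only dev[0..1] exist; bounding loops by sb would overrun. */
	printf("shrink: bound = %u (table 2, superblock 3)\n",
	       safe_bound(2, 3));

	/* Grow (raid1 2 -> 3 legs): dev[2] exists in the table but has
	 * no superblock yet; in v2 such a FirstUse leg must not be
	 * counted as a member needing rebuild. */
	printf("grow:   bound = %u (table 3, superblock 2)\n",
	       safe_bound(3, 2));
	return 0;
}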