Lines matching refs: dd_idx (all hits below are from drivers/md/raid5.c in the Linux kernel)
920 int dd_idx; in stripe_add_to_batch_list() local
951 dd_idx = 0; in stripe_add_to_batch_list()
952 while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in stripe_add_to_batch_list()
953 dd_idx++; in stripe_add_to_batch_list()
954 if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf || in stripe_add_to_batch_list()
955 bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite)) in stripe_add_to_batch_list()
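The hits at 951-955 show the common idiom for locating the first data slot in a stripe: start at device 0 and skip over the parity indices. A minimal, self-contained sketch of that idiom (the struct and the fixed pd_idx/qd_idx values are illustrative stand-ins, not the kernel's struct stripe_head):

	#include <stdio.h>

	/* Illustrative stand-in for the parity bookkeeping in a stripe. */
	struct toy_stripe {
		int disks;   /* total devices in the stripe */
		int pd_idx;  /* P parity slot */
		int qd_idx;  /* Q parity slot, -1 for RAID5 */
	};

	/* Return the lowest device index that holds data, skipping P and Q. */
	static int first_data_idx(const struct toy_stripe *sh)
	{
		int dd_idx = 0;

		while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
			dd_idx++;
		return dd_idx;
	}

	int main(void)
	{
		struct toy_stripe sh = { .disks = 6, .pd_idx = 0, .qd_idx = 1 };

		printf("first data slot: %d\n", first_data_idx(&sh)); /* prints 2 */
		return 0;
	}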
2958 int previous, int *dd_idx, in raid5_compute_sector() argument
2987 *dd_idx = sector_div(stripe, data_disks); in raid5_compute_sector()
3001 if (*dd_idx >= pd_idx) in raid5_compute_sector()
3002 (*dd_idx)++; in raid5_compute_sector()
3006 if (*dd_idx >= pd_idx) in raid5_compute_sector()
3007 (*dd_idx)++; in raid5_compute_sector()
3011 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; in raid5_compute_sector()
3015 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; in raid5_compute_sector()
3019 (*dd_idx)++; in raid5_compute_sector()
3035 (*dd_idx)++; /* Q D D D P */ in raid5_compute_sector()
3037 } else if (*dd_idx >= pd_idx) in raid5_compute_sector()
3038 (*dd_idx) += 2; /* D D P Q D */ in raid5_compute_sector()
3044 (*dd_idx)++; /* Q D D D P */ in raid5_compute_sector()
3046 } else if (*dd_idx >= pd_idx) in raid5_compute_sector()
3047 (*dd_idx) += 2; /* D D P Q D */ in raid5_compute_sector()
3052 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; in raid5_compute_sector()
3057 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; in raid5_compute_sector()
3063 (*dd_idx) += 2; in raid5_compute_sector()
3077 (*dd_idx)++; /* Q D D D P */ in raid5_compute_sector()
3079 } else if (*dd_idx >= pd_idx) in raid5_compute_sector()
3080 (*dd_idx) += 2; /* D D P Q D */ in raid5_compute_sector()
3093 (*dd_idx)++; /* Q D D D P */ in raid5_compute_sector()
3095 } else if (*dd_idx >= pd_idx) in raid5_compute_sector()
3096 (*dd_idx) += 2; /* D D P Q D */ in raid5_compute_sector()
3104 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; in raid5_compute_sector()
3111 if (*dd_idx >= pd_idx) in raid5_compute_sector()
3112 (*dd_idx)++; in raid5_compute_sector()
3118 if (*dd_idx >= pd_idx) in raid5_compute_sector()
3119 (*dd_idx)++; in raid5_compute_sector()
3125 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); in raid5_compute_sector()
3131 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); in raid5_compute_sector()
3137 (*dd_idx)++; in raid5_compute_sector()
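The large block at 2958-3137 is raid5_compute_sector(), which maps a logical array sector to a stripe, a data-disk index (*dd_idx) and a parity-disk index, then shifts *dd_idx around the parity slot according to the layout (the "Q D D D P" and "D D P Q D" comments mark the RAID6 variants). A minimal user-space sketch of the RAID5 left-symmetric case only, which corresponds to the "*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks" hits above (names are illustrative; the kernel additionally handles RAID4, RAID6 and the pre-reshape geometry):

	#include <stdio.h>

	/*
	 * Map a logical sector to (data-disk index, parity-disk index) for a
	 * RAID5 array using the left-symmetric layout (the md default).
	 */
	static void compute_left_symmetric(unsigned long long r_sector,
					   unsigned int sectors_per_chunk,
					   int raid_disks,
					   int *dd_idx, int *pd_idx)
	{
		int data_disks = raid_disks - 1;
		unsigned long long chunk = r_sector / sectors_per_chunk;
		unsigned long long stripe = chunk / data_disks;

		*dd_idx = chunk % data_disks;  /* slot among the data chunks */
		*pd_idx = data_disks - stripe % raid_disks; /* parity walks down one disk per stripe */
		*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
	}

	int main(void)
	{
		int dd, pd;

		compute_left_symmetric(12345, 128, 4, &dd, &pd);
		printf("dd_idx=%d pd_idx=%d\n", dd, pd); /* dd_idx=0 pd_idx=3 for these inputs */
		return 0;
	}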
3172 int dummy1, dd_idx = i; in raid5_compute_blocknr() local
3270 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx in raid5_compute_blocknr()
3445 int dd_idx, int forwrite) in stripe_bio_overlaps() argument
3458 bip = &sh->dev[dd_idx].towrite; in stripe_bio_overlaps()
3460 bip = &sh->dev[dd_idx].toread; in stripe_bio_overlaps()
3487 (i == dd_idx || sh->dev[i].towrite)) { in stripe_bio_overlaps()
3505 int dd_idx, int forwrite, int previous) in __add_stripe_bio() argument
3512 bip = &sh->dev[dd_idx].towrite; in __add_stripe_bio()
3516 bip = &sh->dev[dd_idx].toread; in __add_stripe_bio()
3534 sector_t sector = sh->dev[dd_idx].sector; in __add_stripe_bio()
3535 for (bi=sh->dev[dd_idx].towrite; in __add_stripe_bio()
3536 sector < sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf) && in __add_stripe_bio()
3538 bi = r5_next_bio(conf, bi, sh->dev[dd_idx].sector)) { in __add_stripe_bio()
3542 if (sector >= sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf)) in __add_stripe_bio()
3543 if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags)) in __add_stripe_bio()
3548 (*bip)->bi_iter.bi_sector, sh->sector, dd_idx, in __add_stripe_bio()
3549 sh->dev[dd_idx].sector); in __add_stripe_bio()
3583 int dd_idx, int forwrite, int previous) in add_stripe_bio() argument
3587 if (stripe_bio_overlaps(sh, bi, dd_idx, forwrite)) { in add_stripe_bio()
3588 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); in add_stripe_bio()
3593 __add_stripe_bio(sh, bi, dd_idx, forwrite, previous); in add_stripe_bio()
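stripe_bio_overlaps() and __add_stripe_bio() (3445-3593) keep each device's pending bios in a list ordered by start sector, refuse a bio that overlaps one already queued, and mark the device fully overwritten (R5_OVERWRITE) when the queued writes cover the whole chunk without a gap. A minimal user-space sketch of that bookkeeping on a simplified bio type (the struct and helper names are illustrative, not the kernel API):

	#include <stdbool.h>
	#include <stdio.h>

	/* Simplified stand-in for a bio queued against one stripe device. */
	struct toy_bio {
		unsigned long long start;  /* first sector covered */
		unsigned long long end;    /* one past the last sector covered */
		struct toy_bio *next;
	};

	/* Reject the new bio if it intersects anything already queued. */
	static bool overlaps(struct toy_bio *list, const struct toy_bio *bi)
	{
		for (; list; list = list->next)
			if (bi->start < list->end && list->start < bi->end)
				return true;
		return false;
	}

	/* Insert in ascending start-sector order (list assumed overlap-free). */
	static void sorted_insert(struct toy_bio **head, struct toy_bio *bi)
	{
		while (*head && (*head)->start < bi->start)
			head = &(*head)->next;
		bi->next = *head;
		*head = bi;
	}

	/* True when the queued bios cover [dev_start, dev_start + chunk) gap-free. */
	static bool fully_overwritten(struct toy_bio *list,
				      unsigned long long dev_start,
				      unsigned long long chunk)
	{
		unsigned long long sector = dev_start;

		for (; list && list->start <= sector; list = list->next)
			if (list->end > sector)
				sector = list->end;
		return sector >= dev_start + chunk;
	}

	int main(void)
	{
		struct toy_bio a = { 0, 8, NULL }, b = { 8, 16, NULL };
		struct toy_bio *towrite = NULL;

		if (!overlaps(towrite, &a))
			sorted_insert(&towrite, &a);
		if (!overlaps(towrite, &b))
			sorted_insert(&towrite, &b);
		printf("overwrite: %d\n", fully_overwritten(towrite, 0, 16)); /* prints 1 */
		return 0;
	}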
3605 int dd_idx; in stripe_set_idx() local
3613 &dd_idx, sh); in stripe_set_idx()
4588 int dd_idx, j; in handle_stripe_expansion() local
4594 &dd_idx, NULL); in handle_stripe_expansion()
4604 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { in handle_stripe_expansion()
4612 tx = async_memcpy(sh2->dev[dd_idx].page, in handle_stripe_expansion()
4613 sh->dev[i].page, sh2->dev[dd_idx].offset, in handle_stripe_expansion()
4617 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); in handle_stripe_expansion()
4618 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); in handle_stripe_expansion()
5452 int dd_idx; in raid5_read_one_chunk() local
5461 &dd_idx, NULL); in raid5_read_one_chunk()
5467 rdev = conf->disks[dd_idx].replacement; in raid5_read_one_chunk()
5470 rdev = conf->disks[dd_idx].rdev; in raid5_read_one_chunk()
5825 int dd_idx; in stripe_ahead_of_reshape() local
5827 for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) { in stripe_ahead_of_reshape()
5828 if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in stripe_ahead_of_reshape()
5831 min_sector = min(min_sector, sh->dev[dd_idx].sector); in stripe_ahead_of_reshape()
5832 max_sector = max(max_sector, sh->dev[dd_idx].sector); in stripe_ahead_of_reshape()
5851 int dd_idx; in add_all_stripe_bios() local
5855 for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) { in add_all_stripe_bios()
5856 struct r5dev *dev = &sh->dev[dd_idx]; in add_all_stripe_bios()
5858 if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in add_all_stripe_bios()
5865 if (stripe_bio_overlaps(sh, bi, dd_idx, forwrite)) { in add_all_stripe_bios()
5880 for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) { in add_all_stripe_bios()
5881 struct r5dev *dev = &sh->dev[dd_idx]; in add_all_stripe_bios()
5883 if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in add_all_stripe_bios()
5890 __add_stripe_bio(sh, bi, dd_idx, forwrite, previous); in add_all_stripe_bios()
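add_all_stripe_bios() (5851-5890) makes two passes over the stripe's devices: the first pass only checks every data slot with stripe_bio_overlaps() and bails out before touching anything, and only then does a second pass call __add_stripe_bio() on each slot, so the bio is queued on all data devices or on none. A minimal sketch of that check-then-commit shape (the callbacks and slot count are illustrative placeholders):

	#include <stdbool.h>
	#include <stdio.h>

	#define NSLOTS 6

	/*
	 * Two-pass update: verify every eligible slot first so the operation
	 * is all-or-nothing, then apply it, skipping the parity slots.
	 */
	static bool add_to_all_slots(bool (*can_add)(int), void (*add)(int),
				     int pd_idx, int qd_idx)
	{
		int i;

		for (i = 0; i < NSLOTS; i++) {
			if (i == pd_idx || i == qd_idx)
				continue;       /* parity slots carry no file data */
			if (!can_add(i))
				return false;   /* abort before modifying anything */
		}
		for (i = 0; i < NSLOTS; i++) {
			if (i == pd_idx || i == qd_idx)
				continue;
			add(i);
		}
		return true;
	}

	static bool always_ok(int i) { (void)i; return true; }
	static void report(int i) { printf("queued on slot %d\n", i); }

	int main(void)
	{
		add_to_all_slots(always_ok, report, 0, 1);
		return 0;
	}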
5941 int seq, dd_idx; in make_stripe_request() local
5957 &dd_idx, NULL); in make_stripe_request()
6047 int dd_idx; in raid5_bio_lowest_chunk_sector() local
6054 sector = raid5_compute_sector(conf, r_sector, 0, &dd_idx, &sh); in raid5_bio_lowest_chunk_sector()
6062 dd_idx++; in raid5_bio_lowest_chunk_sector()
6063 while (dd_idx == sh.pd_idx || dd_idx == sh.qd_idx) in raid5_bio_lowest_chunk_sector()
6064 dd_idx++; in raid5_bio_lowest_chunk_sector()
6065 if (dd_idx >= raid_disks) in raid5_bio_lowest_chunk_sector()
6229 int dd_idx; in reshape_request() local
6421 1, &dd_idx, NULL); in reshape_request()
6425 1, &dd_idx, NULL); in reshape_request()
6595 int dd_idx; in retry_aligned_read() local
6603 0, &dd_idx, NULL); in retry_aligned_read()
6624 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) { in retry_aligned_read()
6631 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); in retry_aligned_read()