Lines matching refs: r10_bio — every reference to r10_bio in drivers/md/raid10.c, listed in file order with the enclosing function noted on each hit.

75 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
133 struct r10bio *r10_bio; in r10buf_pool_alloc() local
139 r10_bio = r10bio_pool_alloc(gfp_flags, conf); in r10buf_pool_alloc()
140 if (!r10_bio) in r10buf_pool_alloc()
166 r10_bio->devs[j].bio = bio; in r10buf_pool_alloc()
173 r10_bio->devs[j].repl_bio = bio; in r10buf_pool_alloc()
180 struct bio *rbio = r10_bio->devs[j].repl_bio; in r10buf_pool_alloc()
187 bio = r10_bio->devs[j].bio; in r10buf_pool_alloc()
198 rp->raid_bio = r10_bio; in r10buf_pool_alloc()
206 return r10_bio; in r10buf_pool_alloc()
215 if (r10_bio->devs[j].bio) in r10buf_pool_alloc()
216 bio_uninit(r10_bio->devs[j].bio); in r10buf_pool_alloc()
217 kfree(r10_bio->devs[j].bio); in r10buf_pool_alloc()
218 if (r10_bio->devs[j].repl_bio) in r10buf_pool_alloc()
219 bio_uninit(r10_bio->devs[j].repl_bio); in r10buf_pool_alloc()
220 kfree(r10_bio->devs[j].repl_bio); in r10buf_pool_alloc()
224 rbio_pool_free(r10_bio, conf); in r10buf_pool_alloc()
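
The r10buf_pool_alloc() hits above are its allocation and error-unwind path: a resync/recovery r10bio gets one bio per copy (plus a repl_bio when a replacement is being rebuilt), each bio's resync pages point back at the owning r10bio through rp->raid_bio, and any allocation failure has to uninit and free every bio allocated so far before returning the r10bio to its pool. A minimal user-space sketch of that cleanup-on-failure shape (the types and helpers below are simplified stand-ins, not the kernel structures):

#include <stdlib.h>

/* Simplified stand-ins for the kernel structures (assumption, not the real layout). */
struct fake_bio { int initialized; };
struct copy_slot { struct fake_bio *bio; struct fake_bio *repl_bio; };
struct fake_r10bio { int ncopies; struct copy_slot devs[4]; };

static struct fake_bio *fake_bio_alloc(void)
{
    return calloc(1, sizeof(struct fake_bio));
}

/* Mirrors the shape of r10buf_pool_alloc(): allocate per-copy bios, unwind on failure. */
static struct fake_r10bio *buf_alloc(int ncopies, int want_repl)
{
    struct fake_r10bio *r10_bio = calloc(1, sizeof(*r10_bio));
    int j;

    if (!r10_bio || ncopies > 4) {      /* this toy holds at most 4 copies */
        free(r10_bio);
        return NULL;
    }
    r10_bio->ncopies = ncopies;

    for (j = 0; j < ncopies; j++) {
        r10_bio->devs[j].bio = fake_bio_alloc();
        if (!r10_bio->devs[j].bio)
            goto out_free;
        if (want_repl) {
            r10_bio->devs[j].repl_bio = fake_bio_alloc();
            if (!r10_bio->devs[j].repl_bio)
                goto out_free;
        }
    }
    return r10_bio;

out_free:
    /* Same unwind order as the kernel code: free whatever was allocated so far. */
    for (j = 0; j < ncopies; j++) {
        free(r10_bio->devs[j].bio);
        free(r10_bio->devs[j].repl_bio);
    }
    free(r10_bio);
    return NULL;
}
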
258 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio) in put_all_bios() argument
263 struct bio **bio = & r10_bio->devs[i].bio; in put_all_bios()
267 bio = &r10_bio->devs[i].repl_bio; in put_all_bios()
268 if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio)) in put_all_bios()
274 static void free_r10bio(struct r10bio *r10_bio) in free_r10bio() argument
276 struct r10conf *conf = r10_bio->mddev->private; in free_r10bio()
278 put_all_bios(conf, r10_bio); in free_r10bio()
279 mempool_free(r10_bio, &conf->r10bio_pool); in free_r10bio()
282 static void put_buf(struct r10bio *r10_bio) in put_buf() argument
284 struct r10conf *conf = r10_bio->mddev->private; in put_buf()
286 mempool_free(r10_bio, &conf->r10buf_pool); in put_buf()
297 static void reschedule_retry(struct r10bio *r10_bio) in reschedule_retry() argument
300 struct mddev *mddev = r10_bio->mddev; in reschedule_retry()
304 list_add(&r10_bio->retry_list, &conf->retry_list); in reschedule_retry()
319 static void raid_end_bio_io(struct r10bio *r10_bio) in raid_end_bio_io() argument
321 struct bio *bio = r10_bio->master_bio; in raid_end_bio_io()
322 struct r10conf *conf = r10_bio->mddev->private; in raid_end_bio_io()
324 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) in raid_end_bio_io()
334 free_r10bio(r10_bio); in raid_end_bio_io()
340 static inline void update_head_pos(int slot, struct r10bio *r10_bio) in update_head_pos() argument
342 struct r10conf *conf = r10_bio->mddev->private; in update_head_pos()
344 conf->mirrors[r10_bio->devs[slot].devnum].head_position = in update_head_pos()
345 r10_bio->devs[slot].addr + (r10_bio->sectors); in update_head_pos()
351 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio, in find_bio_disk() argument
358 if (r10_bio->devs[slot].bio == bio) in find_bio_disk()
360 if (r10_bio->devs[slot].repl_bio == bio) { in find_bio_disk()
366 update_head_pos(slot, r10_bio); in find_bio_disk()
372 return r10_bio->devs[slot].devnum; in find_bio_disk()
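
find_bio_disk() is the reverse mapping used by the write and sync completion handlers: given a finished bio it scans devs[] for a matching bio or repl_bio pointer, reports the slot and whether the hit was the replacement, refreshes head_position via update_head_pos(), and returns the device number. A sketch of that lookup under simplified types (not the kernel layout):

/* Simplified reverse lookup: which copy slot does this completed bio belong to? */
struct slot { void *bio; void *repl_bio; int devnum; };

static int find_disk(struct slot *devs, int copies, void *bio,
                     int *slotp, int *replp)
{
    int slot;
    int repl = 0;

    for (slot = 0; slot < copies; slot++) {
        if (devs[slot].bio == bio)
            break;
        if (devs[slot].repl_bio == bio) {
            repl = 1;
            break;
        }
    }
    if (slot == copies)
        return -1;              /* not found; the kernel BUG()s in this case */
    if (slotp)
        *slotp = slot;
    if (replp)
        *replp = repl;
    return devs[slot].devnum;
}
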
378 struct r10bio *r10_bio = bio->bi_private; in raid10_end_read_request() local
381 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_read_request()
383 slot = r10_bio->read_slot; in raid10_end_read_request()
384 rdev = r10_bio->devs[slot].rdev; in raid10_end_read_request()
388 update_head_pos(slot, r10_bio); in raid10_end_read_request()
400 set_bit(R10BIO_Uptodate, &r10_bio->state); in raid10_end_read_request()
407 if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state), in raid10_end_read_request()
412 raid_end_bio_io(r10_bio); in raid10_end_read_request()
421 (unsigned long long)r10_bio->sector); in raid10_end_read_request()
422 set_bit(R10BIO_ReadError, &r10_bio->state); in raid10_end_read_request()
423 reschedule_retry(r10_bio); in raid10_end_read_request()
427 static void close_write(struct r10bio *r10_bio) in close_write() argument
429 struct mddev *mddev = r10_bio->mddev; in close_write()
432 mddev->bitmap_ops->endwrite(mddev, r10_bio->sector, r10_bio->sectors, in close_write()
433 !test_bit(R10BIO_Degraded, &r10_bio->state), in close_write()
438 static void one_write_done(struct r10bio *r10_bio) in one_write_done() argument
440 if (atomic_dec_and_test(&r10_bio->remaining)) { in one_write_done()
441 if (test_bit(R10BIO_WriteError, &r10_bio->state)) in one_write_done()
442 reschedule_retry(r10_bio); in one_write_done()
444 close_write(r10_bio); in one_write_done()
445 if (test_bit(R10BIO_MadeGood, &r10_bio->state)) in one_write_done()
446 reschedule_retry(r10_bio); in one_write_done()
448 raid_end_bio_io(r10_bio); in one_write_done()
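
one_write_done() is the drop-to-zero side of the write fan-out: r10_bio->remaining counts outstanding per-device writes plus one bias held by the submitter, and only the caller that takes it to zero may close the write, hand the r10bio to raid10d for error or bad-block bookkeeping, or end the master bio. A user-space sketch of that rule, with C11 atomics standing in for the kernel's atomic_t (the puts() calls are placeholders for the real follow-up paths, and the MadeGood case is folded into the success branch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct write_ctx {
    atomic_int remaining;   /* outstanding per-copy writes + 1 submitter bias */
    bool write_error;
};

/* Called once per completed copy and once by the submitter after issuing all copies. */
static void one_write_done_sketch(struct write_ctx *ctx)
{
    if (atomic_fetch_sub(&ctx->remaining, 1) != 1)
        return;             /* someone else still holds a reference */

    /* Last reference: exactly one caller ends the master request. */
    if (ctx->write_error)
        puts("hand off to the raid10d thread (reschedule_retry)");
    else
        puts("close_write() then raid_end_bio_io()");
}
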
455 struct r10bio *r10_bio = bio->bi_private; in raid10_end_write_request() local
458 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_write_request()
466 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in raid10_end_write_request()
501 set_bit(R10BIO_WriteError, &r10_bio->state); in raid10_end_write_request()
504 set_bit(R10BIO_Degraded, &r10_bio->state); in raid10_end_write_request()
505 r10_bio->devs[slot].bio = NULL; in raid10_end_write_request()
529 set_bit(R10BIO_Uptodate, &r10_bio->state); in raid10_end_write_request()
532 if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr, in raid10_end_write_request()
533 r10_bio->sectors) && in raid10_end_write_request()
537 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD; in raid10_end_write_request()
539 r10_bio->devs[slot].bio = IO_MADE_GOOD; in raid10_end_write_request()
541 set_bit(R10BIO_MadeGood, &r10_bio->state); in raid10_end_write_request()
550 one_write_done(r10_bio); in raid10_end_write_request()
725 struct r10bio *r10_bio, in read_balance() argument
728 const sector_t this_sector = r10_bio->sector; in read_balance()
730 int sectors = r10_bio->sectors; in read_balance()
740 raid10_find_phys(conf, r10_bio); in read_balance()
748 clear_bit(R10BIO_FailFast, &r10_bio->state); in read_balance()
760 if (r10_bio->devs[slot].bio == IO_BLOCKED) in read_balance()
762 disk = r10_bio->devs[slot].devnum; in read_balance()
765 r10_bio->devs[slot].addr + sectors > in read_balance()
772 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
775 dev_sector = r10_bio->devs[slot].addr; in read_balance()
821 set_bit(R10BIO_FailFast, &r10_bio->state); in read_balance()
831 new_distance = r10_bio->devs[slot].addr; in read_balance()
833 new_distance = abs(r10_bio->devs[slot].addr - in read_balance()
854 r10_bio->read_slot = slot; in read_balance()
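
read_balance() first runs raid10_find_phys() to get one candidate slot per copy, skips copies that are blocked (IO_BLOCKED), missing, not recovered far enough, or covered by a bad block, and ranks the survivors by seek distance: layouts with far_copies > 1 use devs[slot].addr directly, otherwise the distance is |devs[slot].addr - head_position| of that mirror. A simplified sketch of the distance ranking (assumed field names; the pending-I/O, non-rotational and fail-fast heuristics are omitted):

#include <stdint.h>

struct copy {
    uint64_t addr;          /* physical sector of this copy */
    uint64_t head_position; /* last known head position of its disk */
    int usable;             /* not faulty, not blocked, no bad block */
};

/* Pick the usable copy whose disk head is closest to the target sector. */
static int pick_read_slot(const struct copy *c, int ncopies)
{
    uint64_t best_dist = UINT64_MAX;
    int best = -1;
    int slot;

    for (slot = 0; slot < ncopies; slot++) {
        uint64_t dist;

        if (!c[slot].usable)
            continue;
        dist = c[slot].addr > c[slot].head_position ?
            c[slot].addr - c[slot].head_position :
            c[slot].head_position - c[slot].addr;
        if (dist < best_dist) {
            best_dist = dist;
            best = slot;
        }
    }
    return best;    /* becomes r10_bio->read_slot, or -1 if nothing is usable */
}
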
1075 static sector_t choose_data_offset(struct r10bio *r10_bio, in choose_data_offset() argument
1079 test_bit(R10BIO_Previous, &r10_bio->state)) in choose_data_offset()
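
choose_data_offset() exists because a device holds data at two offsets while a reshape is in flight: an r10bio flagged R10BIO_Previous addresses the pre-reshape geometry and so uses the old data_offset, other I/O uses new_data_offset, and when no reshape is active the old offset is always taken. A one-line sketch of the decision, with hypothetical parameter names:

#include <stdint.h>

/* During a reshape each rdev has two data offsets; pick one from the r10bio state. */
static uint64_t choose_offset_sketch(int reshape_active, int targets_previous_geometry,
                                     uint64_t data_offset, uint64_t new_data_offset)
{
    if (!reshape_active || targets_previous_geometry)
        return data_offset;
    return new_data_offset;
}
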
1150 struct r10bio *r10_bio, bool io_accounting) in raid10_read_request() argument
1159 int slot = r10_bio->read_slot; in raid10_read_request()
1163 if (slot >= 0 && r10_bio->devs[slot].rdev) { in raid10_read_request()
1178 disk = r10_bio->devs[slot].devnum; in raid10_read_request()
1185 err_rdev = r10_bio->devs[slot].rdev; in raid10_read_request()
1189 if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors)) in raid10_read_request()
1191 rdev = read_balance(conf, r10_bio, &max_sectors); in raid10_read_request()
1196 (unsigned long long)r10_bio->sector); in raid10_read_request()
1198 raid_end_bio_io(r10_bio); in raid10_read_request()
1205 (unsigned long long)r10_bio->sector); in raid10_read_request()
1214 r10_bio->master_bio = bio; in raid10_read_request()
1215 r10_bio->sectors = max_sectors; in raid10_read_request()
1217 slot = r10_bio->read_slot; in raid10_read_request()
1221 r10_bio->master_bio = bio; in raid10_read_request()
1225 r10_bio->devs[slot].bio = read_bio; in raid10_read_request()
1226 r10_bio->devs[slot].rdev = rdev; in raid10_read_request()
1228 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + in raid10_read_request()
1229 choose_data_offset(r10_bio, rdev); in raid10_read_request()
1233 test_bit(R10BIO_FailFast, &r10_bio->state)) in raid10_read_request()
1235 read_bio->bi_private = r10_bio; in raid10_read_request()
1236 mddev_trace_remap(mddev, read_bio, r10_bio->sector); in raid10_read_request()
1241 static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, in raid10_write_one_disk() argument
1251 int devnum = r10_bio->devs[n_copy].devnum; in raid10_write_one_disk()
1259 r10_bio->devs[n_copy].repl_bio = mbio; in raid10_write_one_disk()
1261 r10_bio->devs[n_copy].bio = mbio; in raid10_write_one_disk()
1263 mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr + in raid10_write_one_disk()
1264 choose_data_offset(r10_bio, rdev)); in raid10_write_one_disk()
1271 mbio->bi_private = r10_bio; in raid10_write_one_disk()
1272 mddev_trace_remap(mddev, mbio, r10_bio->sector); in raid10_write_one_disk()
1276 atomic_inc(&r10_bio->remaining); in raid10_write_one_disk()
1286 static void wait_blocked_dev(struct mddev *mddev, struct r10bio *r10_bio) in wait_blocked_dev() argument
1311 sector_t dev_sector = r10_bio->devs[i].addr; in wait_blocked_dev()
1317 if (!r10_bio->sectors) in wait_blocked_dev()
1321 r10_bio->sectors) < 0) { in wait_blocked_dev()
1347 struct r10bio *r10_bio) in raid10_write_request() argument
1375 sectors = r10_bio->sectors; in raid10_write_request()
1412 r10_bio->read_slot = -1; /* make sure repl_bio gets freed */ in raid10_write_request()
1413 raid10_find_phys(conf, r10_bio); in raid10_write_request()
1415 wait_blocked_dev(mddev, r10_bio); in raid10_write_request()
1417 max_sectors = r10_bio->sectors; in raid10_write_request()
1420 int d = r10_bio->devs[i].devnum; in raid10_write_request()
1430 r10_bio->devs[i].bio = NULL; in raid10_write_request()
1431 r10_bio->devs[i].repl_bio = NULL; in raid10_write_request()
1434 set_bit(R10BIO_Degraded, &r10_bio->state); in raid10_write_request()
1439 sector_t dev_sector = r10_bio->devs[i].addr; in raid10_write_request()
1470 r10_bio->devs[i].bio = bio; in raid10_write_request()
1474 r10_bio->devs[i].repl_bio = bio; in raid10_write_request()
1479 if (max_sectors < r10_bio->sectors) in raid10_write_request()
1480 r10_bio->sectors = max_sectors; in raid10_write_request()
1482 if (r10_bio->sectors < bio_sectors(bio)) { in raid10_write_request()
1483 struct bio *split = bio_split(bio, r10_bio->sectors, in raid10_write_request()
1490 r10_bio->master_bio = bio; in raid10_write_request()
1494 r10_bio->master_bio = bio; in raid10_write_request()
1495 atomic_set(&r10_bio->remaining, 1); in raid10_write_request()
1496 mddev->bitmap_ops->startwrite(mddev, r10_bio->sector, r10_bio->sectors, in raid10_write_request()
1500 if (r10_bio->devs[i].bio) in raid10_write_request()
1501 raid10_write_one_disk(mddev, r10_bio, bio, false, i); in raid10_write_request()
1502 if (r10_bio->devs[i].repl_bio) in raid10_write_request()
1503 raid10_write_one_disk(mddev, r10_bio, bio, true, i); in raid10_write_request()
1505 one_write_done(r10_bio); in raid10_write_request()
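
raid10_write_request() fans one logical write out to every copy: it clears read_slot, computes the per-copy addresses with raid10_find_phys(), waits for blocked devices, trims r10_bio->sectors where a bad block starts inside the range (splitting the master bio when the trimmed length is shorter), then sets remaining to 1 as a submitter bias, starts bitmap accounting, issues raid10_write_one_disk() per copy and per replacement, and finally drops the bias with one_write_done(). A self-contained sketch of that bias-and-fan-out pattern (user-space stand-ins, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

static void request_done(void)
{
    puts("all copies finished; end the master bio");
}

static void put_ref(atomic_int *remaining)
{
    if (atomic_fetch_sub(remaining, 1) == 1)
        request_done();
}

/* Submitter side of the fan-out: a bias reference is held while per-copy writes
 * are issued, then dropped, so completion can only fire once every copy is done
 * and the submitter has finished issuing. */
static void submit_all_copies(int ncopies)
{
    atomic_int remaining;
    int i;

    atomic_init(&remaining, 1);                 /* submitter bias */
    for (i = 0; i < ncopies; i++) {
        atomic_fetch_add(&remaining, 1);        /* one per issued copy */
        /* ...submit copy i; its completion would call put_ref(&remaining)... */
        put_ref(&remaining);                    /* stand-in for that completion */
    }
    put_ref(&remaining);                        /* drop the bias */
}
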
1511 struct r10bio *r10_bio; in __make_request() local
1513 r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO); in __make_request()
1515 r10_bio->master_bio = bio; in __make_request()
1516 r10_bio->sectors = sectors; in __make_request()
1518 r10_bio->mddev = mddev; in __make_request()
1519 r10_bio->sector = bio->bi_iter.bi_sector; in __make_request()
1520 r10_bio->state = 0; in __make_request()
1521 r10_bio->read_slot = -1; in __make_request()
1522 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * in __make_request()
1526 raid10_read_request(mddev, bio, r10_bio, true); in __make_request()
1528 raid10_write_request(mddev, bio, r10_bio); in __make_request()
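
__make_request() is where every normal r10bio is born: it comes out of conf->r10bio_pool and is stamped with the master bio, the logical start sector and length, a cleared state, read_slot = -1 and a zeroed devs[] array before being routed to the read or write path. A sketch of that initialization under a simplified structure (the kernel's devs[] is sized by the number of copies, not fixed at four):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct sketch_r10bio {
    void *master_bio;
    uint64_t sector;
    int sectors;
    unsigned long state;
    int read_slot;
    struct { void *bio; void *repl_bio; uint64_t addr; int devnum; } devs[4];
};

/* Mirrors the field initialization done in __make_request(). */
static struct sketch_r10bio *make_r10bio(void *master_bio, uint64_t sector, int sectors)
{
    struct sketch_r10bio *r10_bio = malloc(sizeof(*r10_bio));

    if (!r10_bio)
        return NULL;
    r10_bio->master_bio = master_bio;
    r10_bio->sector = sector;
    r10_bio->sectors = sectors;
    r10_bio->state = 0;
    r10_bio->read_slot = -1;    /* no read copy chosen; also keeps repl_bio freeable */
    memset(r10_bio->devs, 0, sizeof(r10_bio->devs));
    return r10_bio;
}
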
1555 struct r10bio *r10_bio = bio->bi_private; in raid10_end_discard_request() local
1556 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_discard_request()
1564 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) in raid10_end_discard_request()
1565 set_bit(R10BIO_Uptodate, &r10_bio->state); in raid10_end_discard_request()
1567 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in raid10_end_discard_request()
1571 raid_end_discard_bio(r10_bio); in raid10_end_discard_request()
1587 struct r10bio *r10_bio, *first_r10bio; in raid10_handle_discard() local
1692 r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO); in raid10_handle_discard()
1693 r10_bio->mddev = mddev; in raid10_handle_discard()
1694 r10_bio->state = 0; in raid10_handle_discard()
1695 r10_bio->sectors = 0; in raid10_handle_discard()
1696 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * geo->raid_disks); in raid10_handle_discard()
1697 wait_blocked_dev(mddev, r10_bio); in raid10_handle_discard()
1707 r10_bio->master_bio = bio; in raid10_handle_discard()
1708 set_bit(R10BIO_Discard, &r10_bio->state); in raid10_handle_discard()
1710 first_r10bio = r10_bio; in raid10_handle_discard()
1712 r10_bio->master_bio = (struct bio *)first_r10bio; in raid10_handle_discard()
1724 r10_bio->devs[disk].bio = NULL; in raid10_handle_discard()
1725 r10_bio->devs[disk].repl_bio = NULL; in raid10_handle_discard()
1735 r10_bio->devs[disk].bio = bio; in raid10_handle_discard()
1739 r10_bio->devs[disk].repl_bio = bio; in raid10_handle_discard()
1744 atomic_set(&r10_bio->remaining, 1); in raid10_handle_discard()
1781 if (r10_bio->devs[disk].bio) { in raid10_handle_discard()
1786 mbio->bi_private = r10_bio; in raid10_handle_discard()
1787 r10_bio->devs[disk].bio = mbio; in raid10_handle_discard()
1788 r10_bio->devs[disk].devnum = disk; in raid10_handle_discard()
1789 atomic_inc(&r10_bio->remaining); in raid10_handle_discard()
1791 dev_start + choose_data_offset(r10_bio, rdev), in raid10_handle_discard()
1795 if (r10_bio->devs[disk].repl_bio) { in raid10_handle_discard()
1800 rbio->bi_private = r10_bio; in raid10_handle_discard()
1801 r10_bio->devs[disk].repl_bio = rbio; in raid10_handle_discard()
1802 r10_bio->devs[disk].devnum = disk; in raid10_handle_discard()
1803 atomic_inc(&r10_bio->remaining); in raid10_handle_discard()
1805 dev_start + choose_data_offset(r10_bio, rrdev), in raid10_handle_discard()
1817 raid_end_discard_bio(r10_bio); in raid10_handle_discard()
1822 raid_end_discard_bio(r10_bio); in raid10_handle_discard()
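
A large discard can need more than one r10bio; raid10_handle_discard() flags only the first one R10BIO_Discard and lets it keep the real master bio, while every later r10bio stores a pointer to that first r10bio in its own master_bio field (cast through struct bio *). Completion then walks the chain: when a later part's remaining count drops to zero it releases a reference on the first part, and only the first part ends the original bio. A hedged sketch of that hand-over-hand completion (simplified user-space types):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct discard_part {
    atomic_int remaining;           /* outstanding per-device discards + 1 bias */
    bool is_first;                  /* the first part owns the real master bio */
    struct discard_part *first;     /* later parts point back at the first */
};

/* Stand-in for raid_end_discard_bio(): completion propagates to the first part,
 * which is the only one allowed to end the caller's bio. */
static void end_discard_part(struct discard_part *p)
{
    if (atomic_fetch_sub(&p->remaining, 1) != 1)
        return;
    if (p->is_first)
        puts("all parts done: end the original discard bio");
    else
        end_discard_part(p->first);
}
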
2188 static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d) in __end_sync_read() argument
2190 struct r10conf *conf = r10_bio->mddev->private; in __end_sync_read()
2193 set_bit(R10BIO_Uptodate, &r10_bio->state); in __end_sync_read()
2198 atomic_add(r10_bio->sectors, in __end_sync_read()
2205 if (test_bit(R10BIO_IsRecover, &r10_bio->state) || in __end_sync_read()
2206 atomic_dec_and_test(&r10_bio->remaining)) { in __end_sync_read()
2210 reschedule_retry(r10_bio); in __end_sync_read()
2216 struct r10bio *r10_bio = get_resync_r10bio(bio); in end_sync_read() local
2217 struct r10conf *conf = r10_bio->mddev->private; in end_sync_read()
2218 int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL); in end_sync_read()
2220 __end_sync_read(r10_bio, bio, d); in end_sync_read()
2226 struct r10bio *r10_bio = bio->bi_private; in end_reshape_read() local
2228 __end_sync_read(r10_bio, bio, r10_bio->read_slot); in end_reshape_read()
2231 static void end_sync_request(struct r10bio *r10_bio) in end_sync_request() argument
2233 struct mddev *mddev = r10_bio->mddev; in end_sync_request()
2235 while (atomic_dec_and_test(&r10_bio->remaining)) { in end_sync_request()
2236 if (r10_bio->master_bio == NULL) { in end_sync_request()
2238 sector_t s = r10_bio->sectors; in end_sync_request()
2239 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || in end_sync_request()
2240 test_bit(R10BIO_WriteError, &r10_bio->state)) in end_sync_request()
2241 reschedule_retry(r10_bio); in end_sync_request()
2243 put_buf(r10_bio); in end_sync_request()
2247 struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio; in end_sync_request()
2248 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || in end_sync_request()
2249 test_bit(R10BIO_WriteError, &r10_bio->state)) in end_sync_request()
2250 reschedule_retry(r10_bio); in end_sync_request()
2252 put_buf(r10_bio); in end_sync_request()
2253 r10_bio = r10_bio2; in end_sync_request()
2260 struct r10bio *r10_bio = get_resync_r10bio(bio); in end_sync_write() local
2261 struct mddev *mddev = r10_bio->mddev; in end_sync_write()
2268 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_sync_write()
2282 set_bit(R10BIO_WriteError, &r10_bio->state); in end_sync_write()
2284 } else if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr, in end_sync_write()
2285 r10_bio->sectors)) { in end_sync_write()
2286 set_bit(R10BIO_MadeGood, &r10_bio->state); in end_sync_write()
2291 end_sync_request(r10_bio); in end_sync_write()
2310 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) in sync_request_write() argument
2318 atomic_set(&r10_bio->remaining, 1); in sync_request_write()
2322 if (!r10_bio->devs[i].bio->bi_status) in sync_request_write()
2329 fbio = r10_bio->devs[i].bio; in sync_request_write()
2330 fbio->bi_iter.bi_size = r10_bio->sectors << 9; in sync_request_write()
2334 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9); in sync_request_write()
2341 tbio = r10_bio->devs[i].bio; in sync_request_write()
2349 d = r10_bio->devs[i].devnum; in sync_request_write()
2351 if (!r10_bio->devs[i].bio->bi_status) { in sync_request_write()
2356 int sectors = r10_bio->sectors; in sync_request_write()
2369 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches); in sync_request_write()
2388 rp->raid_bio = r10_bio; in sync_request_write()
2390 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; in sync_request_write()
2396 atomic_inc(&r10_bio->remaining); in sync_request_write()
2411 tbio = r10_bio->devs[i].repl_bio; in sync_request_write()
2414 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write in sync_request_write()
2415 && r10_bio->devs[i].bio != fbio) in sync_request_write()
2417 d = r10_bio->devs[i].devnum; in sync_request_write()
2418 atomic_inc(&r10_bio->remaining); in sync_request_write()
2425 if (atomic_dec_and_test(&r10_bio->remaining)) { in sync_request_write()
2426 md_done_sync(mddev, r10_bio->sectors, 1); in sync_request_write()
2427 put_buf(r10_bio); in sync_request_write()
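
sync_request_write() compares every in-sync copy against fbio, the first copy that read back without error, one resync page at a time; vcnt is the number of pages covering r10_bio->sectors, i.e. the sector count rounded up to whole pages: vcnt = (sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9). A worked example of that arithmetic, assuming 4 KiB pages (8 sectors of 512 B per page):

#include <stdio.h>

/* Resync buffers are page-sized; a range of 512-byte sectors needs
 * DIV_ROUND_UP(sectors, sectors_per_page) pages. With 4 KiB pages that is
 * (sectors + 7) >> 3, matching the kernel expression for PAGE_SHIFT == 12. */
int main(void)
{
    int sectors_per_page = 4096 / 512;      /* PAGE_SIZE >> 9 */

    for (int sectors = 1; sectors <= 24; sectors += 7) {
        int vcnt = (sectors + sectors_per_page - 1) / sectors_per_page;
        printf("sectors=%2d -> vcnt=%d pages\n", sectors, vcnt);
    }
    return 0;
}
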
2441 static void fix_recovery_read_error(struct r10bio *r10_bio) in fix_recovery_read_error() argument
2450 struct mddev *mddev = r10_bio->mddev; in fix_recovery_read_error()
2452 struct bio *bio = r10_bio->devs[0].bio; in fix_recovery_read_error()
2454 int sectors = r10_bio->sectors; in fix_recovery_read_error()
2456 int dr = r10_bio->devs[0].devnum; in fix_recovery_read_error()
2457 int dw = r10_bio->devs[1].devnum; in fix_recovery_read_error()
2470 addr = r10_bio->devs[0].addr + sect; in fix_recovery_read_error()
2478 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2502 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2524 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) in recovery_request_write() argument
2528 struct bio *wbio = r10_bio->devs[1].bio; in recovery_request_write()
2529 struct bio *wbio2 = r10_bio->devs[1].repl_bio; in recovery_request_write()
2538 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) { in recovery_request_write()
2539 fix_recovery_read_error(r10_bio); in recovery_request_write()
2541 end_sync_request(r10_bio); in recovery_request_write()
2543 end_sync_request(r10_bio); in recovery_request_write()
2551 d = r10_bio->devs[1].devnum; in recovery_request_write()
2594 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio) in fix_read_error() argument
2597 int sectors = r10_bio->sectors, slot = r10_bio->read_slot; in fix_read_error()
2599 int d = r10_bio->devs[slot].devnum; in fix_read_error()
2612 r10_bio->devs[slot].bio = IO_BLOCKED; in fix_read_error()
2626 d = r10_bio->devs[sl].devnum; in fix_read_error()
2632 r10_bio->devs[sl].addr + sect, in fix_read_error()
2636 r10_bio->devs[sl].addr + in fix_read_error()
2655 int dn = r10_bio->devs[slot].devnum; in fix_read_error()
2660 r10_bio->devs[slot].addr in fix_read_error()
2664 r10_bio->devs[slot].bio in fix_read_error()
2676 d = r10_bio->devs[sl].devnum; in fix_read_error()
2685 r10_bio->devs[sl].addr + in fix_read_error()
2694 choose_data_offset(r10_bio, in fix_read_error()
2708 d = r10_bio->devs[sl].devnum; in fix_read_error()
2717 r10_bio->devs[sl].addr + in fix_read_error()
2726 choose_data_offset(r10_bio, rdev)), in fix_read_error()
2737 choose_data_offset(r10_bio, rdev)), in fix_read_error()
2750 static int narrow_write_error(struct r10bio *r10_bio, int i) in narrow_write_error() argument
2752 struct bio *bio = r10_bio->master_bio; in narrow_write_error()
2753 struct mddev *mddev = r10_bio->mddev; in narrow_write_error()
2755 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; in narrow_write_error()
2770 int sect_to_write = r10_bio->sectors; in narrow_write_error()
2778 sector = r10_bio->sector; in narrow_write_error()
2779 sectors = ((r10_bio->sector + block_sectors) in narrow_write_error()
2792 wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector); in narrow_write_error()
2794 choose_data_offset(r10_bio, rdev); in narrow_write_error()
2811 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) in handle_read_error() argument
2813 int slot = r10_bio->read_slot; in handle_read_error()
2816 struct md_rdev *rdev = r10_bio->devs[slot].rdev; in handle_read_error()
2826 bio = r10_bio->devs[slot].bio; in handle_read_error()
2828 r10_bio->devs[slot].bio = NULL; in handle_read_error()
2831 r10_bio->devs[slot].bio = IO_BLOCKED; in handle_read_error()
2834 fix_read_error(conf, mddev, r10_bio); in handle_read_error()
2840 r10_bio->state = 0; in handle_read_error()
2841 raid10_read_request(mddev, r10_bio->master_bio, r10_bio, false); in handle_read_error()
2849 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) in handle_write_completed() argument
2860 if (test_bit(R10BIO_IsSync, &r10_bio->state) || in handle_write_completed()
2861 test_bit(R10BIO_IsRecover, &r10_bio->state)) { in handle_write_completed()
2863 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
2865 if (r10_bio->devs[m].bio == NULL || in handle_write_completed()
2866 r10_bio->devs[m].bio->bi_end_io == NULL) in handle_write_completed()
2868 if (!r10_bio->devs[m].bio->bi_status) { in handle_write_completed()
2871 r10_bio->devs[m].addr, in handle_write_completed()
2872 r10_bio->sectors, 0); in handle_write_completed()
2876 r10_bio->devs[m].addr, in handle_write_completed()
2877 r10_bio->sectors, 0)) in handle_write_completed()
2881 if (r10_bio->devs[m].repl_bio == NULL || in handle_write_completed()
2882 r10_bio->devs[m].repl_bio->bi_end_io == NULL) in handle_write_completed()
2885 if (!r10_bio->devs[m].repl_bio->bi_status) { in handle_write_completed()
2888 r10_bio->devs[m].addr, in handle_write_completed()
2889 r10_bio->sectors, 0); in handle_write_completed()
2893 r10_bio->devs[m].addr, in handle_write_completed()
2894 r10_bio->sectors, 0)) in handle_write_completed()
2898 put_buf(r10_bio); in handle_write_completed()
2902 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
2903 struct bio *bio = r10_bio->devs[m].bio; in handle_write_completed()
2908 r10_bio->devs[m].addr, in handle_write_completed()
2909 r10_bio->sectors, 0); in handle_write_completed()
2913 if (!narrow_write_error(r10_bio, m)) { in handle_write_completed()
2916 &r10_bio->state); in handle_write_completed()
2920 bio = r10_bio->devs[m].repl_bio; in handle_write_completed()
2925 r10_bio->devs[m].addr, in handle_write_completed()
2926 r10_bio->sectors, 0); in handle_write_completed()
2932 list_add(&r10_bio->retry_list, &conf->bio_end_io_list); in handle_write_completed()
2943 &r10_bio->state)) in handle_write_completed()
2944 close_write(r10_bio); in handle_write_completed()
2945 raid_end_bio_io(r10_bio); in handle_write_completed()
2953 struct r10bio *r10_bio; in raid10d() local
2973 r10_bio = list_first_entry(&tmp, struct r10bio, in raid10d()
2975 list_del(&r10_bio->retry_list); in raid10d()
2977 set_bit(R10BIO_Degraded, &r10_bio->state); in raid10d()
2980 &r10_bio->state)) in raid10d()
2981 close_write(r10_bio); in raid10d()
2982 raid_end_bio_io(r10_bio); in raid10d()
2996 r10_bio = list_entry(head->prev, struct r10bio, retry_list); in raid10d()
3001 mddev = r10_bio->mddev; in raid10d()
3003 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || in raid10d()
3004 test_bit(R10BIO_WriteError, &r10_bio->state)) in raid10d()
3005 handle_write_completed(conf, r10_bio); in raid10d()
3006 else if (test_bit(R10BIO_IsReshape, &r10_bio->state)) in raid10d()
3007 reshape_request_write(mddev, r10_bio); in raid10d()
3008 else if (test_bit(R10BIO_IsSync, &r10_bio->state)) in raid10d()
3009 sync_request_write(mddev, r10_bio); in raid10d()
3010 else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) in raid10d()
3011 recovery_request_write(mddev, r10_bio); in raid10d()
3012 else if (test_bit(R10BIO_ReadError, &r10_bio->state)) in raid10d()
3013 handle_read_error(mddev, r10_bio); in raid10d()
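
raid10d() is the per-array worker thread: it first drains bio_end_io_list for writes that only need their bitmap accounting closed (marking them Degraded when the list is flushed on error), then pulls r10bios off retry_list and dispatches on the state bits, as the last few hits above show. A sketch of that dispatch ladder (flag and handler names modeled on the listing, not the kernel's definitions):

#include <stdio.h>

enum {
    S_MADE_GOOD   = 1 << 0,
    S_WRITE_ERROR = 1 << 1,
    S_IS_RESHAPE  = 1 << 2,
    S_IS_SYNC     = 1 << 3,
    S_IS_RECOVER  = 1 << 4,
    S_READ_ERROR  = 1 << 5,
};

/* Mirrors the if/else ladder at the bottom of raid10d(). */
static void dispatch(unsigned long state)
{
    if (state & (S_MADE_GOOD | S_WRITE_ERROR))
        puts("handle_write_completed()");
    else if (state & S_IS_RESHAPE)
        puts("reshape_request_write()");
    else if (state & S_IS_SYNC)
        puts("sync_request_write()");
    else if (state & S_IS_RECOVER)
        puts("recovery_request_write()");
    else if (state & S_READ_ERROR)
        puts("handle_read_error()");
    else
        puts("unexpected retry entry");     /* the kernel warns here */
}
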
3144 struct r10bio *r10_bio; in raid10_sync_request() local
3291 r10_bio = NULL; in raid10_sync_request()
3316 rb2 = r10_bio; in raid10_sync_request()
3346 r10_bio = raid10_alloc_init_r10buf(conf); in raid10_sync_request()
3347 r10_bio->state = 0; in raid10_sync_request()
3349 atomic_set(&r10_bio->remaining, 0); in raid10_sync_request()
3351 r10_bio->master_bio = (struct bio*)rb2; in raid10_sync_request()
3354 r10_bio->mddev = mddev; in raid10_sync_request()
3355 set_bit(R10BIO_IsRecover, &r10_bio->state); in raid10_sync_request()
3356 r10_bio->sector = sect; in raid10_sync_request()
3358 raid10_find_phys(conf, r10_bio); in raid10_sync_request()
3378 int d = r10_bio->devs[j].devnum; in raid10_sync_request()
3388 sector = r10_bio->devs[j].addr; in raid10_sync_request()
3402 bio = r10_bio->devs[0].bio; in raid10_sync_request()
3409 from_addr = r10_bio->devs[j].addr; in raid10_sync_request()
3417 if (r10_bio->devs[k].devnum == i) in raid10_sync_request()
3420 to_addr = r10_bio->devs[k].addr; in raid10_sync_request()
3421 r10_bio->devs[0].devnum = d; in raid10_sync_request()
3422 r10_bio->devs[0].addr = from_addr; in raid10_sync_request()
3423 r10_bio->devs[1].devnum = i; in raid10_sync_request()
3424 r10_bio->devs[1].addr = to_addr; in raid10_sync_request()
3427 bio = r10_bio->devs[1].bio; in raid10_sync_request()
3435 atomic_inc(&r10_bio->remaining); in raid10_sync_request()
3437 r10_bio->devs[1].bio->bi_end_io = NULL; in raid10_sync_request()
3440 bio = r10_bio->devs[1].repl_bio; in raid10_sync_request()
3456 atomic_inc(&r10_bio->remaining); in raid10_sync_request()
3468 if (r10_bio->devs[k].devnum == i) in raid10_sync_request()
3474 r10_bio->devs[k].addr, in raid10_sync_request()
3480 r10_bio->devs[k].addr, in raid10_sync_request()
3494 put_buf(r10_bio); in raid10_sync_request()
3497 r10_bio = rb2; in raid10_sync_request()
3508 if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) { in raid10_sync_request()
3515 int d = r10_bio->devs[j].devnum; in raid10_sync_request()
3522 r10_bio->devs[0].bio->bi_opf in raid10_sync_request()
3527 while (r10_bio) { in raid10_sync_request()
3528 struct r10bio *rb2 = r10_bio; in raid10_sync_request()
3529 r10_bio = (struct r10bio*) rb2->master_bio; in raid10_sync_request()
3561 r10_bio = raid10_alloc_init_r10buf(conf); in raid10_sync_request()
3562 r10_bio->state = 0; in raid10_sync_request()
3564 r10_bio->mddev = mddev; in raid10_sync_request()
3565 atomic_set(&r10_bio->remaining, 0); in raid10_sync_request()
3569 r10_bio->master_bio = NULL; in raid10_sync_request()
3570 r10_bio->sector = sector_nr; in raid10_sync_request()
3571 set_bit(R10BIO_IsSync, &r10_bio->state); in raid10_sync_request()
3572 raid10_find_phys(conf, r10_bio); in raid10_sync_request()
3573 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1; in raid10_sync_request()
3576 int d = r10_bio->devs[i].devnum; in raid10_sync_request()
3581 if (r10_bio->devs[i].repl_bio) in raid10_sync_request()
3582 r10_bio->devs[i].repl_bio->bi_end_io = NULL; in raid10_sync_request()
3584 bio = r10_bio->devs[i].bio; in raid10_sync_request()
3590 sector = r10_bio->devs[i].addr; in raid10_sync_request()
3603 atomic_inc(&r10_bio->remaining); in raid10_sync_request()
3621 bio = r10_bio->devs[i].repl_bio; in raid10_sync_request()
3624 sector = r10_bio->devs[i].addr; in raid10_sync_request()
3638 int d = r10_bio->devs[i].devnum; in raid10_sync_request()
3639 if (r10_bio->devs[i].bio->bi_end_io) in raid10_sync_request()
3642 if (r10_bio->devs[i].repl_bio && in raid10_sync_request()
3643 r10_bio->devs[i].repl_bio->bi_end_io) in raid10_sync_request()
3648 put_buf(r10_bio); in raid10_sync_request()
3676 r10_bio->sectors = nr_sectors; in raid10_sync_request()
3729 r10_bio = get_resync_r10bio(bio); in raid10_sync_request()
3730 r10_bio->sectors = nr_sectors; in raid10_sync_request()
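
In the plain-resync branch (R10BIO_IsSync), raid10_sync_request() sizes each r10bio so it never crosses a chunk boundary: with chunk_mask = chunk_sectors - 1 and chunk_sectors a power of two, (sector_nr | chunk_mask) - sector_nr + 1 is the number of sectors from sector_nr to the end of its chunk, inclusive. A worked example of that expression, assuming 1024-sector (512 KiB) chunks:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t chunk_sectors = 1024;          /* 512 KiB chunks, a common default */
    uint64_t chunk_mask = chunk_sectors - 1;
    uint64_t samples[] = { 0, 1, 1000, 1023, 1024, 5000 };

    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        uint64_t sector_nr = samples[i];
        /* sectors remaining up to and including the end of this chunk */
        uint64_t sectors = (sector_nr | chunk_mask) - sector_nr + 1;
        printf("sector_nr=%5llu -> sectors=%4llu\n",
               (unsigned long long)sector_nr, (unsigned long long)sectors);
    }
    return 0;
}
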
4645 struct r10bio *r10_bio; in reshape_request() local
4750 r10_bio = raid10_alloc_init_r10buf(conf); in reshape_request()
4751 r10_bio->state = 0; in reshape_request()
4753 atomic_set(&r10_bio->remaining, 0); in reshape_request()
4754 r10_bio->mddev = mddev; in reshape_request()
4755 r10_bio->sector = sector_nr; in reshape_request()
4756 set_bit(R10BIO_IsReshape, &r10_bio->state); in reshape_request()
4757 r10_bio->sectors = last - sector_nr + 1; in reshape_request()
4758 rdev = read_balance(conf, r10_bio, &max_sectors); in reshape_request()
4759 BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state)); in reshape_request()
4766 mempool_free(r10_bio, &conf->r10buf_pool); in reshape_request()
4773 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr in reshape_request()
4775 read_bio->bi_private = r10_bio; in reshape_request()
4777 r10_bio->master_bio = read_bio; in reshape_request()
4778 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; in reshape_request()
4807 __raid10_find_phys(&conf->geo, r10_bio); in reshape_request()
4814 int d = r10_bio->devs[s/2].devnum; in reshape_request()
4818 b = r10_bio->devs[s/2].repl_bio; in reshape_request()
4821 b = r10_bio->devs[s/2].bio; in reshape_request()
4827 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + in reshape_request()
4838 pages = get_resync_pages(r10_bio->devs[0].bio)->pages; in reshape_request()
4854 r10_bio->sectors = nr_sectors; in reshape_request()
4857 md_sync_acct_bio(read_bio, r10_bio->sectors); in reshape_request()
4858 atomic_inc(&r10_bio->remaining); in reshape_request()
4878 static void end_reshape_request(struct r10bio *r10_bio);
4880 struct r10bio *r10_bio);
4881 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio) in reshape_request_write() argument
4891 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) in reshape_request_write()
4892 if (handle_reshape_read_error(mddev, r10_bio) < 0) { in reshape_request_write()
4894 md_done_sync(mddev, r10_bio->sectors, 0); in reshape_request_write()
4901 atomic_set(&r10_bio->remaining, 1); in reshape_request_write()
4904 int d = r10_bio->devs[s/2].devnum; in reshape_request_write()
4908 b = r10_bio->devs[s/2].repl_bio; in reshape_request_write()
4911 b = r10_bio->devs[s/2].bio; in reshape_request_write()
4917 md_sync_acct_bio(b, r10_bio->sectors); in reshape_request_write()
4918 atomic_inc(&r10_bio->remaining); in reshape_request_write()
4922 end_reshape_request(r10_bio); in reshape_request_write()
4956 struct r10bio *r10_bio) in handle_reshape_read_error() argument
4959 int sectors = r10_bio->sectors; in handle_reshape_read_error()
4973 pages = get_resync_pages(r10_bio->devs[0].bio)->pages; in handle_reshape_read_error()
4975 r10b->sector = r10_bio->sector; in handle_reshape_read_error()
5028 struct r10bio *r10_bio = get_resync_r10bio(bio); in end_reshape_write() local
5029 struct mddev *mddev = r10_bio->mddev; in end_reshape_write()
5036 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_reshape_write()
5046 end_reshape_request(r10_bio); in end_reshape_write()
5049 static void end_reshape_request(struct r10bio *r10_bio) in end_reshape_request() argument
5051 if (!atomic_dec_and_test(&r10_bio->remaining)) in end_reshape_request()
5053 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1); in end_reshape_request()
5054 bio_put(r10_bio->master_bio); in end_reshape_request()
5055 put_buf(r10_bio); in end_reshape_request()