Lines matching full:log (drivers/md/raid5-ppl.c)
3 * Partial Parity Log for closing the RAID5 write hole
15 #include "raid5-log.h"
76 * data+parity is written). The log->io_list tracks all io_units of a log
97 atomic64_t seq; /* current log write sequence number */
116 struct ppl_conf *ppl_conf; /* shared between all log instances */
119 * this log instance */
124 struct list_head io_list; /* all io_units of this log */
136 struct ppl_log *log; member
143 u64 seq; /* sequence number of this log write */
144 struct list_head log_sibling; /* log->io_list */
150 bool submitted; /* true if write to log started */
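The struct fragments above describe the ownership chain: one shared ppl_conf feeds an array of per-member-disk ppl_log instances, and each log strings its in-flight writes onto io_list as ppl_io_unit entries. Below is a minimal user-space model of that chain; field names that appear in the listing are kept, while the toy list helpers and the pthread locks (standing in for the kernel's spinlock and mutex) are illustrative, not the kernel's API. Later sketches in this listing reuse these types.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <pthread.h>

/* toy doubly linked list, standing in for <linux/list.h> */
struct list_head { struct list_head *next, *prev; };

static inline void INIT_LIST_HEAD(struct list_head *h)
{
    h->next = h->prev = h;
}

static inline void list_add_tail(struct list_head *n, struct list_head *h)
{
    n->prev = h->prev;
    n->next = h;
    h->prev->next = n;
    h->prev = n;
}

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

#define list_first_entry_or_null(head, type, member) \
    ((head)->next != (head) ? \
     container_of((head)->next, type, member) : (type *)NULL)

struct ppl_conf;

struct ppl_log {
    struct ppl_conf *ppl_conf;       /* shared between all log instances */
    pthread_mutex_t io_mutex;        /* serializes writers of this log */
    pthread_mutex_t io_list_lock;    /* protects io_list (a spinlock in md) */
    struct list_head io_list;        /* all io_units of this log */
    struct ppl_io_unit *current_io;  /* unit still accepting new stripes */
    size_t entry_space;              /* bytes usable per log write */
    bool use_multippl;               /* several PPLs rotate on the disk */
    bool rdev_ok;                    /* stands in for a usable, non-Faulty rdev */
    uint64_t ppl_sector;             /* start of the PPL area, in sectors */
    uint64_t ppl_size_sectors;       /* size of the PPL area, in sectors */
    uint64_t next_io_sector;         /* where the next log write lands */
    unsigned long disk_flush_bitmap; /* disks written since the last flush */
};

struct ppl_io_unit {
    struct ppl_log *log;             /* owning per-disk child log */
    uint64_t seq;                    /* sequence number of this log write */
    struct list_head log_sibling;    /* linked into log->io_list */
    size_t pp_size;                  /* accumulated partial parity bytes */
    bool submitted;                  /* true once the write was started */
};

struct ppl_conf {
    struct ppl_log *child_logs;      /* one child log per member disk */
    int raid_disks;
};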
232 static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log, in ppl_new_iounit() argument
235 struct ppl_conf *ppl_conf = log->ppl_conf; in ppl_new_iounit()
248 io->log = log; in ppl_new_iounit()
253 bio_init(&io->bio, log->rdev->bdev, io->biovec, PPL_IO_INLINE_BVECS, in ppl_new_iounit()
267 static int ppl_log_stripe(struct ppl_log *log, struct stripe_head *sh) in ppl_log_stripe() argument
269 struct ppl_io_unit *io = log->current_io; in ppl_log_stripe()
280 if (io && (io->pp_size == log->entry_space || in ppl_log_stripe()
289 io = ppl_new_iounit(log, sh); in ppl_log_stripe()
292 spin_lock_irq(&log->io_list_lock); in ppl_log_stripe()
293 list_add_tail(&io->log_sibling, &log->io_list); in ppl_log_stripe()
294 spin_unlock_irq(&log->io_list_lock); in ppl_log_stripe()
296 log->current_io = io; in ppl_log_stripe()
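The ppl_log_stripe() lines above show the batching policy: stripes keep landing in the open io_unit until its partial parity area would exceed entry_space, at which point a fresh unit is allocated and linked onto io_list under io_list_lock. A sketch of that decision, reusing the toy types from the first block (the kernel's full test also checks conditions truncated out of this listing, such as the entry count; new_iounit() here is a hypothetical allocator):

static struct ppl_io_unit *new_iounit(struct ppl_log *log); /* hypothetical */

static int log_stripe(struct ppl_log *log, size_t pp_size)
{
    struct ppl_io_unit *io = log->current_io;

    /* close the open unit if this stripe's partial parity would not
     * fit in its remaining entry space */
    if (io && io->pp_size + pp_size > log->entry_space)
        io = NULL;

    if (!io) {
        io = new_iounit(log);
        if (!io)
            return -1;  /* caller falls back to non-PPL write-out */
        pthread_mutex_lock(&log->io_list_lock);
        list_add_tail(&io->log_sibling, &log->io_list);
        pthread_mutex_unlock(&log->io_list_lock);
        log->current_io = io;
    }

    io->pp_size += pp_size;
    return 0;
}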
365 struct ppl_log *log; in ppl_write_stripe() local
374 log = &ppl_conf->child_logs[sh->pd_idx]; in ppl_write_stripe()
376 mutex_lock(&log->io_mutex); in ppl_write_stripe()
378 if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) { in ppl_write_stripe()
379 mutex_unlock(&log->io_mutex); in ppl_write_stripe()
387 if (ppl_log_stripe(log, sh)) { in ppl_write_stripe()
393 mutex_unlock(&log->io_mutex); in ppl_write_stripe()
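ppl_write_stripe() above routes each stripe to the child log indexed by the stripe's parity disk (sh->pd_idx) and takes that log's io_mutex before logging; a missing or Faulty rdev aborts early. A sketch under the same assumptions (rdev_ok stands in for the NULL/Faulty rdev test in the listing):

static int write_stripe(struct ppl_conf *conf, int pd_idx, size_t pp_size)
{
    struct ppl_log *log = &conf->child_logs[pd_idx];
    int err;

    pthread_mutex_lock(&log->io_mutex);
    if (!log->rdev_ok) {
        /* no usable log device: the stripe proceeds without PPL */
        pthread_mutex_unlock(&log->io_mutex);
        return -1;
    }
    err = log_stripe(log, pp_size);
    pthread_mutex_unlock(&log->io_mutex);
    return err;
}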
401 struct ppl_log *log = io->log; in ppl_log_endio() local
402 struct ppl_conf *ppl_conf = log->ppl_conf; in ppl_log_endio()
408 md_error(ppl_conf->mddev, log->rdev); in ppl_log_endio()
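The ppl_log_endio() lines show the error policy on log-write completion: a failed PPL write is escalated with md_error(), failing the member disk that holds the log. A one-function sketch (fail_member_disk() is a hypothetical stand-in for md_error()):

static void fail_member_disk(struct ppl_log *log); /* hypothetical */

static void log_endio(struct ppl_io_unit *io, int error)
{
    /* a PPL write failure means the disk holding this child log can
     * no longer be trusted, so the whole member disk is failed */
    if (error)
        fail_member_disk(io->log);
}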
430 struct ppl_log *log = io->log; in ppl_submit_iounit() local
431 struct ppl_conf *ppl_conf = log->ppl_conf; in ppl_submit_iounit()
439 if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) { in ppl_submit_iounit()
460 if (log->use_multippl && in ppl_submit_iounit()
461 log->rdev->ppl.sector + log->rdev->ppl.size - log->next_io_sector < in ppl_submit_iounit()
463 log->next_io_sector = log->rdev->ppl.sector; in ppl_submit_iounit()
467 bio->bi_iter.bi_sector = log->next_io_sector; in ppl_submit_iounit()
470 pr_debug("%s: log->current_io_sector: %llu\n", __func__, in ppl_submit_iounit()
471 (unsigned long long)log->next_io_sector); in ppl_submit_iounit()
473 if (log->use_multippl) in ppl_submit_iounit()
474 log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9; in ppl_submit_iounit()
476 WARN_ON(log->disk_flush_bitmap != 0); in ppl_submit_iounit()
484 set_bit(i, &log->disk_flush_bitmap); in ppl_submit_iounit()
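The ppl_submit_iounit() lines carry the placement logic: with use_multippl, the write cursor wraps back to the start of the reserved PPL area whenever the header plus partial parity would run past its end, and then advances by that size in 512-byte sectors (the >> 9). A sketch, assuming PPL_HEADER_SIZE is a 4 KiB header (an assumption here, matching the sector math in the listing):

#define PPL_HEADER_SIZE 4096  /* assumed header size, in bytes */

static uint64_t place_iounit(struct ppl_log *log, struct ppl_io_unit *io)
{
    uint64_t need = (PPL_HEADER_SIZE + io->pp_size) >> 9; /* in sectors */
    uint64_t end = log->ppl_sector + log->ppl_size_sectors;
    uint64_t sector;

    /* wrap the cursor when this write would run past the PPL area */
    if (log->use_multippl && end - log->next_io_sector < need)
        log->next_io_sector = log->ppl_sector;

    sector = log->next_io_sector;
    if (log->use_multippl)
        log->next_io_sector += need;
    return sector;
}

The same function also marks each written member disk in disk_flush_bitmap (the set_bit() line above), which the flush path consumes later; see the sketch after the ppl_do_flush() lines.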
509 static void ppl_submit_current_io(struct ppl_log *log) in ppl_submit_current_io() argument
513 spin_lock_irq(&log->io_list_lock); in ppl_submit_current_io()
515 io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit, in ppl_submit_current_io()
520 spin_unlock_irq(&log->io_list_lock); in ppl_submit_current_io()
525 if (io == log->current_io) in ppl_submit_current_io()
526 log->current_io = NULL; in ppl_submit_current_io()
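ppl_submit_current_io() peeks at the oldest io_unit under io_list_lock, skips it if it is already in flight, and otherwise marks it submitted, detaches it from current_io so new stripes open a fresh unit, and starts the write. A sketch (submit_iounit() is a hypothetical I/O submission hook):

static void submit_iounit(struct ppl_io_unit *io); /* hypothetical */

static void submit_current_io(struct ppl_log *log)
{
    struct ppl_io_unit *io;

    pthread_mutex_lock(&log->io_list_lock);
    io = list_first_entry_or_null(&log->io_list,
                                  struct ppl_io_unit, log_sibling);
    if (io && io->submitted)  /* oldest unit is already in flight */
        io = NULL;
    pthread_mutex_unlock(&log->io_list_lock);

    if (io) {
        io->submitted = true;
        if (io == log->current_io)
            log->current_io = NULL;
        submit_iounit(io);
    }
}

ppl_write_stripe_run(), shown next in the listing, simply walks every child log and calls this under io_mutex.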
535 struct ppl_log *log; in ppl_write_stripe_run() local
539 log = &ppl_conf->child_logs[i]; in ppl_write_stripe_run()
541 mutex_lock(&log->io_mutex); in ppl_write_stripe_run()
542 ppl_submit_current_io(log); in ppl_write_stripe_run()
543 mutex_unlock(&log->io_mutex); in ppl_write_stripe_run()
549 struct ppl_log *log = io->log; in ppl_io_unit_finished() local
550 struct ppl_conf *ppl_conf = log->ppl_conf; in ppl_io_unit_finished()
558 spin_lock(&log->io_list_lock); in ppl_io_unit_finished()
560 spin_unlock(&log->io_list_lock); in ppl_io_unit_finished()
584 struct ppl_log *log = io->log; in ppl_flush_endio() local
585 struct ppl_conf *ppl_conf = log->ppl_conf; in ppl_flush_endio()
610 struct ppl_log *log = io->log; in ppl_do_flush() local
611 struct ppl_conf *ppl_conf = log->ppl_conf; in ppl_do_flush()
619 for_each_set_bit(i, &log->disk_flush_bitmap, raid_disks) { in ppl_do_flush()
643 log->disk_flush_bitmap = 0; in ppl_do_flush()
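ppl_do_flush() walks the bitmap of member disks written since the last flush, issues one cache flush per set bit, and clears the bitmap for the next round. A sketch (flush_disk() is hypothetical; the plain unsigned long bitmap limits this toy to 64 disks):

static void flush_disk(struct ppl_conf *conf, int disk); /* hypothetical */

static void do_flush(struct ppl_log *log)
{
    int i;

    /* one cache flush per member disk written since the last flush */
    for (i = 0; i < log->ppl_conf->raid_disks; i++)
        if (log->disk_flush_bitmap & (1UL << i))
            flush_disk(log->ppl_conf, i);

    log->disk_flush_bitmap = 0;
}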
652 struct ppl_log *log) in ppl_no_io_unit_submitted() argument
656 io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit, in ppl_no_io_unit_submitted()
669 struct ppl_log *log = &ppl_conf->child_logs[i]; in ppl_quiesce() local
671 spin_lock_irq(&log->io_list_lock); in ppl_quiesce()
673 ppl_no_io_unit_submitted(conf, log), in ppl_quiesce()
674 log->io_list_lock); in ppl_quiesce()
675 spin_unlock_irq(&log->io_list_lock); in ppl_quiesce()
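The ppl_quiesce() lines wait, per child log, until nothing is in flight: the condition holds when io_list is empty or its oldest unit has not been submitted yet. The kernel does this with wait_event_lock_irq() under io_list_lock; in the sketch a condition variable plays that role (io_done would be signalled by the completion path, ppl_io_unit_finished() above, after it unlinks the unit):

static pthread_cond_t io_done = PTHREAD_COND_INITIALIZER; /* illustrative */

static bool no_io_unit_submitted(struct ppl_log *log)
{
    struct ppl_io_unit *io =
        list_first_entry_or_null(&log->io_list,
                                 struct ppl_io_unit, log_sibling);

    /* quiesced when the list is empty or its oldest unit was never started */
    return !io || !io->submitted;
}

static void quiesce_log(struct ppl_log *log)
{
    pthread_mutex_lock(&log->io_list_lock);
    while (!no_io_unit_submitted(log))
        pthread_cond_wait(&io_done, &log->io_list_lock);
    pthread_mutex_unlock(&log->io_list_lock);
}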
698 if (io->log->disk_flush_bitmap) in ppl_stripe_write_finished()
784 static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e, in ppl_recover_entry() argument
787 struct ppl_conf *ppl_conf = log->ppl_conf; in ppl_recover_entry()
916 if (!sync_page_io(log->rdev, in ppl_recover_entry()
917 ppl_sector - log->rdev->data_offset + i, in ppl_recover_entry()
922 md_error(mddev, log->rdev); in ppl_recover_entry()
937 BUG_ON(parity_rdev->bdev->bd_dev != log->rdev->bdev->bd_dev); in ppl_recover_entry()
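The sync_page_io() call in ppl_recover_entry() subtracts log->rdev->data_offset because the PPL entry records absolute device sectors, while the md read helper expects sectors relative to the rdev's data area. A sketch of just that address translation (struct rdev and dev_read() are simplified stand-ins):

struct rdev {
    uint64_t data_offset;  /* start of the data area, in sectors */
};

static int dev_read(struct rdev *rdev, uint64_t sector,
                    void *buf, size_t len);  /* hypothetical */

static int read_ppl_data(struct rdev *rdev, uint64_t abs_sector,
                         void *buf, size_t len)
{
    /* convert the absolute device sector stored in the PPL entry to a
     * sector relative to the rdev's data area before reading */
    return dev_read(rdev, abs_sector - rdev->data_offset, buf, len);
}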
959 static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr, in ppl_recover() argument
962 struct ppl_conf *ppl_conf = log->ppl_conf; in ppl_recover()
963 struct md_rdev *rdev = log->rdev; in ppl_recover()
1019 ret = ppl_recover_entry(log, e, ppl_sector); in ppl_recover()
1035 static int ppl_write_empty_header(struct ppl_log *log) in ppl_write_empty_header() argument
1039 struct md_rdev *rdev = log->rdev; in ppl_write_empty_header()
1052 log->rdev->ppl.size, GFP_NOIO, 0); in ppl_write_empty_header()
1054 pplhdr->signature = cpu_to_le32(log->ppl_conf->signature); in ppl_write_empty_header()
1068 static int ppl_load_distributed(struct ppl_log *log) in ppl_load_distributed() argument
1070 struct ppl_conf *ppl_conf = log->ppl_conf; in ppl_load_distributed()
1071 struct md_rdev *rdev = log->rdev; in ppl_load_distributed()
1165 /* attempt to recover from log if we are starting a dirty array */ in ppl_load_distributed()
1167 ret = ppl_recover(log, pplhdr, pplhdr_offset); in ppl_load_distributed()
1171 ret = ppl_write_empty_header(log); in ppl_load_distributed()
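ppl_load_distributed() reads and validates the header, replays the log only when a dirty array is being started (the comment above), and then writes an empty header so stale entries can never be replayed twice. A sketch of that decision sequence (recover() and write_empty_header() are hypothetical stand-ins for ppl_recover() and ppl_write_empty_header(); the kernel's actual conditions also involve array state not visible in this listing):

struct ppl_header;  /* on-disk header, layout omitted */

static int recover(struct ppl_log *log, struct ppl_header *hdr,
                   uint64_t hdr_offset);            /* hypothetical */
static int write_empty_header(struct ppl_log *log); /* hypothetical */

static int load_distributed(struct ppl_log *log, struct ppl_header *hdr,
                            uint64_t hdr_offset, bool starting_dirty)
{
    int ret = 0;

    /* replay only when starting a dirty array; afterwards invalidate
     * the log so its entries cannot be replayed a second time */
    if (hdr && starting_dirty)
        ret = recover(log, hdr, hdr_offset);
    if (!ret)
        ret = write_empty_header(log);
    return ret;
}

ppl_load(), shown next, runs this once per child log and skips logs whose rdev is absent.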
1190 struct ppl_log *log = &ppl_conf->child_logs[i]; in ppl_load() local
1193 if (!log->rdev) in ppl_load()
1196 ret = ppl_load_distributed(log); in ppl_load()
1296 static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev) in ppl_init_child_log() argument
1300 log->use_multippl = true; in ppl_init_child_log()
1302 &log->ppl_conf->mddev->flags); in ppl_init_child_log()
1303 log->entry_space = PPL_SPACE_SIZE; in ppl_init_child_log()
1305 log->use_multippl = false; in ppl_init_child_log()
1306 log->entry_space = (log->rdev->ppl.size << 9) - in ppl_init_child_log()
1309 log->next_io_sector = rdev->ppl.sector; in ppl_init_child_log()
1312 log->wb_cache_on = true; in ppl_init_child_log()
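ppl_init_child_log() sizes the log: with multiple PPLs per disk each slot gets a fixed entry space, while a single PPL may use the whole reserved area minus the header (ppl.size is in sectors, hence << 9 for bytes, matching the truncated line above). A sketch, assuming PPL_SPACE_SIZE is a 128 KiB per-slot constant (an assumption here):

#define PPL_SPACE_SIZE (128 * 1024)  /* assumed per-PPL entry space */

static void init_child_log(struct ppl_log *log, bool multi)
{
    log->use_multippl = multi;
    if (multi) {
        /* several fixed-size PPLs rotate within the reserved area */
        log->entry_space = PPL_SPACE_SIZE;
    } else {
        /* a single PPL: everything after the header is usable */
        log->entry_space = (log->ppl_size_sectors << 9)
                           - PPL_HEADER_SIZE;
    }
    log->next_io_sector = log->ppl_sector;
}

The wb_cache_on line above records whether the member disk has a volatile write-back cache, which is what gates the flush path sketched after the ppl_do_flush() lines.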
1323 pr_debug("md/raid:%s: enabling distributed Partial Parity Log\n", in ppl_init_log()
1401 struct ppl_log *log = &ppl_conf->child_logs[i]; in ppl_init_log() local
1404 mutex_init(&log->io_mutex); in ppl_init_log()
1405 spin_lock_init(&log->io_list_lock); in ppl_init_log()
1406 INIT_LIST_HEAD(&log->io_list); in ppl_init_log()
1408 log->ppl_conf = ppl_conf; in ppl_init_log()
1409 log->rdev = rdev; in ppl_init_log()
1416 ppl_init_child_log(log, rdev); in ppl_init_log()
1452 struct ppl_log *log; in ppl_modify_log() local
1468 log = &ppl_conf->child_logs[rdev->raid_disk]; in ppl_modify_log()
1470 mutex_lock(&log->io_mutex); in ppl_modify_log()
1474 log->rdev = rdev; in ppl_modify_log()
1475 ret = ppl_write_empty_header(log); in ppl_modify_log()
1476 ppl_init_child_log(log, rdev); in ppl_modify_log()
1479 log->rdev = NULL; in ppl_modify_log()
1481 mutex_unlock(&log->io_mutex); in ppl_modify_log()
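Finally, ppl_modify_log() handles hot add/remove under the child log's io_mutex: an added disk is installed as the log's rdev, stamped with a fresh empty header, and re-initialized; a removed disk is simply detached. A sketch reusing the earlier toy helpers (the kernel's add path also depends on array details not visible in this listing):

static int modify_log(struct ppl_conf *conf, int raid_disk,
                      bool add, bool multi)
{
    struct ppl_log *log = &conf->child_logs[raid_disk];
    int ret = 0;

    pthread_mutex_lock(&log->io_mutex);
    if (add) {
        /* a newly added disk gets a fresh empty header before its
         * child log is armed */
        log->rdev_ok = true;
        ret = write_empty_header(log);
        init_child_log(log, multi);
    } else {
        /* a removed disk simply detaches from its child log */
        log->rdev_ok = false;
    }
    pthread_mutex_unlock(&log->io_mutex);
    return ret;
}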