Lines matching "atomic", "threshold", and "us" in fs/xfs/xfs_log_cil.c
1 // SPDX-License-Identifier: GPL-2.0
27 * We don't reserve any space for the ticket - we are going to steal whatever
45 tic->t_curr_res = 0; in xlog_cil_ticket_alloc()
46 tic->t_iclog_hdrs = 0; in xlog_cil_ticket_alloc()
53 struct xlog *log = cil->xc_log; in xlog_cil_set_iclog_hdr_count()
55 atomic_set(&cil->xc_iclog_hdrs, in xlog_cil_set_iclog_hdr_count()
57 (log->l_iclog_size - log->l_iclog_hsize))); in xlog_cil_set_iclog_hdr_count()
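The computation above sizes the worst case: how many iclog headers a single checkpoint can consume, i.e. the blocking space limit divided by the usable payload of one iclog (its size minus its header). A minimal userspace sketch of that arithmetic follows; the three constants are illustrative stand-ins for XLOG_CIL_BLOCKING_SPACE_LIMIT(), log->l_iclog_size and log->l_iclog_hsize, not the kernel's actual values.

#include <stdio.h>

/* Illustrative stand-ins; the kernel derives these from the mount. */
#define ICLOG_SIZE	(32 * 1024)		/* one in-core log buffer */
#define ICLOG_HSIZE	512			/* header bytes per iclog */
#define BLOCKING_LIMIT	(8 * 1024 * 1024)	/* CIL blocking space limit */

int main(void)
{
	/*
	 * Each iclog carries (size - header) bytes of checkpoint data, so
	 * a full CIL context spreads over at most this many iclogs and
	 * hence consumes at most this many iclog headers.
	 */
	int hdrs = BLOCKING_LIMIT / (ICLOG_SIZE - ICLOG_HSIZE);

	printf("worst-case iclog headers per checkpoint: %d\n", hdrs);
	return 0;
}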
65 * Note: for this to be used in a non-racy manner, it has to be called with
74 if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) in xlog_item_in_current_chkpt()
82 return lip->li_seq == READ_ONCE(cil->xc_current_sequence); in xlog_item_in_current_chkpt()
89 return xlog_item_in_current_chkpt(lip->li_log->l_cilp, lip); in xfs_log_item_in_current_chkpt()
93 * Unavoidable forward declaration - xlog_cil_push_work() calls
104 INIT_LIST_HEAD(&ctx->committing); in xlog_cil_ctx_alloc()
105 INIT_LIST_HEAD(&ctx->busy_extents.extent_list); in xlog_cil_ctx_alloc()
106 INIT_LIST_HEAD(&ctx->log_items); in xlog_cil_ctx_alloc()
107 INIT_LIST_HEAD(&ctx->lv_chain); in xlog_cil_ctx_alloc()
108 INIT_WORK(&ctx->push_work, xlog_cil_push_work); in xlog_cil_ctx_alloc()
116 * will be accessing or modifying the per-cpu counters.
126 for_each_cpu(cpu, &ctx->cil_pcpmask) { in xlog_cil_push_pcp_aggregate()
127 cilpcp = per_cpu_ptr(cil->xc_pcp, cpu); in xlog_cil_push_pcp_aggregate()
129 ctx->ticket->t_curr_res += cilpcp->space_reserved; in xlog_cil_push_pcp_aggregate()
130 cilpcp->space_reserved = 0; in xlog_cil_push_pcp_aggregate()
132 if (!list_empty(&cilpcp->busy_extents)) { in xlog_cil_push_pcp_aggregate()
133 list_splice_init(&cilpcp->busy_extents, in xlog_cil_push_pcp_aggregate()
134 &ctx->busy_extents.extent_list); in xlog_cil_push_pcp_aggregate()
136 if (!list_empty(&cilpcp->log_items)) in xlog_cil_push_pcp_aggregate()
137 list_splice_init(&cilpcp->log_items, &ctx->log_items); in xlog_cil_push_pcp_aggregate()
144 cilpcp->space_used = 0; in xlog_cil_push_pcp_aggregate()
149 * Aggregate the CIL per-cpu space used counters into the global atomic value.
150 * This is called when the per-cpu counter aggregation will first pass the soft
151 * limit threshold so we can switch to atomic counter aggregation for accurate
162 /* Trigger atomic updates then aggregate only for the first caller */ in xlog_cil_insert_pcp_aggregate()
163 if (!test_and_clear_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags)) in xlog_cil_insert_pcp_aggregate()
172 for_each_cpu(cpu, &ctx->cil_pcpmask) { in xlog_cil_insert_pcp_aggregate()
173 struct xlog_cil_pcp *cilpcp = per_cpu_ptr(cil->xc_pcp, cpu); in xlog_cil_insert_pcp_aggregate()
174 int old = READ_ONCE(cilpcp->space_used); in xlog_cil_insert_pcp_aggregate()
176 while (!try_cmpxchg(&cilpcp->space_used, &old, 0)) in xlog_cil_insert_pcp_aggregate()
180 atomic_add(count, &ctx->space_used); in xlog_cil_insert_pcp_aggregate()
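The loop above snapshots and zeroes each per-CPU counter atomically: try_cmpxchg() refreshes "old" on failure, so a concurrent addition can never be lost between the read and the reset. A self-contained C11 sketch of the same drain pattern, with atomic_compare_exchange_weak() standing in for try_cmpxchg() (NR_CPUS and the counter names are hypothetical):

#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4

/* Per-"CPU" counters and the global aggregate they drain into. */
static _Atomic int pcp_space_used[NR_CPUS];
static atomic_int global_space_used;

static void drain_pcp_counters(void)
{
	int count = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		int old = atomic_load(&pcp_space_used[cpu]);

		/*
		 * Swap the per-CPU value for zero; on failure 'old' is
		 * refreshed with the current value, so retrying cannot
		 * lose a concurrent addition.
		 */
		while (!atomic_compare_exchange_weak(&pcp_space_used[cpu],
						     &old, 0))
			;
		count += old;
	}
	atomic_fetch_add(&global_space_used, count);
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		atomic_store(&pcp_space_used[cpu], (cpu + 1) * 100);
	drain_pcp_counters();
	printf("aggregated: %d\n", atomic_load(&global_space_used));
	return 0;
}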
189 set_bit(XLOG_CIL_EMPTY, &cil->xc_flags); in xlog_cil_ctx_switch()
190 set_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags); in xlog_cil_ctx_switch()
191 ctx->sequence = ++cil->xc_current_sequence; in xlog_cil_ctx_switch()
192 ctx->cil = cil; in xlog_cil_ctx_switch()
193 cil->xc_ctx = ctx; in xlog_cil_ctx_switch()
210 log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log); in xlog_cil_init_post_recovery()
211 log->l_cilp->xc_ctx->sequence = 1; in xlog_cil_init_post_recovery()
212 xlog_cil_set_iclog_hdr_count(log->l_cilp); in xlog_cil_init_post_recovery()
277 list_for_each_entry(lip, &tp->t_items, li_trans) { in xlog_cil_alloc_shadow_bufs()
285 if (!test_bit(XFS_LI_DIRTY, &lip->li_flags)) in xlog_cil_alloc_shadow_bufs()
289 lip->li_ops->iop_size(lip, &niovecs, &nbytes); in xlog_cil_alloc_shadow_bufs()
303 * We 64-bit align the length of each iovec so that the start of in xlog_cil_alloc_shadow_bufs()
312 * Then round nbytes up to 64-bit alignment so that the initial in xlog_cil_alloc_shadow_bufs()
320 * The data buffer needs to start 64-bit aligned, so round up in xlog_cil_alloc_shadow_bufs()
330 if (!lip->li_lv_shadow || in xlog_cil_alloc_shadow_bufs()
331 buf_size > lip->li_lv_shadow->lv_size) { in xlog_cil_alloc_shadow_bufs()
335 * same reason - we don't need to zero the data area in in xlog_cil_alloc_shadow_bufs()
339 kvfree(lip->li_lv_shadow); in xlog_cil_alloc_shadow_bufs()
344 INIT_LIST_HEAD(&lv->lv_list); in xlog_cil_alloc_shadow_bufs()
345 lv->lv_item = lip; in xlog_cil_alloc_shadow_bufs()
346 lv->lv_size = buf_size; in xlog_cil_alloc_shadow_bufs()
348 lv->lv_buf_len = XFS_LOG_VEC_ORDERED; in xlog_cil_alloc_shadow_bufs()
350 lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1]; in xlog_cil_alloc_shadow_bufs()
351 lip->li_lv_shadow = lv; in xlog_cil_alloc_shadow_bufs()
354 lv = lip->li_lv_shadow; in xlog_cil_alloc_shadow_bufs()
356 lv->lv_buf_len = XFS_LOG_VEC_ORDERED; in xlog_cil_alloc_shadow_bufs()
358 lv->lv_buf_len = 0; in xlog_cil_alloc_shadow_bufs()
359 lv->lv_bytes = 0; in xlog_cil_alloc_shadow_bufs()
362 /* Ensure the lv is set up according to ->iop_size */ in xlog_cil_alloc_shadow_bufs()
363 lv->lv_niovecs = niovecs; in xlog_cil_alloc_shadow_bufs()
366 lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs); in xlog_cil_alloc_shadow_bufs()
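The shadow buffer set up above is a single allocation: the xfs_log_vec header, the iovec array immediately after it (&lv[1]), then a 64-bit-aligned data area that lv_buf points at. A hedged userspace sketch of that sizing and carving, using simplified stand-in structs rather than the kernel definitions:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for struct xfs_log_vec / xfs_log_iovec. */
struct iovec_hdr { void *addr; int len; };
struct logvec_hdr { struct iovec_hdr *iovecs; int niovecs; char *buf; };

#define ROUND_UP_8(x)	(((x) + 7ULL) & ~7ULL)

/* Header space: logvec + iovec array, rounded to 64-bit alignment. */
static size_t iovec_space(int niovecs)
{
	return ROUND_UP_8(sizeof(struct logvec_hdr) +
			  niovecs * sizeof(struct iovec_hdr));
}

int main(void)
{
	int niovecs = 3;
	/* Each region length is rounded up so every iovec starts aligned. */
	size_t nbytes = ROUND_UP_8(100) + ROUND_UP_8(57) + ROUND_UP_8(16);
	size_t buf_size = nbytes + iovec_space(niovecs);

	struct logvec_hdr *lv = malloc(buf_size);
	if (!lv)
		return 1;
	lv->niovecs = niovecs;
	lv->iovecs = (struct iovec_hdr *)&lv[1];
	/* Data area starts 64-bit aligned, past the headers. */
	lv->buf = (char *)lv + iovec_space(niovecs);

	printf("buf_size=%zu, data offset=%zu\n", buf_size,
	       (size_t)(lv->buf - (char *)lv));
	free(lv);
	return 0;
}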
383 if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) in xfs_cil_prepare_item()
384 *diff_len += lv->lv_bytes; in xfs_cil_prepare_item()
394 if (lv->lv_item->li_ops->iop_pin) in xfs_cil_prepare_item()
395 lv->lv_item->li_ops->iop_pin(lv->lv_item); in xfs_cil_prepare_item()
396 lv->lv_item->li_lv_shadow = NULL; in xfs_cil_prepare_item()
398 ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED); in xfs_cil_prepare_item()
400 *diff_len -= old_lv->lv_bytes; in xfs_cil_prepare_item()
401 lv->lv_item->li_lv_shadow = old_lv; in xfs_cil_prepare_item()
405 lv->lv_item->li_lv = lv; in xfs_cil_prepare_item()
413 if (!lv->lv_item->li_seq) in xfs_cil_prepare_item()
414 lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence; in xfs_cil_prepare_item()
421 * changes on the log item. This enables us to relog the item in memory and
427 * dependent on the current state of the vector in the CIL - the shadow lv is
430 * lv, then simply swap it out for the shadow lv. We don't free it - that is in xlog_cil_alloc_shadow_bufs()
441 * to the copied region inside the buffer we just allocated. This allows us to
454 if (list_empty(&tp->t_items)) { in xlog_cil_insert_format_items()
459 list_for_each_entry(lip, &tp->t_items, li_trans) { in xlog_cil_insert_format_items()
466 if (!test_bit(XFS_LI_DIRTY, &lip->li_flags)) in xlog_cil_insert_format_items()
473 shadow = lip->li_lv_shadow; in xlog_cil_insert_format_items()
474 if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED) in xlog_cil_insert_format_items()
478 if (!shadow->lv_niovecs && !ordered) in xlog_cil_insert_format_items()
482 old_lv = lip->li_lv; in xlog_cil_insert_format_items()
483 if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) { in xlog_cil_insert_format_items()
485 lv = lip->li_lv; in xlog_cil_insert_format_items()
494 *diff_len -= lv->lv_bytes; in xlog_cil_insert_format_items()
496 /* Ensure the lv is set up according to ->iop_size */ in xlog_cil_insert_format_items()
497 lv->lv_niovecs = shadow->lv_niovecs; in xlog_cil_insert_format_items()
500 lv->lv_buf_len = 0; in xlog_cil_insert_format_items()
501 lv->lv_bytes = 0; in xlog_cil_insert_format_items()
502 lv->lv_buf = (char *)lv + in xlog_cil_insert_format_items()
503 xlog_cil_iovec_space(lv->lv_niovecs); in xlog_cil_insert_format_items()
507 lv->lv_item = lip; in xlog_cil_insert_format_items()
510 ASSERT(lip->li_lv == NULL); in xlog_cil_insert_format_items()
515 ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t))); in xlog_cil_insert_format_items()
516 lip->li_ops->iop_format(lip, lv); in xlog_cil_insert_format_items()
532 if (waitqueue_active(&log->l_cilp->xc_push_wait)) in xlog_cil_over_hard_limit()
552 struct xfs_cil *cil = log->l_cilp; in xlog_cil_insert_items()
553 struct xfs_cil_ctx *ctx = cil->xc_ctx; in xlog_cil_insert_items()
575 len -= released_space; in xlog_cil_insert_items()
578 * Grab the per-cpu pointer for the CIL before we start any accounting. in xlog_cil_insert_items()
579 * That ensures that we are running with pre-emption disabled and so we in xlog_cil_insert_items()
584 cilpcp = this_cpu_ptr(cil->xc_pcp); in xlog_cil_insert_items()
587 if (!cpumask_test_cpu(cpu_nr, &ctx->cil_pcpmask)) in xlog_cil_insert_items()
588 cpumask_test_and_set_cpu(cpu_nr, &ctx->cil_pcpmask); in xlog_cil_insert_items()
593 * unnecessarily do an atomic op in the fast path here. We can clear the in xlog_cil_insert_items()
597 if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) && in xlog_cil_insert_items()
598 test_and_clear_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) in xlog_cil_insert_items()
599 ctx_res = ctx->ticket->t_unit_res; in xlog_cil_insert_items()
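The test_bit()-then-test_and_clear_bit() pairing above is the classic check-before-RMW idiom: a plain read keeps the common path free of locked operations, while the atomic RMW still guarantees exactly one committer wins and takes the checkpoint unit reservation. A C11 sketch of the idiom (the flag and function names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool cil_empty = true;

/* Returns true for exactly one caller: the first commit into the CIL. */
static bool take_unit_reservation(void)
{
	/*
	 * Cheap plain load first: once the flag is clear, every later
	 * commit skips the expensive atomic exchange entirely.
	 */
	if (!atomic_load_explicit(&cil_empty, memory_order_relaxed))
		return false;

	/* Atomic clear; only the caller that flips true->false wins. */
	return atomic_exchange(&cil_empty, false);
}

int main(void)
{
	printf("first caller takes it: %d\n", take_unit_reservation());
	printf("second caller skips:   %d\n", take_unit_reservation());
	return 0;
}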
603 * locked atomic operation, so we can check the value before we do any in xlog_cil_insert_items()
604 * real atomic ops in the fast path. If we've already taken the CIL unit in xlog_cil_insert_items()
616 * The cil->xc_ctx_lock provides the serialisation necessary for safely in xlog_cil_insert_items()
619 space_used = atomic_read(&ctx->space_used) + cilpcp->space_used + len; in xlog_cil_insert_items()
620 if (atomic_read(&cil->xc_iclog_hdrs) > 0 || in xlog_cil_insert_items()
622 split_res = log->l_iclog_hsize + in xlog_cil_insert_items()
625 ctx_res += split_res * (tp->t_ticket->t_iclog_hdrs - 1); in xlog_cil_insert_items()
627 ctx_res = split_res * tp->t_ticket->t_iclog_hdrs; in xlog_cil_insert_items()
628 atomic_sub(tp->t_ticket->t_iclog_hdrs, &cil->xc_iclog_hdrs); in xlog_cil_insert_items()
630 cilpcp->space_reserved += ctx_res; in xlog_cil_insert_items()
634 * percpu count into the global count if over the per-cpu threshold. in xlog_cil_insert_items()
636 if (!test_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags)) { in xlog_cil_insert_items()
637 atomic_add(len, &ctx->space_used); in xlog_cil_insert_items()
638 } else if (cilpcp->space_used + len > in xlog_cil_insert_items()
640 space_used = atomic_add_return(cilpcp->space_used + len, in xlog_cil_insert_items()
641 &ctx->space_used); in xlog_cil_insert_items()
642 cilpcp->space_used = 0; in xlog_cil_insert_items()
646 * transition to the global atomic counter. in xlog_cil_insert_items()
651 cilpcp->space_used += len; in xlog_cil_insert_items()
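Taken together, the branches above implement two accounting modes: unlocked per-CPU accumulation while usage is low, a fold into the shared atomic once a CPU's local count crosses its threshold, and a permanent switch to precise global accounting past the soft limit. A single-threaded sketch of that mode switch (in the kernel this runs per CPU with preemption disabled; the limits here are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define PCP_THRESHOLD	1024		/* illustrative per-CPU share */
#define SOFT_LIMIT	(4 * PCP_THRESHOLD)

static atomic_int ctx_space_used;	/* global, precise */
static int pcp_space_used;		/* this CPU's unlocked counter */
static bool pcp_mode = true;		/* like the XLOG_CIL_PCP_SPACE bit */

static void account_space(int len)
{
	if (!pcp_mode) {
		/* Precise mode: every update goes to the shared atomic. */
		atomic_fetch_add(&ctx_space_used, len);
	} else if (pcp_space_used + len > PCP_THRESHOLD) {
		/* Fold the local count into the global atomic counter. */
		int space_used = atomic_fetch_add(&ctx_space_used,
				pcp_space_used + len) + pcp_space_used + len;

		pcp_space_used = 0;
		/* Past the soft limit: switch to precise accounting. */
		if (space_used >= SOFT_LIMIT)
			pcp_mode = false;
	} else {
		pcp_space_used += len;	/* fast path: no atomics at all */
	}
}

int main(void)
{
	for (int i = 0; i < 100; i++)
		account_space(64);
	printf("global=%d local=%d pcp_mode=%d\n",
	       atomic_load(&ctx_space_used), pcp_space_used, pcp_mode);
	return 0;
}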
654 if (!list_empty(&tp->t_busy)) in xlog_cil_insert_items()
655 list_splice_init(&tp->t_busy, &cilpcp->busy_extents); in xlog_cil_insert_items()
663 order = atomic_inc_return(&ctx->order_id); in xlog_cil_insert_items()
664 list_for_each_entry(lip, &tp->t_items, li_trans) { in xlog_cil_insert_items()
666 if (!test_bit(XFS_LI_DIRTY, &lip->li_flags)) in xlog_cil_insert_items()
669 lip->li_order_id = order; in xlog_cil_insert_items()
670 if (!list_empty(&lip->li_cil)) in xlog_cil_insert_items()
672 list_add_tail(&lip->li_cil, &cilpcp->log_items); in xlog_cil_insert_items()
680 tp->t_ticket->t_curr_res -= ctx_res + len; in xlog_cil_insert_items()
681 if (WARN_ON(tp->t_ticket->t_curr_res < 0)) { in xlog_cil_insert_items()
682 xfs_warn(log->l_mp, "Transaction log reservation overrun:"); in xlog_cil_insert_items()
683 xfs_warn(log->l_mp, in xlog_cil_insert_items()
686 xfs_warn(log->l_mp, " split region headers: %d bytes", in xlog_cil_insert_items()
688 xfs_warn(log->l_mp, " ctx ticket: %d bytes", ctx_res); in xlog_cil_insert_items()
704 spin_lock(&ailp->ail_lock); in xlog_cil_ail_insert_batch()
705 /* xfs_trans_ail_update_bulk drops ailp->ail_lock */ in xlog_cil_ail_insert_batch()
711 if (lip->li_ops->iop_unpin) in xlog_cil_ail_insert_batch()
712 lip->li_ops->iop_unpin(lip, 0); in xlog_cil_ail_insert_batch()
760 struct xfs_ail *ailp = ctx->cil->xc_log->l_ailp; in xlog_cil_ail_insert()
774 * xfs_ail_update_finish() so that tail space and space-based wakeups in xlog_cil_ail_insert()
777 ASSERT(XFS_LSN_CMP(ctx->commit_lsn, ailp->ail_head_lsn) >= 0 || in xlog_cil_ail_insert()
779 spin_lock(&ailp->ail_lock); in xlog_cil_ail_insert()
780 xfs_trans_ail_cursor_last(ailp, &cur, ctx->start_lsn); in xlog_cil_ail_insert()
781 old_head = ailp->ail_head_lsn; in xlog_cil_ail_insert()
782 ailp->ail_head_lsn = ctx->commit_lsn; in xlog_cil_ail_insert()
794 xlog_grant_return_space(ailp->ail_log, old_head, ailp->ail_head_lsn); in xlog_cil_ail_insert()
797 list_for_each_entry(lv, &ctx->lv_chain, lv_list) { in xlog_cil_ail_insert()
798 struct xfs_log_item *lip = lv->lv_item; in xlog_cil_ail_insert()
802 set_bit(XFS_LI_ABORTED, &lip->li_flags); in xlog_cil_ail_insert()
804 if (lip->li_ops->flags & XFS_ITEM_RELEASE_WHEN_COMMITTED) { in xlog_cil_ail_insert()
805 lip->li_ops->iop_release(lip); in xlog_cil_ail_insert()
809 if (lip->li_ops->iop_committed) in xlog_cil_ail_insert()
810 item_lsn = lip->li_ops->iop_committed(lip, in xlog_cil_ail_insert()
811 ctx->start_lsn); in xlog_cil_ail_insert()
813 item_lsn = ctx->start_lsn; in xlog_cil_ail_insert()
815 /* item_lsn of -1 means the item needs no further processing */ in xlog_cil_ail_insert()
816 if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0) in xlog_cil_ail_insert()
824 ASSERT(xlog_is_shutdown(ailp->ail_log)); in xlog_cil_ail_insert()
825 if (lip->li_ops->iop_unpin) in xlog_cil_ail_insert()
826 lip->li_ops->iop_unpin(lip, 1); in xlog_cil_ail_insert()
830 if (item_lsn != ctx->start_lsn) { in xlog_cil_ail_insert()
839 spin_lock(&ailp->ail_lock); in xlog_cil_ail_insert()
840 if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) in xlog_cil_ail_insert()
843 spin_unlock(&ailp->ail_lock); in xlog_cil_ail_insert()
844 if (lip->li_ops->iop_unpin) in xlog_cil_ail_insert()
845 lip->li_ops->iop_unpin(lip, 0); in xlog_cil_ail_insert()
850 log_items[i++] = lv->lv_item; in xlog_cil_ail_insert()
853 LOG_ITEM_BATCH_SIZE, ctx->start_lsn); in xlog_cil_ail_insert()
861 ctx->start_lsn); in xlog_cil_ail_insert()
863 spin_lock(&ailp->ail_lock); in xlog_cil_ail_insert()
865 spin_unlock(&ailp->ail_lock); in xlog_cil_ail_insert()
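xlog_cil_ail_insert() amortises AIL lock traffic by staging items in a fixed-size array and inserting them in bulk, flushing whenever the array fills and once more for the final partial batch. A generic sketch of that batching shape, with a print standing in for the locked bulk insert (BATCH_SIZE and the int items are placeholders):

#include <stdio.h>

#define BATCH_SIZE 32	/* stands in for LOG_ITEM_BATCH_SIZE */

/* Pretend bulk insert: in the kernel this takes the AIL lock once. */
static void insert_batch(int *items, int nr)
{
	printf("bulk insert of %d items\n", nr);
}

static void insert_all(const int *src, int count)
{
	int batch[BATCH_SIZE];
	int i = 0;

	for (int n = 0; n < count; n++) {
		batch[i++] = src[n];
		if (i == BATCH_SIZE) {
			insert_batch(batch, i);	/* flush a full batch */
			i = 0;
		}
	}
	if (i)
		insert_batch(batch, i);	/* final partial batch */
}

int main(void)
{
	int items[70];

	for (int n = 0; n < 70; n++)
		items[n] = n;
	insert_all(items, 70);
	return 0;
}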
876 list_del_init(&lv->lv_list); in xlog_cil_free_logvec()
890 struct xfs_mount *mp = ctx->cil->xc_log->l_mp; in xlog_cil_committed()
891 bool abort = xlog_is_shutdown(ctx->cil->xc_log); in xlog_cil_committed()
901 spin_lock(&ctx->cil->xc_push_lock); in xlog_cil_committed()
902 wake_up_all(&ctx->cil->xc_start_wait); in xlog_cil_committed()
903 wake_up_all(&ctx->cil->xc_commit_wait); in xlog_cil_committed()
904 spin_unlock(&ctx->cil->xc_push_lock); in xlog_cil_committed()
909 xfs_extent_busy_sort(&ctx->busy_extents.extent_list); in xlog_cil_committed()
910 xfs_extent_busy_clear(mp, &ctx->busy_extents.extent_list, in xlog_cil_committed()
913 spin_lock(&ctx->cil->xc_push_lock); in xlog_cil_committed()
914 list_del(&ctx->committing); in xlog_cil_committed()
915 spin_unlock(&ctx->cil->xc_push_lock); in xlog_cil_committed()
917 xlog_cil_free_logvec(&ctx->lv_chain); in xlog_cil_committed()
919 if (!list_empty(&ctx->busy_extents.extent_list)) { in xlog_cil_committed()
920 ctx->busy_extents.mount = mp; in xlog_cil_committed()
921 ctx->busy_extents.owner = ctx; in xlog_cil_committed()
922 xfs_discard_extents(mp, &ctx->busy_extents); in xlog_cil_committed()
937 list_del(&ctx->iclog_entry); in xlog_cil_process_committed()
953 struct xfs_cil *cil = ctx->cil; in xlog_cil_set_ctx_write_state()
954 xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header.h_lsn); in xlog_cil_set_ctx_write_state()
956 ASSERT(!ctx->commit_lsn); in xlog_cil_set_ctx_write_state()
957 if (!ctx->start_lsn) { in xlog_cil_set_ctx_write_state()
958 spin_lock(&cil->xc_push_lock); in xlog_cil_set_ctx_write_state()
966 ctx->start_lsn = lsn; in xlog_cil_set_ctx_write_state()
967 wake_up_all(&cil->xc_start_wait); in xlog_cil_set_ctx_write_state()
968 spin_unlock(&cil->xc_push_lock); in xlog_cil_set_ctx_write_state()
975 spin_lock(&cil->xc_log->l_icloglock); in xlog_cil_set_ctx_write_state()
976 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH; in xlog_cil_set_ctx_write_state()
977 spin_unlock(&cil->xc_log->l_icloglock); in xlog_cil_set_ctx_write_state()
986 atomic_inc(&iclog->ic_refcnt); in xlog_cil_set_ctx_write_state()
996 spin_lock(&cil->xc_log->l_icloglock); in xlog_cil_set_ctx_write_state()
997 list_add_tail(&ctx->iclog_entry, &iclog->ic_callbacks); in xlog_cil_set_ctx_write_state()
998 spin_unlock(&cil->xc_log->l_icloglock); in xlog_cil_set_ctx_write_state()
1005 spin_lock(&cil->xc_push_lock); in xlog_cil_set_ctx_write_state()
1006 ctx->commit_iclog = iclog; in xlog_cil_set_ctx_write_state()
1007 ctx->commit_lsn = lsn; in xlog_cil_set_ctx_write_state()
1008 wake_up_all(&cil->xc_commit_wait); in xlog_cil_set_ctx_write_state()
1009 spin_unlock(&cil->xc_push_lock); in xlog_cil_set_ctx_write_state()
1032 spin_lock(&cil->xc_push_lock); in xlog_cil_order_write()
1033 list_for_each_entry(ctx, &cil->xc_committing, committing) { in xlog_cil_order_write()
1039 if (xlog_is_shutdown(cil->xc_log)) { in xlog_cil_order_write()
1040 spin_unlock(&cil->xc_push_lock); in xlog_cil_order_write()
1041 return -EIO; in xlog_cil_order_write()
1048 if (ctx->sequence >= sequence) in xlog_cil_order_write()
1054 if (!ctx->start_lsn) { in xlog_cil_order_write()
1055 xlog_wait(&cil->xc_start_wait, &cil->xc_push_lock); in xlog_cil_order_write()
1060 if (!ctx->commit_lsn) { in xlog_cil_order_write()
1061 xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock); in xlog_cil_order_write()
1067 spin_unlock(&cil->xc_push_lock); in xlog_cil_order_write()
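xlog_cil_order_write() forces start and commit records into the log in checkpoint sequence order: it scans the committing list, sleeps if any lower sequence has not yet written the record of interest, and rescans from the top after every wakeup. A pthread sketch of that scan-and-rewait shape, with a condition variable standing in for xlog_wait() (the kernel version also rechecks for log shutdown after each sleep):

#include <pthread.h>
#include <stdbool.h>

#define NR_SEQ 8

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t start_wait = PTHREAD_COND_INITIALIZER;
static bool start_written[NR_SEQ];

/* Wait until all sequences below 'seq' have written their start record. */
static void order_write(int seq)
{
	pthread_mutex_lock(&lock);
restart:
	for (int s = 0; s < seq; s++) {
		if (!start_written[s]) {
			/* Sleep, then rescan from the top, as the CIL does. */
			pthread_cond_wait(&start_wait, &lock);
			goto restart;
		}
	}
	start_written[seq] = true;
	pthread_cond_broadcast(&start_wait);
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	order_write((int)(long)arg);
	return NULL;
}

int main(void)
{
	pthread_t t[NR_SEQ];

	/* Start in reverse order; records still complete in order. */
	for (long s = NR_SEQ - 1; s >= 0; s--)
		pthread_create(&t[s], NULL, worker, (void *)s);
	for (int s = 0; s < NR_SEQ; s++)
		pthread_join(t[s], NULL);
	return 0;
}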
1074 * sequence order so that log recovery will always use in-order start LSNs when
1082 struct xlog *log = ctx->cil->xc_log; in xlog_cil_write_chain()
1085 error = xlog_cil_order_write(ctx->cil, ctx->sequence, _START_RECORD); in xlog_cil_write_chain()
1088 return xlog_write(log, ctx, &ctx->lv_chain, ctx->ticket, chain_len); in xlog_cil_write_chain()
1101 struct xlog *log = ctx->cil->xc_log; in xlog_cil_write_commit_record()
1104 .oh_tid = cpu_to_be32(ctx->ticket->t_tid), in xlog_cil_write_commit_record()
1121 return -EIO; in xlog_cil_write_commit_record()
1123 error = xlog_cil_order_write(ctx->cil, ctx->sequence, _COMMIT_RECORD); in xlog_cil_write_commit_record()
1128 ctx->ticket->t_curr_res -= reg.i_len; in xlog_cil_write_commit_record()
1129 error = xlog_write(log, ctx, &lv_chain, ctx->ticket, reg.i_len); in xlog_cil_write_commit_record()
1160 struct xlog_ticket *tic = ctx->ticket; in xlog_cil_build_trans_hdr()
1161 __be32 tid = cpu_to_be32(tic->t_tid); in xlog_cil_build_trans_hdr()
1166 hdr->oph[0].oh_tid = tid; in xlog_cil_build_trans_hdr()
1167 hdr->oph[0].oh_clientid = XFS_TRANSACTION; in xlog_cil_build_trans_hdr()
1168 hdr->oph[0].oh_flags = XLOG_START_TRANS; in xlog_cil_build_trans_hdr()
1171 hdr->lhdr[0].i_addr = &hdr->oph[0]; in xlog_cil_build_trans_hdr()
1172 hdr->lhdr[0].i_len = sizeof(struct xlog_op_header); in xlog_cil_build_trans_hdr()
1173 hdr->lhdr[0].i_type = XLOG_REG_TYPE_LRHEADER; in xlog_cil_build_trans_hdr()
1176 hdr->oph[1].oh_tid = tid; in xlog_cil_build_trans_hdr()
1177 hdr->oph[1].oh_clientid = XFS_TRANSACTION; in xlog_cil_build_trans_hdr()
1178 hdr->oph[1].oh_len = cpu_to_be32(sizeof(struct xfs_trans_header)); in xlog_cil_build_trans_hdr()
1181 hdr->thdr.th_magic = XFS_TRANS_HEADER_MAGIC; in xlog_cil_build_trans_hdr()
1182 hdr->thdr.th_type = XFS_TRANS_CHECKPOINT; in xlog_cil_build_trans_hdr()
1183 hdr->thdr.th_tid = tic->t_tid; in xlog_cil_build_trans_hdr()
1184 hdr->thdr.th_num_items = num_iovecs; in xlog_cil_build_trans_hdr()
1187 hdr->lhdr[1].i_addr = &hdr->oph[1]; in xlog_cil_build_trans_hdr()
1188 hdr->lhdr[1].i_len = sizeof(struct xlog_op_header) + in xlog_cil_build_trans_hdr()
1190 hdr->lhdr[1].i_type = XLOG_REG_TYPE_TRANSHDR; in xlog_cil_build_trans_hdr()
1192 lvhdr->lv_niovecs = 2; in xlog_cil_build_trans_hdr()
1193 lvhdr->lv_iovecp = &hdr->lhdr[0]; in xlog_cil_build_trans_hdr()
1194 lvhdr->lv_bytes = hdr->lhdr[0].i_len + hdr->lhdr[1].i_len; in xlog_cil_build_trans_hdr()
1196 tic->t_curr_res -= lvhdr->lv_bytes; in xlog_cil_build_trans_hdr()
1216 return l1->lv_order_id > l2->lv_order_id; in xlog_cil_order_cmp()
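list_sort() only asks whether the first element should sort after the second, so the boolean lv_order_id comparison above yields an ascending sort, and because list_sort() is stable, equal order ids keep their relative position. The equivalent comparator under qsort(), which wants a negative/zero/positive result (and, unlike list_sort(), is not guaranteed stable), would look like this sketch:

#include <stdio.h>
#include <stdlib.h>

struct logvec { unsigned order_id; };

/* Ascending by order_id; three-way result as qsort() requires. */
static int order_cmp(const void *a, const void *b)
{
	const struct logvec *l1 = a, *l2 = b;

	return (l1->order_id > l2->order_id) - (l1->order_id < l2->order_id);
}

int main(void)
{
	struct logvec v[] = { {3}, {1}, {2} };

	qsort(v, 3, sizeof(v[0]), order_cmp);
	printf("%u %u %u\n", v[0].order_id, v[1].order_id, v[2].order_id);
	return 0;
}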
1235 while (!list_empty(&ctx->log_items)) { in xlog_cil_build_lv_chain()
1239 item = list_first_entry(&ctx->log_items, in xlog_cil_build_lv_chain()
1242 if (test_bit(XFS_LI_WHITEOUT, &item->li_flags)) { in xlog_cil_build_lv_chain()
1243 list_move(&item->li_cil, whiteouts); in xlog_cil_build_lv_chain()
1248 lv = item->li_lv; in xlog_cil_build_lv_chain()
1249 lv->lv_order_id = item->li_order_id; in xlog_cil_build_lv_chain()
1252 if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) in xlog_cil_build_lv_chain()
1253 *num_bytes += lv->lv_bytes; in xlog_cil_build_lv_chain()
1254 *num_iovecs += lv->lv_niovecs; in xlog_cil_build_lv_chain()
1255 list_add_tail(&lv->lv_list, &ctx->lv_chain); in xlog_cil_build_lv_chain()
1257 list_del_init(&item->li_cil); in xlog_cil_build_lv_chain()
1258 item->li_order_id = 0; in xlog_cil_build_lv_chain()
1259 item->li_lv = NULL; in xlog_cil_build_lv_chain()
1270 list_del_init(&item->li_cil); in xlog_cil_cleanup_whiteouts()
1272 item->li_ops->iop_unpin(item, 1); in xlog_cil_cleanup_whiteouts()
1281 * flushed and we don't need to do anything - the caller will wait for it to
1303 struct xfs_cil *cil = ctx->cil; in xlog_cil_push_work()
1304 struct xlog *log = cil->xc_log; in xlog_cil_push_work()
1317 new_ctx->ticket = xlog_cil_ticket_alloc(log); in xlog_cil_push_work()
1319 down_write(&cil->xc_ctx_lock); in xlog_cil_push_work()
1321 spin_lock(&cil->xc_push_lock); in xlog_cil_push_work()
1322 push_seq = cil->xc_push_seq; in xlog_cil_push_work()
1323 ASSERT(push_seq <= ctx->sequence); in xlog_cil_push_work()
1324 push_commit_stable = cil->xc_push_commit_stable; in xlog_cil_push_work()
1325 cil->xc_push_commit_stable = false; in xlog_cil_push_work()
1331 * to the new context. The cil->xc_push_lock provides the serialisation in xlog_cil_push_work()
1335 if (waitqueue_active(&cil->xc_push_wait)) in xlog_cil_push_work()
1336 wake_up_all(&cil->xc_push_wait); in xlog_cil_push_work()
1345 if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) { in xlog_cil_push_work()
1346 cil->xc_push_seq = 0; in xlog_cil_push_work()
1347 spin_unlock(&cil->xc_push_lock); in xlog_cil_push_work()
1353 if (push_seq < ctx->sequence) { in xlog_cil_push_work()
1354 spin_unlock(&cil->xc_push_lock); in xlog_cil_push_work()
1382 list_add(&ctx->committing, &cil->xc_committing); in xlog_cil_push_work()
1383 spin_unlock(&cil->xc_push_lock); in xlog_cil_push_work()
1390 * though - we need to synchronise with previous and future commits so in xlog_cil_push_work()
1403 * that higher sequences will wait for us to write out a commit record in xlog_cil_push_work()
1406 * xfs_log_force_seq requires us to mirror the new sequence into the cil in xlog_cil_push_work()
1412 spin_lock(&cil->xc_push_lock); in xlog_cil_push_work()
1414 spin_unlock(&cil->xc_push_lock); in xlog_cil_push_work()
1415 up_write(&cil->xc_ctx_lock); in xlog_cil_push_work()
1422 list_sort(NULL, &ctx->lv_chain, xlog_cil_order_cmp); in xlog_cil_push_work()
1433 list_add(&lvhdr.lv_list, &ctx->lv_chain); in xlog_cil_push_work()
1456 ticket = ctx->ticket; in xlog_cil_push_work()
1461 * checks for this - ACTIVE can be either a past completed iclog or a in xlog_cil_push_work()
1470 spin_lock(&log->l_icloglock); in xlog_cil_push_work()
1471 if (ctx->start_lsn != ctx->commit_lsn) { in xlog_cil_push_work()
1474 plsn = be64_to_cpu(ctx->commit_iclog->ic_prev->ic_header.h_lsn); in xlog_cil_push_work()
1475 if (plsn && XFS_LSN_CMP(plsn, ctx->commit_lsn) < 0) { in xlog_cil_push_work()
1481 xlog_wait_on_iclog(ctx->commit_iclog->ic_prev); in xlog_cil_push_work()
1482 spin_lock(&log->l_icloglock); in xlog_cil_push_work()
1486 * We need to issue a pre-flush so that the ordering for this in xlog_cil_push_work()
1489 ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH; in xlog_cil_push_work()
1502 ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA; in xlog_cil_push_work()
1504 ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE) in xlog_cil_push_work()
1505 xlog_state_switch_iclogs(log, ctx->commit_iclog, 0); in xlog_cil_push_work()
1506 ticket = ctx->ticket; in xlog_cil_push_work()
1507 xlog_state_release_iclog(log, ctx->commit_iclog, ticket); in xlog_cil_push_work()
1511 spin_unlock(&log->l_icloglock); in xlog_cil_push_work()
1518 up_write(&cil->xc_ctx_lock); in xlog_cil_push_work()
1519 xfs_log_ticket_put(new_ctx->ticket); in xlog_cil_push_work()
1527 if (!ctx->commit_iclog) { in xlog_cil_push_work()
1528 xfs_log_ticket_ungrant(log, ctx->ticket); in xlog_cil_push_work()
1533 spin_lock(&log->l_icloglock); in xlog_cil_push_work()
1534 ticket = ctx->ticket; in xlog_cil_push_work()
1535 xlog_state_release_iclog(log, ctx->commit_iclog, ticket); in xlog_cil_push_work()
1537 spin_unlock(&log->l_icloglock); in xlog_cil_push_work()
1553 struct xfs_cil *cil = log->l_cilp; in xlog_cil_push_background()
1554 int space_used = atomic_read(&cil->xc_ctx->space_used); in xlog_cil_push_background()
1560 ASSERT(!test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)); in xlog_cil_push_background()
1564 * - we haven't used up all the space available yet; or in xlog_cil_push_background()
1565 * - we've already queued up a push; and in xlog_cil_push_background()
1566 * - we're not over the hard limit; and in xlog_cil_push_background()
1567 * - nothing has been over the hard limit. in xlog_cil_push_background()
1572 (cil->xc_push_seq == cil->xc_current_sequence && in xlog_cil_push_background()
1574 !waitqueue_active(&cil->xc_push_wait))) { in xlog_cil_push_background()
1575 up_read(&cil->xc_ctx_lock); in xlog_cil_push_background()
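The early return above encodes the four commented conditions: stay put while under the background (soft) limit, or when a push for this sequence is already queued, we are still under the hard limit, and nobody has been throttled. A boolean sketch of the same decision, with illustrative limits standing in for XLOG_CIL_SPACE_LIMIT() and XLOG_CIL_BLOCKING_SPACE_LIMIT():

#include <stdbool.h>
#include <stdio.h>

#define SOFT_LIMIT	(4 * 1024 * 1024)	/* background push trigger */
#define HARD_LIMIT	(8 * 1024 * 1024)	/* commit throttle point */

static bool skip_background_push(int space_used, bool push_queued,
				 bool waiters_blocked)
{
	/* Under the soft limit: nothing to do at all. */
	if (space_used < SOFT_LIMIT)
		return true;
	/*
	 * Already pushing this context, still under the hard limit, and
	 * nobody has been throttled: the queued push will catch up.
	 */
	return push_queued && space_used < HARD_LIMIT && !waiters_blocked;
}

int main(void)
{
	printf("%d\n", skip_background_push(1 << 20, false, false)); /* 1 */
	printf("%d\n", skip_background_push(5 << 20, true, false));  /* 1 */
	printf("%d\n", skip_background_push(9 << 20, true, false));  /* 0 */
	return 0;
}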
1579 spin_lock(&cil->xc_push_lock); in xlog_cil_push_background()
1580 if (cil->xc_push_seq < cil->xc_current_sequence) { in xlog_cil_push_background()
1581 cil->xc_push_seq = cil->xc_current_sequence; in xlog_cil_push_background()
1582 queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work); in xlog_cil_push_background()
1587 * because we are over the blocking threshold. The push_lock is still in xlog_cil_push_background()
1588 * held, so blocking threshold sleep/wakeup is still correctly in xlog_cil_push_background()
1591 up_read(&cil->xc_ctx_lock); in xlog_cil_push_background()
1600 * The cil->xc_push_lock provides the serialisation necessary for safely in xlog_cil_push_background()
1604 trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket); in xlog_cil_push_background()
1605 ASSERT(space_used < log->l_logsize); in xlog_cil_push_background()
1606 xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock); in xlog_cil_push_background()
1610 spin_unlock(&cil->xc_push_lock); in xlog_cil_push_background()
1628 * which defeats the purpose of having an async, non-blocking CIL force
1638 struct xfs_cil *cil = log->l_cilp; in xlog_cil_push_now()
1643 ASSERT(push_seq && push_seq <= cil->xc_current_sequence); in xlog_cil_push_now()
1647 flush_workqueue(cil->xc_push_wq); in xlog_cil_push_now()
1649 spin_lock(&cil->xc_push_lock); in xlog_cil_push_now()
1658 cil->xc_push_commit_stable = async; in xlog_cil_push_now()
1664 if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) || in xlog_cil_push_now()
1665 push_seq <= cil->xc_push_seq) { in xlog_cil_push_now()
1666 spin_unlock(&cil->xc_push_lock); in xlog_cil_push_now()
1670 cil->xc_push_seq = push_seq; in xlog_cil_push_now()
1671 queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work); in xlog_cil_push_now()
1672 spin_unlock(&cil->xc_push_lock); in xlog_cil_push_now()
1679 struct xfs_cil *cil = log->l_cilp; in xlog_cil_empty()
1682 spin_lock(&cil->xc_push_lock); in xlog_cil_empty()
1683 if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) in xlog_cil_empty()
1685 spin_unlock(&cil->xc_push_lock); in xlog_cil_empty()
1707 list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) { in xlog_cil_process_intents()
1708 if (!(lip->li_ops->flags & XFS_ITEM_INTENT_DONE)) in xlog_cil_process_intents()
1711 ilip = lip->li_ops->iop_intent(lip); in xlog_cil_process_intents()
1714 set_bit(XFS_LI_WHITEOUT, &ilip->li_flags); in xlog_cil_process_intents()
1716 len += ilip->li_lv->lv_bytes; in xlog_cil_process_intents()
1717 kvfree(ilip->li_lv); in xlog_cil_process_intents()
1718 ilip->li_lv = NULL; in xlog_cil_process_intents()
1721 lip->li_ops->iop_release(lip); in xlog_cil_process_intents()
1746 struct xfs_cil *cil = log->l_cilp; in xlog_cil_commit()
1758 down_read(&cil->xc_ctx_lock); in xlog_cil_commit()
1760 if (tp->t_flags & XFS_TRANS_HAS_INTENT_DONE) in xlog_cil_commit()
1766 xfs_log_ticket_regrant(log, tp->t_ticket); in xlog_cil_commit()
1768 xfs_log_ticket_ungrant(log, tp->t_ticket); in xlog_cil_commit()
1769 tp->t_ticket = NULL; in xlog_cil_commit()
1778 * to disk. If we don't, then the CIL checkpoint can race with us and in xlog_cil_commit()
1784 list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) { in xlog_cil_commit()
1786 if (lip->li_ops->iop_committing) in xlog_cil_commit()
1787 lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence); in xlog_cil_commit()
1790 *commit_seq = cil->xc_ctx->sequence; in xlog_cil_commit()
1792 /* xlog_cil_push_background() releases cil->xc_ctx_lock */ in xlog_cil_commit()
1805 xfs_csn_t seq = log->l_cilp->xc_current_sequence; in xlog_cil_flush()
1807 trace_xfs_log_force(log->l_mp, seq, _RET_IP_); in xlog_cil_flush()
1814 if (test_bit(XLOG_CIL_EMPTY, &log->l_cilp->xc_flags)) in xlog_cil_flush()
1815 xfs_log_force(log->l_mp, 0); in xlog_cil_flush()
1833 struct xfs_cil *cil = log->l_cilp; in xlog_cil_force_seq()
1837 ASSERT(sequence <= cil->xc_current_sequence); in xlog_cil_force_seq()
1840 sequence = cil->xc_current_sequence; in xlog_cil_force_seq()
1841 trace_xfs_log_force(log->l_mp, sequence, _RET_IP_); in xlog_cil_force_seq()
1857 spin_lock(&cil->xc_push_lock); in xlog_cil_force_seq()
1858 list_for_each_entry(ctx, &cil->xc_committing, committing) { in xlog_cil_force_seq()
1866 if (ctx->sequence > sequence) in xlog_cil_force_seq()
1868 if (!ctx->commit_lsn) { in xlog_cil_force_seq()
1873 XFS_STATS_INC(log->l_mp, xs_log_force_sleep); in xlog_cil_force_seq()
1874 xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock); in xlog_cil_force_seq()
1877 if (ctx->sequence != sequence) in xlog_cil_force_seq()
1880 commit_lsn = ctx->commit_lsn; in xlog_cil_force_seq()
1898 if (sequence == cil->xc_current_sequence && in xlog_cil_force_seq()
1899 !test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) { in xlog_cil_force_seq()
1900 spin_unlock(&cil->xc_push_lock); in xlog_cil_force_seq()
1904 spin_unlock(&cil->xc_push_lock); in xlog_cil_force_seq()
1915 spin_unlock(&cil->xc_push_lock); in xlog_cil_force_seq()
1933 return -ENOMEM; in xlog_cil_init()
1938 cil->xc_push_wq = alloc_workqueue("xfs-cil/%s", in xlog_cil_init()
1940 4, log->l_mp->m_super->s_id); in xlog_cil_init()
1941 if (!cil->xc_push_wq) in xlog_cil_init()
1944 cil->xc_log = log; in xlog_cil_init()
1945 cil->xc_pcp = alloc_percpu(struct xlog_cil_pcp); in xlog_cil_init()
1946 if (!cil->xc_pcp) in xlog_cil_init()
1950 cilpcp = per_cpu_ptr(cil->xc_pcp, cpu); in xlog_cil_init()
1951 INIT_LIST_HEAD(&cilpcp->busy_extents); in xlog_cil_init()
1952 INIT_LIST_HEAD(&cilpcp->log_items); in xlog_cil_init()
1955 INIT_LIST_HEAD(&cil->xc_committing); in xlog_cil_init()
1956 spin_lock_init(&cil->xc_push_lock); in xlog_cil_init()
1957 init_waitqueue_head(&cil->xc_push_wait); in xlog_cil_init()
1958 init_rwsem(&cil->xc_ctx_lock); in xlog_cil_init()
1959 init_waitqueue_head(&cil->xc_start_wait); in xlog_cil_init()
1960 init_waitqueue_head(&cil->xc_commit_wait); in xlog_cil_init()
1961 log->l_cilp = cil; in xlog_cil_init()
1968 destroy_workqueue(cil->xc_push_wq); in xlog_cil_init()
1971 return -ENOMEM; in xlog_cil_init()
1978 struct xfs_cil *cil = log->l_cilp; in xlog_cil_destroy()
1980 if (cil->xc_ctx) { in xlog_cil_destroy()
1981 if (cil->xc_ctx->ticket) in xlog_cil_destroy()
1982 xfs_log_ticket_put(cil->xc_ctx->ticket); in xlog_cil_destroy()
1983 kfree(cil->xc_ctx); in xlog_cil_destroy()
1986 ASSERT(test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)); in xlog_cil_destroy()
1987 free_percpu(cil->xc_pcp); in xlog_cil_destroy()
1988 destroy_workqueue(cil->xc_push_wq); in xlog_cil_destroy()