Lines Matching +full:valid +full:-mask

Only the matching lines are shown below, grouped by the function (or file-scope
declaration) they occur in; "..." marks source elided between non-adjacent matches.

// SPDX-License-Identifier: GPL-2.0
...
/* LBR Branch Select valid bits */
...
#define LBR_NOT_SUPP    -1      /* unsupported filter */
...
/* struct member (the per-entry "valid" bit, read below as entry.to.split.valid): */
        u64     valid:1;
In sign_ext_branch_ip():
        u32 shift = 64 - boot_cpu_data.x86_virt_bits;
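The shift above is derived from the CPU's virtual-address width, which indicates that
the raw LBR addresses are sign-extended back to canonical 64-bit form before being
reported. A minimal standalone sketch of that sign-extension step (the helper name and
the 48-bit virtual-address figure are illustrative assumptions, not the kernel function
itself):

#include <stdint.h>

/* Sign-extend an IP recorded with only `virt_bits` significant bits (assumed 48 here). */
uint64_t sign_ext_ip(uint64_t ip, unsigned int virt_bits)
{
        unsigned int shift = 64 - virt_bits;    /* e.g. 64 - 48 = 16 */

        /* Move the topmost significant bit up to bit 63, then arithmetic-shift it back. */
        return (uint64_t)(((int64_t)ip << shift) >> shift);
}

With virt_bits = 48, an address with bit 47 set, such as 0x0000ffffffffffff, extends to
0xffffffffffffffff, while addresses with bit 47 clear are returned unchanged.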
In amd_pmu_lbr_filter():
        int br_sel = cpuc->br_sel, offset, type, i, j;
        ...
        for (i = 0; i < cpuc->lbr_stack.nr; i++) {
                from = cpuc->lbr_entries[i].from;
                to = cpuc->lbr_entries[i].to;
                ...
                cpuc->lbr_entries[i].from += offset;
                ...
                cpuc->lbr_entries[i].from = 0;  /* mark invalid */
                ...
                cpuc->lbr_entries[i].type = common_branch_type(type);
        ...
        for (i = 0; i < cpuc->lbr_stack.nr; ) {
                if (!cpuc->lbr_entries[i].from) {
                        ...
                        while (++j < cpuc->lbr_stack.nr)
                                cpuc->lbr_entries[j - 1] = cpuc->lbr_entries[j];
                        cpuc->lbr_stack.nr--;
                        if (!cpuc->lbr_entries[i].from)
                                ...
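The second loop above compacts the LBR stack in place: entries whose from address was
zeroed by the filter are overwritten by shifting the remaining entries down, the count is
decremented, and the same index is re-examined before advancing in case the shifted-in
entry is also invalid. A standalone sketch of the same compaction pattern on a plain
array (names and data are made up; zero marks an invalid slot):

#include <stdio.h>

int main(void)
{
        unsigned long entries[] = { 0x10, 0, 0x30, 0, 0x50 };
        int nr = 5, i, j;

        for (i = 0; i < nr; ) {
                if (!entries[i]) {
                        /* Shift the tail down over the invalid slot. */
                        j = i;
                        while (++j < nr)
                                entries[j - 1] = entries[j];
                        nr--;
                        /* Re-check index i: the entry shifted in may be invalid too. */
                        if (!entries[i])
                                continue;
                }
                i++;
        }

        for (i = 0; i < nr; i++)
                printf("0x%lx\n", entries[i]);  /* prints 0x10, 0x30, 0x50 */
        return 0;
}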
In amd_pmu_lbr_read():
        struct perf_branch_entry *br = cpuc->lbr_entries;
        ...
        if (!cpuc->lbr_users)
        ...
        /*
         * Check if a branch has been logged; if valid = 0, spec = 0
         * then no branch was recorded
         */
        if ((!entry.to.split.valid && !entry.to.split.spec) ||
        ...
        /*
         * Set branch speculation information using the status of
         * the valid and spec bits.
         *
         * When valid = 0, spec = 0, no branch was recorded and the
         * entry is skipped (as checked above).
         *
         * When valid = 0, spec = 1, the recorded branch was
         * speculative but took the wrong path.
         *
         * When valid = 1, spec = 0, the recorded branch was
         * non-speculative but took the correct path.
         *
         * When valid = 1, spec = 1, the recorded branch was
         * speculative and took the correct path.
         */
        idx = (entry.to.split.valid << 1) | entry.to.split.spec;
        ...
        cpuc->lbr_stack.nr = out;
        ...
        cpuc->lbr_stack.hw_idx = 0;
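The valid and spec bits are packed into a two-bit index (valid in bit 1, spec in bit 0),
so the four combinations described in the comment can be resolved with a simple table
lookup. A standalone sketch of that decoding (the table and its strings are my own
rendering of the comment above, not the kernel's actual mapping table):

#include <stdio.h>

/* idx = (valid << 1) | spec, as computed in amd_pmu_lbr_read() above. */
static const char * const spec_outcome[4] = {
        [0] = "no branch recorded (entry skipped)",     /* valid = 0, spec = 0 */
        [1] = "speculative, wrong path",                /* valid = 0, spec = 1 */
        [2] = "non-speculative, correct path",          /* valid = 1, spec = 0 */
        [3] = "speculative, correct path",              /* valid = 1, spec = 1 */
};

int main(void)
{
        unsigned int valid, spec;

        for (valid = 0; valid < 2; valid++)
                for (spec = 0; spec < 2; spec++)
                        printf("valid=%u spec=%u -> %s\n",
                               valid, spec, spec_outcome[(valid << 1) | spec]);
        return 0;
}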
In amd_pmu_lbr_setup_filter():
        struct hw_perf_event_extra *reg = &event->hw.branch_reg;
        u64 br_type = event->attr.branch_sample_type;
        u64 mask = 0, v;
        ...
        return -EOPNOTSUPP;
        ...
        mask |= X86_BR_USER;
        ...
        mask |= X86_BR_KERNEL;
        ...
        mask |= X86_BR_ANY;
        ...
        mask |= X86_BR_ANY_CALL;
        ...
        mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;
        ...
        mask |= X86_BR_IND_CALL;
        ...
        mask |= X86_BR_JCC;
        ...
        mask |= X86_BR_IND_JMP;
        ...
        mask |= X86_BR_CALL | X86_BR_ZERO_CALL;
        ...
        mask |= X86_BR_TYPE_SAVE;
        ...
        reg->reg = mask;
        mask = 0;
        ...
        return -EOPNOTSUPP;
        ...
        mask |= v;
        ...
        reg->config = mask ^ LBR_SELECT_MASK;
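The first half of amd_pmu_lbr_setup_filter() accumulates a software filter mask (stored
in reg->reg and later consumed as cpuc->br_sel), while the second half translates the
requested branch-sample-type bits through a per-bit lookup into hardware select bits,
rejecting filters the hardware cannot express (LBR_NOT_SUPP, defined above as -1) with
-EOPNOTSUPP. A standalone sketch of that translation pattern, with a made-up map and
bit positions rather than the kernel's real tables:

#include <stdint.h>
#include <errno.h>

#define HW_NOT_SUPP     -1      /* filter cannot be expressed in hardware */
#define HW_IGNORE        0      /* filter contributes no hardware select bits */

/* Hypothetical map: one entry per branch-sample-type bit position. */
static const int hw_select_map[] = {
        [0] = 0x1,              /* e.g. user branches */
        [1] = 0x2,              /* e.g. kernel branches */
        [2] = HW_IGNORE,        /* e.g. handled purely by software filtering */
        [3] = HW_NOT_SUPP,      /* e.g. not filterable on this hardware */
};

int build_hw_select(uint64_t sample_type, uint64_t *config)
{
        uint64_t mask = 0;
        int shift, v;

        for (shift = 0; shift < 4; shift++) {
                if (!(sample_type & (1ULL << shift)))
                        continue;

                v = hw_select_map[shift];
                if (v == HW_NOT_SUPP)
                        return -EOPNOTSUPP;     /* mirrors the early return above */
                if (v != HW_IGNORE)
                        mask |= v;
        }

        *config = mask;         /* the kernel additionally XORs this with LBR_SELECT_MASK */
        return 0;
}

Keeping the unsupported case as an explicit sentinel lets event setup fail early instead
of silently recording branches the hardware cannot filter.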
In amd_pmu_lbr_hw_config():
        event->attach_state |= PERF_ATTACH_SCHED_CB;
In amd_pmu_lbr_reset():
        cpuc->last_task_ctx = NULL;
        cpuc->last_log_id = 0;
In amd_pmu_lbr_add():
        struct hw_perf_event_extra *reg = &event->hw.branch_reg;
        ...
        cpuc->lbr_select = 1;
        cpuc->lbr_sel->config = reg->config;
        cpuc->br_sel = reg->reg;
        ...
        perf_sched_cb_inc(event->pmu);
        ...
        if (!cpuc->lbr_users++ && !event->total_time_running)
                ...
In amd_pmu_lbr_del():
        cpuc->lbr_select = 0;
        ...
        cpuc->lbr_users--;
        WARN_ON_ONCE(cpuc->lbr_users < 0);
        perf_sched_cb_dec(event->pmu);
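amd_pmu_lbr_add() and amd_pmu_lbr_del() keep a per-CPU count of LBR users: the
post-increment test in the add path fires only for the first user, the del path
decrements and warns on underflow, and perf_sched_cb_inc()/perf_sched_cb_dec() are
paired around it. A tiny standalone sketch of that first-user gating pattern
(hypothetical names; what the kernel does for the first user is not shown in the lines
above):

#include <assert.h>

struct lbr_cpu_state {
        int users;
};

void lbr_user_add(struct lbr_cpu_state *st)
{
        if (!st->users++) {
                /* First user on this CPU: one-time setup would go here. */
        }
}

void lbr_user_del(struct lbr_cpu_state *st)
{
        st->users--;
        assert(st->users >= 0); /* mirrors the WARN_ON_ONCE() underflow check */
}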
In amd_pmu_lbr_sched_task():
        if (cpuc->lbr_users && sched_in)
                ...
In amd_pmu_lbr_enable_all():
        if (!cpuc->lbr_users || !x86_pmu.lbr_nr)
        ...
        if (cpuc->lbr_select) {
                lbr_select = cpuc->lbr_sel->config & LBR_SELECT_MASK;
                ...
In amd_pmu_lbr_disable_all():
        if (!cpuc->lbr_users || !x86_pmu.lbr_nr)
                ...
In amd_pmu_lbr_init():
        ...
        return -EOPNOTSUPP;
        ...
        pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);