
Searched full:masked (Results 1 – 25 of 705) sorted by relevance


/linux-6.12.1/tools/perf/pmu-events/arch/x86/goldmont/
other.json
20 "BriefDescription": "Cycles hardware interrupts are masked",
23 "EventName": "HW_INTERRUPTS.MASKED",
24 …"PublicDescription": "Counts the number of core cycles during which interrupts are masked (disable…
29 "BriefDescription": "Cycles pending interrupts are masked",
33 …core cycles during which there are pending interrupts, but interrupts are masked (EFLAGS.IF = 0).",
/linux-6.12.1/tools/perf/pmu-events/arch/x86/goldmontplus/
other.json
20 "BriefDescription": "Cycles hardware interrupts are masked",
23 "EventName": "HW_INTERRUPTS.MASKED",
24 …"PublicDescription": "Counts the number of core cycles during which interrupts are masked (disable…
29 "BriefDescription": "Cycles pending interrupts are masked",
33 …core cycles during which there are pending interrupts, but interrupts are masked (EFLAGS.IF = 0).",
/linux-6.12.1/arch/x86/math-emu/
errors.c
322 /* Set summary bits iff exception isn't masked */ in FPU_exception()
387 /* Masked response */ in real_1op_NaN()
398 /* The masked response */ in real_1op_NaN()
435 /* Masked response */ in real_2op_NaN()
503 /* The masked response */ in arith_invalid()
518 /* The masked response */ in FPU_divide_by_zero()
535 partial_status |= flags; /* The masked response */ in set_precision_flag()
547 partial_status |= (SW_Precision | SW_C1); /* The masked response */ in set_precision_flag_up()
555 if (control_word & CW_Precision) { /* The masked response */ in set_precision_flag_down()
564 if (control_word & CW_Denormal) { /* The masked response */ in denormal_operand()
[all …]
/linux-6.12.1/drivers/net/wireless/ath/ath9k/
ar9002_mac.c
32 static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked, in ar9002_hw_get_isr() argument
53 *masked = 0; in ar9002_hw_get_isr()
58 *masked = 0; in ar9002_hw_get_isr()
91 *masked = 0; in ar9002_hw_get_isr()
95 *masked = isr & ATH9K_INT_COMMON; in ar9002_hw_get_isr()
99 *masked |= ATH9K_INT_RX; in ar9002_hw_get_isr()
106 *masked |= ATH9K_INT_TX; in ar9002_hw_get_isr()
134 *masked |= mask2; in ar9002_hw_get_isr()
153 *masked |= ATH9K_INT_GENTIMER; in ar9002_hw_get_isr()
157 *masked |= ATH9K_INT_TIM_TIMER; in ar9002_hw_get_isr()
[all …]
ar9003_mac.c
183 static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked, in ar9003_hw_get_isr() argument
207 *masked = 0; in ar9003_hw_get_isr()
244 *masked = 0; in ar9003_hw_get_isr()
248 *masked = isr & ATH9K_INT_COMMON; in ar9003_hw_get_isr()
252 *masked |= ATH9K_INT_RXLP; in ar9003_hw_get_isr()
256 *masked |= ATH9K_INT_TX; in ar9003_hw_get_isr()
259 *masked |= ATH9K_INT_RXLP; in ar9003_hw_get_isr()
262 *masked |= ATH9K_INT_RXHP; in ar9003_hw_get_isr()
265 *masked |= ATH9K_INT_TX; in ar9003_hw_get_isr()
294 *masked |= ATH9K_INT_GENTIMER; in ar9003_hw_get_isr()
[all …]
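
Both ar9002_hw_get_isr() and ar9003_hw_get_isr() above follow the same shape: start with *masked = 0, keep only the generic ATH9K_INT_* causes from the raw status word, then OR in extra causes decoded from secondary status registers. A minimal, self-contained sketch of that shape (types and bit names are invented for illustration, not the ath9k ones):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical cause bits, standing in for the ATH9K_INT_* flags. */
    #define DEMO_INT_RX      (1u << 0)
    #define DEMO_INT_TX      (1u << 1)
    #define DEMO_INT_TIMER   (1u << 2)
    #define DEMO_INT_COMMON  (DEMO_INT_RX | DEMO_INT_TX)

    /* Reduce a raw status word to the causes the caller cares about and
     * accumulate them into *masked, as the get_isr() callbacks above do. */
    static bool demo_get_isr(uint32_t isr, uint32_t isr2, uint32_t *masked)
    {
        *masked = 0;
        if (!isr && !isr2)
            return false;                 /* nothing pending */

        *masked = isr & DEMO_INT_COMMON;  /* generic causes first */
        if (isr2 & 0x1)
            *masked |= DEMO_INT_TIMER;    /* decoded from a secondary register */

        return *masked != 0;
    }
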
/linux-6.12.1/drivers/gpu/drm/xe/regs/
xe_reg_defs.h
26 * @masked: register is "masked", with upper 16bits used
30 u32 masked:1; member
69 * XE_REG_OPTION_MASKED - Register is "masked", with upper 16 bits marking the
73 * "Access: Masked". Registers with this option can have write operations to
83 #define XE_REG_OPTION_MASKED .masked = 1
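
The "masked" register convention referenced here means each write carries its own per-bit write enable: the upper 16 bits of the 32-bit value select which of the lower 16 bits the hardware updates, so the driver never needs a read-modify-write. A hedged sketch of how such write values are usually composed (helper names are illustrative, not the xe API):

    #include <stdint.h>

    /* Set the given bits: write-enable them in the upper half, value 1 below. */
    static inline uint32_t demo_masked_bit_enable(uint16_t bits)
    {
        return ((uint32_t)bits << 16) | bits;
    }

    /* Clear the given bits: write-enable them in the upper half, value 0 below. */
    static inline uint32_t demo_masked_bit_disable(uint16_t bits)
    {
        return (uint32_t)bits << 16;
    }

This is also why the xe_reg_sr.c hits further down note that masked registers cannot carry set bits in the upper half: those positions are consumed by the write-enable mask.
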
/linux-6.12.1/drivers/vfio/platform/
vfio_platform_irq.c
24 if (!irq_ctx->masked) { in vfio_platform_mask()
26 irq_ctx->masked = true; in vfio_platform_mask()
84 if (irq_ctx->masked) { in vfio_platform_unmask()
86 irq_ctx->masked = false; in vfio_platform_unmask()
157 if (!irq_ctx->masked) { in vfio_automasked_irq_handler()
162 irq_ctx->masked = true; in vfio_automasked_irq_handler()
204 * irq->masked effectively provides nested disables within the overall in vfio_set_trigger()
208 * irq->masked is initially false. in vfio_set_trigger()
320 vdev->irqs[i].masked = false; in vfio_platform_irq_init()
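
The vfio_platform hits show the usual software bookkeeping for an "automasked" level-triggered line: the handler masks the line itself so it stops re-firing, records that in irq_ctx->masked, and signals userspace, which services the device and unmasks later. A generic sketch of that pattern, with invented names rather than the vfio structures or kernel APIs:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical per-line state, standing in for vfio's irq context. */
    struct demo_irq_ctx {
        bool masked;
        int hwirq;
    };

    /* Models of disabling the line and signalling userspace; nothing real here. */
    static void demo_hw_disable(int hwirq)  { printf("mask line %d\n", hwirq); }
    static void demo_signal_user(int hwirq) { printf("notify userspace %d\n", hwirq); }

    static void demo_automasked_handler(struct demo_irq_ctx *ctx)
    {
        if (!ctx->masked) {               /* only transition once */
            demo_hw_disable(ctx->hwirq);  /* stop the level IRQ from re-firing */
            ctx->masked = true;
            demo_signal_user(ctx->hwirq); /* userspace unmasks when done */
        }
    }
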
/linux-6.12.1/kernel/irq/
migration.c
96 bool masked; in __irq_move_irq() local
109 * Be careful vs. already masked interrupts. If this is a in __irq_move_irq()
113 masked = irqd_irq_masked(idata); in __irq_move_irq()
114 if (!masked) in __irq_move_irq()
117 if (!masked) in __irq_move_irq()
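
__irq_move_irq() masks the line around the affinity change so the move cannot race with the interrupt firing mid-update, and it only unmasks again if the line was not already masked on entry. The same guard, reduced to plain C with invented names:

    #include <stdbool.h>

    struct demo_irq_data {
        bool masked;                      /* is the line currently masked? */
    };

    /* Trivial stand-ins for the chip mask/unmask/retarget callbacks. */
    static void demo_mask_irq(struct demo_irq_data *d)        { d->masked = true; }
    static void demo_unmask_irq(struct demo_irq_data *d)      { d->masked = false; }
    static void demo_move_masked_irq(struct demo_irq_data *d) { (void)d; /* retarget */ }

    static void demo_move_irq(struct demo_irq_data *d)
    {
        /* Be careful with already-masked interrupts: remember the state on
         * entry and only unmask at the end if we did the masking ourselves. */
        bool masked = d->masked;

        if (!masked)
            demo_mask_irq(d);
        demo_move_masked_irq(d);
        if (!masked)
            demo_unmask_irq(d);
    }
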
/linux-6.12.1/drivers/gpu/drm/xe/
xe_reg_sr.c
129 "discarding save-restore reg %04lx (clear: %08x, set: %08x, masked: %s, mcr: %s): ret=%d\n", in xe_reg_sr_add()
131 str_yes_no(e->reg.masked), in xe_reg_sr_add()
155 * If this is a masked register, need to set the upper 16 bits. in apply_one_mmio()
159 * When it's not masked, we have to read it from hardware, unless we are in apply_one_mmio()
162 if (reg.masked) in apply_one_mmio()
173 * - Masked registers can't have set_bits with upper bits set in apply_one_mmio()
280 drm_printf(p, "\tREG[0x%lx] clr=0x%08x set=0x%08x masked=%s mcr=%s\n", in xe_reg_sr_dump()
282 str_yes_no(entry->reg.masked), in xe_reg_sr_dump()
/linux-6.12.1/drivers/vfio/pci/
vfio_pci_intrs.c
31 bool masked; member
101 /* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. */
129 if (!ctx->masked) { in __vfio_pci_intx_mask()
139 ctx->masked = true; in __vfio_pci_intx_mask()
185 if (ctx->masked && !vdev->virq_disabled) { in vfio_pci_intx_unmask_handler()
197 ctx->masked = (ret > 0); in vfio_pci_intx_unmask_handler()
234 ctx->masked = true; in vfio_intx_handler()
236 } else if (!ctx->masked && /* may be shared */ in vfio_intx_handler()
238 ctx->masked = true; in vfio_intx_handler()
280 * Fill the initial masked state based on virq_disabled. After in vfio_intx_enable()
[all …]
/linux-6.12.1/arch/powerpc/kvm/
book3s_xive.c
317 * XXX We could check if the interrupt is masked here and in xive_vm_h_xirr()
320 * if (masked) { in xive_vm_h_xirr()
322 * if (masked) { in xive_vm_h_xirr()
395 /* For each priority that is now masked */ in xive_vm_scan_for_rerouted_irqs()
572 if (state->guest_priority == MASKED) { in xive_vm_h_eoi()
574 if (state->guest_priority != MASKED) { in xive_vm_h_eoi()
915 * interrupt, thus leaving it effectively masked after in kvmppc_xive_attach_escalation()
1094 * Take the lock, set masked, try again if racing in xive_lock_and_mask()
1100 state->guest_priority = MASKED; in xive_lock_and_mask()
1109 if (old_prio == MASKED) in xive_lock_and_mask()
[all …]
book3s_xive_native.c
399 state->act_priority = MASKED; in kvmppc_xive_native_set_source()
401 xive_native_configure_irq(state->ipi_number, 0, MASKED, 0); in kvmppc_xive_native_set_source()
419 u32 server, u8 priority, bool masked, in kvmppc_xive_native_update_source_config() argument
433 priority, server, masked, state->act_server, in kvmppc_xive_native_update_source_config()
438 if (priority != MASKED && !masked) { in kvmppc_xive_native_update_source_config()
451 state->act_priority = MASKED; in kvmppc_xive_native_update_source_config()
455 rc = xive_native_configure_irq(hw_num, 0, MASKED, 0); in kvmppc_xive_native_update_source_config()
473 bool masked; in kvmppc_xive_native_set_source_config() local
494 masked = (kvm_cfg & KVM_XIVE_SOURCE_MASKED_MASK) >> in kvmppc_xive_native_set_source_config()
506 priority, masked, eisn); in kvmppc_xive_native_set_source_config()
[all …]
book3s_xics.c
154 if ((state->masked_pending || state->resend) && priority != MASKED) { in write_xive()
264 write_xive(xics, ics, state, state->server, MASKED, state->priority); in kvmppc_xics_int_off()
432 * If masked, bail out in icp_deliver_irq()
434 * Note: PAPR doesn't mention anything about masked pending in icp_deliver_irq()
437 * However that would have the effect of losing a masked in icp_deliver_irq()
440 * losing interrupts that occur while masked. in icp_deliver_irq()
446 if (state->priority == MASKED) { in icp_deliver_irq()
447 XICS_DBG("irq %#x masked pending\n", new_irq); in icp_deliver_irq()
1003 …seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x pq_state %d resend %d masked pending … in xics_debug_show()
1048 ics->irq_state[i].priority = MASKED; in kvmppc_xics_create_ics()
[all …]
/linux-6.12.1/drivers/gpu/drm/i915/
i915_reg_defs.h
96 * @return: @__val masked and shifted into the field defined by @__mask.
113 * @return: @__val masked and shifted into the field defined by @__mask.
130 * @return: Masked and shifted value of the field defined by @__mask in @__val.
142 * @return: Masked and shifted value of the field defined by @__mask in @__val.
185 * @return: @__val masked and shifted into the field defined by @__mask.
259 * @return: Masked and shifted value of the field defined by @__mask in @__val.
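
The i915_reg_defs.h helpers all come down to two operations: shifting a value into the register field described by a contiguous bitmask, and extracting it back out, with the mask's lowest set bit giving the shift. A standalone equivalent in spirit (names invented; __builtin_ctz is a GCC/Clang builtin assumed here):

    #include <stdint.h>

    /* Shift of the field = index of the mask's lowest set bit (mask != 0). */
    #define DEMO_FIELD_SHIFT(mask)     (__builtin_ctz(mask))

    /* @val masked and shifted into the field defined by @mask. */
    #define DEMO_FIELD_PREP(mask, val) (((uint32_t)(val) << DEMO_FIELD_SHIFT(mask)) & (mask))

    /* Masked and shifted value of the field defined by @mask in @reg. */
    #define DEMO_FIELD_GET(mask, reg)  (((reg) & (mask)) >> DEMO_FIELD_SHIFT(mask))

For example, DEMO_FIELD_PREP(0x0000ff00, 0x2a) yields 0x00002a00, and DEMO_FIELD_GET(0x0000ff00, 0x00002a00) gives 0x2a back.
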
/linux-6.12.1/drivers/xen/events/
events_2l.c
128 * the interrupt edge' if the channel is masked. in evtchn_2l_unmask()
203 * If we masked out all events, wrap to beginning. in evtchn_2l_handle_events()
237 /* If we masked out all events, move on. */ in evtchn_2l_handle_events()
284 printk("%d: masked=%d pending=%d event_sel %0*"PRI_xen_ulong"\n ", i, in xen_debug_interrupt()
337 ? "" : " globally-masked", in xen_debug_interrupt()
339 ? "" : " locally-masked"); in xen_debug_interrupt()
/linux-6.12.1/include/linux/soc/qcom/
irq.h
14 * IRQ_DOMAIN_FLAG_QCOM_PDC_WAKEUP: Line must be masked at TLMM and the
27 * requires the interrupt be masked at the child interrupt controller.
/linux-6.12.1/Documentation/virt/kvm/devices/
xics.rst
82 * Masked flag, 1 bit
84 This bit is set to 1 if the interrupt is masked (cannot be delivered
86 call, or 0 if it is not masked.
/linux-6.12.1/arch/mips/include/asm/
msc01_ic.h
25 #define MSC01_IC_ISAL_OFS 0x00160 /* Masked int_in 31:0 */
26 #define MSC01_IC_ISAH_OFS 0x00168 /* Masked int_in 63:32 */
30 #define MSC01_IC_OSA_OFS 0x00190 /* Masked int_out */
43 #define MSC01_IC_ISA_OFS 0x00860 /* Masked int_in 63:0 */
/linux-6.12.1/arch/arm64/include/asm/
irqflags.h
16 * Masking debug exceptions causes all other exceptions to be masked too/
18 * always masked and unmasked together, and have no side effects for other
155 * state if interrupts are already disabled/masked. in __pmr_local_irq_save()
/linux-6.12.1/arch/powerpc/include/asm/
hw_irq.h
42 * Some soft-masked interrupts must be hard masked until they are replayed
43 * (e.g., because the soft-masked handler does not clear the exception).
44 * Interrupt replay itself must remain hard masked too.
342 * is a different soft-masked interrupt pending that requires hard
358 * soft-masked. in should_hard_irq_enable()
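
The hw_irq.h comments describe powerpc soft-masking: rather than disabling interrupts in hardware, the kernel sets a per-CPU soft-mask flag; an interrupt arriving while the flag is set is only recorded as pending (and some sources additionally stay hard masked) and is replayed once the mask is lifted. A very rough, single-CPU illustration of the idea, not the powerpc implementation:

    #include <stdbool.h>
    #include <stdio.h>

    static bool soft_masked;   /* per-CPU in real life; one CPU here */
    static bool irq_pending;

    static void demo_handle_irq(void)        { puts("handling interrupt"); }
    static void demo_local_irq_disable(void) { soft_masked = true; }

    /* Called when the hardware raises an interrupt. */
    static void demo_irq_entry(void)
    {
        if (soft_masked) {
            irq_pending = true;   /* remember it; some sources also stay hard masked */
            return;
        }
        demo_handle_irq();
    }

    /* Lifting the soft mask replays whatever arrived while masked. */
    static void demo_local_irq_enable(void)
    {
        soft_masked = false;
        if (irq_pending) {
            irq_pending = false;
            demo_handle_irq();
        }
    }

    int main(void)
    {
        demo_local_irq_disable();
        demo_irq_entry();          /* arrives while soft-masked: only recorded */
        demo_local_irq_enable();   /* replayed here */
        return 0;
    }
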
/linux-6.12.1/net/openvswitch/
actions.c
254 /* 'src' is already properly masked. */
426 const __be32 mask[4], __be32 masked[4]) in mask_ipv6_addr()
428 masked[0] = OVS_MASKED(old[0], addr[0], mask[0]); in mask_ipv6_addr()
429 masked[1] = OVS_MASKED(old[1], addr[1], mask[1]); in mask_ipv6_addr()
430 masked[2] = OVS_MASKED(old[2], addr[2], mask[2]); in mask_ipv6_addr()
431 masked[3] = OVS_MASKED(old[3], addr[3], mask[3]); in mask_ipv6_addr()
566 __be32 masked[4]; in set_ipv6() local
568 mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked); in set_ipv6()
570 if (unlikely(memcmp(saddr, masked, sizeof(masked)))) { in set_ipv6()
571 set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked, in set_ipv6()
[all …]
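
The set_ipv6() path above merges the old address, the requested address and a mask so that only the masked bits change, word by word. A small self-contained version of that merge; the DEMO_MASKED macro mirrors what the OVS_MASKED calls appear to compute, with types simplified to plain uint32_t:

    #include <stdint.h>

    /* Keep old bits where mask is 0, take the new bits where mask is 1. */
    #define DEMO_MASKED(old_v, new_v, mask) (((new_v) & (mask)) | ((old_v) & ~(mask)))

    /* Merge a 128-bit IPv6 address stored as four 32-bit words. */
    static void demo_mask_ipv6_addr(const uint32_t old[4], const uint32_t addr[4],
                                    const uint32_t mask[4], uint32_t masked[4])
    {
        for (int i = 0; i < 4; i++)
            masked[i] = DEMO_MASKED(old[i], addr[i], mask[i]);
    }
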
/linux-6.12.1/drivers/gpio/
gpio-virtio.c
35 bool masked; member
213 if (WARN_ON(irq_line->queued || irq_line->masked || irq_line->disabled)) in virtio_gpio_irq_prepare()
240 irq_line->masked = false; in virtio_gpio_irq_enable()
255 irq_line->masked = true; in virtio_gpio_irq_disable()
269 irq_line->masked = true; in virtio_gpio_irq_mask()
280 irq_line->masked = false; in virtio_gpio_irq_unmask()
375 if (irq_line->masked || irq_line->disabled) { in ignore_irq()
605 vgpio->irq_lines[i].masked = true; in virtio_gpio_probe()
/linux-6.12.1/Documentation/admin-guide/perf/
imx-ddr.rst
40 - 0: corresponding bit is masked.
41 - 1: corresponding bit is not masked, i.e. used to do the matching.
44 When non-masked bits are matching corresponding AXI_ID bits then counter is
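
Per the imx-ddr text, a 0 bit in the filter mask means "ignore this AXI_ID bit" and a 1 bit means "this bit must match", so the counter increments when the incoming ID agrees with the configured ID on every non-masked bit. That is a one-line check; a hedged sketch with invented names:

    #include <stdbool.h>
    #include <stdint.h>

    /* True if axi_id matches filter_id on every bit where filter_mask is 1;
     * bits where filter_mask is 0 are masked out (ignored). */
    static bool demo_axi_id_matches(uint32_t axi_id, uint32_t filter_id,
                                    uint32_t filter_mask)
    {
        return ((axi_id ^ filter_id) & filter_mask) == 0;
    }

With filter_mask 0x0000ffff, for instance, only the low 16 ID bits take part in the comparison.
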
/linux-6.12.1/drivers/irqchip/
irq-mtk-cirq.c
207 bool pending, masked; in mtk_cirq_suspend() local
222 * - For each interrupt, inspect its pending and masked status at GIC in mtk_cirq_suspend()
238 &masked); in mtk_cirq_suspend()
241 (pending && !masked)) in mtk_cirq_suspend()
/linux-6.12.1/drivers/net/ethernet/ti/
cpts.h
33 u32 intstat_masked; /* Time sync interrupt status masked */
66 #define TS_PEND (1<<0) /* masked interrupt read (after enable) */
67 #define TS_PEND_EN (1<<0) /* masked interrupt enable */
