
Searched refs:hmask (Results 1 – 25 of 34) sorted by relevance

/linux-6.12.1/arch/x86/kernel/fpu/
xstate.h
92 #define XSTATE_OP(op, st, lmask, hmask, err) \ argument
98 : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
113 #define XSTATE_XSAVE(st, lmask, hmask, err) \ argument
123 : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
130 #define XSTATE_XRESTORE(st, lmask, hmask) \ argument
137 : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
184 u32 hmask = mask >> 32; in os_xsave() local
190 XSTATE_XSAVE(&fpstate->regs.xsave, lmask, hmask, err); in os_xsave()
204 u32 hmask = mask >> 32; in os_xrstor() local
207 XSTATE_XRESTORE(&fpstate->regs.xsave, lmask, hmask); in os_xrstor()
[all …]
xstate.c
320 u32 hmask = mask >> 32; in os_xrstor_booting() local
324 XSTATE_OP(XRSTORS, xstate, lmask, hmask, err); in os_xrstor_booting()
326 XSTATE_OP(XRSTOR, xstate, lmask, hmask, err); in os_xrstor_booting()
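Note: in the XSTATE_* macros above, the 64-bit xfeature mask is handed to the XSAVE-family instructions as two 32-bit halves, lmask in EAX and hmask in EDX (the "a"/"d" asm constraints), and os_xsave()/os_xrstor() derive hmask as mask >> 32. A minimal standalone sketch of that split; split_xfeature_mask() and the example bit values are illustrative, not kernel code:

    #include <stdint.h>
    #include <stdio.h>

    /* Split a 64-bit xfeature bitmap into the EAX/EDX halves that the
     * XSAVE-family instructions expect (lmask -> EAX, hmask -> EDX). */
    static void split_xfeature_mask(uint64_t mask, uint32_t *lmask, uint32_t *hmask)
    {
            *lmask = (uint32_t)mask;               /* low 32 bits */
            *hmask = (uint32_t)(mask >> 32);       /* high 32 bits */
    }

    int main(void)
    {
            uint32_t lmask, hmask;

            /* example bitmap: bits 0, 1 and 37 set (made-up feature bits) */
            split_xfeature_mask((1ULL << 0) | (1ULL << 1) | (1ULL << 37),
                                &lmask, &hmask);
            printf("lmask=%#x hmask=%#x\n", lmask, hmask);
            return 0;
    }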
/linux-6.12.1/net/xfrm/
xfrm_hash.h
87 unsigned int hmask) in __xfrm_dst_hash() argument
98 return (h ^ (h >> 16)) & hmask; in __xfrm_dst_hash()
104 unsigned int hmask) in __xfrm_src_hash() argument
115 return (h ^ (h >> 16)) & hmask; in __xfrm_src_hash()
120 unsigned short family, unsigned int hmask) in __xfrm_spi_hash() argument
131 return (h ^ (h >> 10) ^ (h >> 20)) & hmask; in __xfrm_spi_hash()
135 __xfrm_seq_hash(u32 seq, unsigned int hmask) in __xfrm_seq_hash() argument
138 return (h ^ (h >> 10) ^ (h >> 20)) & hmask; in __xfrm_seq_hash()
141 static inline unsigned int __idx_hash(u32 index, unsigned int hmask) in __idx_hash() argument
143 return (index ^ (index >> 8)) & hmask; in __idx_hash()
[all …]
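Note: each helper above ends with a fold-and-mask step such as (h ^ (h >> 16)) & hmask (or the >> 10 / >> 20 variants); hmask here is the bucket count minus one of a power-of-two hash table, so the AND is a cheap modulo and the XOR mixes the high hash bits in before they are discarded. A small standalone sketch of the pattern; bucket_of() and the table size are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* hmask is table_size - 1 for a power-of-two table, so "& hmask" is a
     * cheap modulo; XORing in the upper bits first keeps them from being
     * thrown away by the mask. */
    static unsigned int bucket_of(uint32_t h, unsigned int hmask)
    {
            return (h ^ (h >> 16)) & hmask;
    }

    int main(void)
    {
            unsigned int hmask = 1024 - 1;   /* 1024-bucket table */

            printf("bucket=%u\n", bucket_of(0xdeadbeef, hmask));
            return 0;
    }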
xfrm_policy.c
529 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; in policy_hash_bysel() local
535 hash = __sel_hash(sel, family, hmask, dbits, sbits); in policy_hash_bysel()
537 if (hash == hmask + 1) in policy_hash_bysel()
549 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; in policy_hash_direct() local
555 hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits); in policy_hash_direct()
620 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; in xfrm_bydst_resize() local
621 unsigned int nhashmask = xfrm_new_hash_mask(hmask); in xfrm_bydst_resize()
636 for (i = hmask; i >= 0; i--) in xfrm_bydst_resize()
640 net->xfrm.policy_bydst[dir].hmask = nhashmask; in xfrm_bydst_resize()
647 xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head)); in xfrm_bydst_resize()
[all …]
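Note: in xfrm_bydst_resize() hmask is the current table mask, xfrm_new_hash_mask() yields the mask of the grown table, all hmask + 1 old buckets are rehashed, and the old array of hmask + 1 hlist heads is freed. A hedged sketch of the growth step, assuming (consistent with the hmask + 1 sizing above, though the helper's body is not shown here) that the table simply doubles:

    #include <stdio.h>

    /* Growing a power-of-two hash table: if hmask is size - 1, doubling the
     * table gives a new mask of ((hmask + 1) << 1) - 1.  Assumed here to
     * mirror what xfrm_new_hash_mask() computes. */
    static unsigned int new_hash_mask(unsigned int hmask)
    {
            return ((hmask + 1) << 1) - 1;
    }

    int main(void)
    {
            unsigned int hmask = 7;          /* 8 buckets */

            printf("old mask %#x -> new mask %#x\n", hmask, new_hash_mask(hmask));
            return 0;
    }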
/linux-6.12.1/arch/riscv/kvm/
vcpu_sbi_v01.c
19 ulong hmask; in kvm_sbi_ext_v01_handler() local
50 hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0, utrap); in kvm_sbi_ext_v01_handler()
52 hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1; in kvm_sbi_ext_v01_handler()
56 for_each_set_bit(i, &hmask, BITS_PER_LONG) { in kvm_sbi_ext_v01_handler()
72 hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0, utrap); in kvm_sbi_ext_v01_handler()
74 hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1; in kvm_sbi_ext_v01_handler()
79 kvm_riscv_fence_i(vcpu->kvm, 0, hmask); in kvm_sbi_ext_v01_handler()
83 0, hmask); in kvm_sbi_ext_v01_handler()
86 0, hmask, in kvm_sbi_ext_v01_handler()
92 0, hmask, in kvm_sbi_ext_v01_handler()
[all …]
tlb.c
290 unsigned long hbase, unsigned long hmask, in make_xfence_request() argument
304 if (!(hmask & (1UL << (vcpu->vcpu_id - hbase)))) in make_xfence_request()
326 unsigned long hbase, unsigned long hmask) in kvm_riscv_fence_i() argument
328 make_xfence_request(kvm, hbase, hmask, KVM_REQ_FENCE_I, in kvm_riscv_fence_i()
333 unsigned long hbase, unsigned long hmask, in kvm_riscv_hfence_gvma_vmid_gpa() argument
344 make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE, in kvm_riscv_hfence_gvma_vmid_gpa()
349 unsigned long hbase, unsigned long hmask) in kvm_riscv_hfence_gvma_vmid_all() argument
351 make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_GVMA_VMID_ALL, in kvm_riscv_hfence_gvma_vmid_all()
356 unsigned long hbase, unsigned long hmask, in kvm_riscv_hfence_vvma_asid_gva() argument
367 make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE, in kvm_riscv_hfence_vvma_asid_gva()
[all …]
vcpu_sbi_replace.c
52 unsigned long hmask = cp->a0; in kvm_sbi_ext_ipi_handler() local
65 if (!(hmask & (1UL << (tmp->vcpu_id - hbase)))) in kvm_sbi_ext_ipi_handler()
87 unsigned long hmask = cp->a0; in kvm_sbi_ext_rfence_handler() local
93 kvm_riscv_fence_i(vcpu->kvm, hbase, hmask); in kvm_sbi_ext_rfence_handler()
98 kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask); in kvm_sbi_ext_rfence_handler()
100 kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask, in kvm_sbi_ext_rfence_handler()
107 hbase, hmask, cp->a4); in kvm_sbi_ext_rfence_handler()
110 hbase, hmask, in kvm_sbi_ext_rfence_handler()
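Note: throughout these KVM/SBI handlers hbase and hmask describe a window of harts: hbase is the first hart ID covered and bit n of hmask selects hart hbase + n, which is why make_xfence_request() and the IPI handler test hmask & (1UL << (vcpu_id - hbase)). A standalone sketch of that membership test; hart_selected() is an illustrative helper, not a kernel function:

    #include <stdbool.h>
    #include <stdio.h>

    /* SBI hart-mask convention: bit n of hmask refers to hart (hbase + n).
     * Harts below hbase, or too far above it to fit in one long, cannot be
     * addressed by this window. */
    static bool hart_selected(unsigned long hart_id,
                              unsigned long hbase, unsigned long hmask)
    {
            if (hart_id < hbase || hart_id - hbase >= 8 * sizeof(unsigned long))
                    return false;
            return hmask & (1UL << (hart_id - hbase));
    }

    int main(void)
    {
            /* harts 2 and 3, expressed relative to hbase = 2 */
            unsigned long hbase = 2, hmask = 0x3;

            for (unsigned long id = 0; id < 6; id++)
                    printf("hart %lu: %s\n", id,
                           hart_selected(id, hbase, hmask) ? "selected" : "skipped");
            return 0;
    }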
/linux-6.12.1/arch/riscv/kernel/
sbi.c
31 unsigned long hmask = 0; in __sbi_v01_cpumask_to_hartmask() local
46 hmask |= BIT(hartid); in __sbi_v01_cpumask_to_hartmask()
49 return hmask; in __sbi_v01_cpumask_to_hartmask()
204 static int __sbi_rfence_v02_call(unsigned long fid, unsigned long hmask, in __sbi_rfence_v02_call() argument
215 ret = sbi_ecall(ext, fid, hmask, hbase, 0, 0, 0, 0); in __sbi_rfence_v02_call()
218 ret = sbi_ecall(ext, fid, hmask, hbase, start, in __sbi_rfence_v02_call()
222 ret = sbi_ecall(ext, fid, hmask, hbase, start, in __sbi_rfence_v02_call()
227 ret = sbi_ecall(ext, fid, hmask, hbase, start, in __sbi_rfence_v02_call()
231 ret = sbi_ecall(ext, fid, hmask, hbase, start, in __sbi_rfence_v02_call()
235 ret = sbi_ecall(ext, fid, hmask, hbase, start, in __sbi_rfence_v02_call()
[all …]
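Note: __sbi_v01_cpumask_to_hartmask() builds the legacy SBI v0.1 hart mask by OR-ing BIT(hartid) for each CPU in the mask, while the v0.2 path passes hmask together with a hart base to sbi_ecall(). A standalone sketch of the v0.1-style conversion; the hartid_of_cpu array stands in for the kernel's CPU-to-hart-ID mapping and is purely illustrative:

    #include <stdio.h>

    #define BIT(n)  (1UL << (n))

    /* Build a v0.1-style hart mask: one bit per hart ID with no base
     * offset, so only hart IDs below the width of unsigned long can be
     * represented; larger IDs are skipped here. */
    static unsigned long cpumask_to_hartmask(const int *hartid_of_cpu, int ncpus)
    {
            unsigned long hmask = 0;

            for (int cpu = 0; cpu < ncpus; cpu++) {
                    int hartid = hartid_of_cpu[cpu];

                    if (hartid >= (int)(8 * sizeof(unsigned long)))
                            continue;       /* not representable in a v0.1 mask */
                    hmask |= BIT(hartid);
            }
            return hmask;
    }

    int main(void)
    {
            int hartid_of_cpu[] = { 0, 1, 3 };   /* illustrative mapping */

            printf("hmask=%#lx\n", cpumask_to_hartmask(hartid_of_cpu, 3));
            return 0;
    }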
/linux-6.12.1/arch/riscv/include/asm/
kvm_host.h
320 unsigned long hbase, unsigned long hmask);
322 unsigned long hbase, unsigned long hmask,
326 unsigned long hbase, unsigned long hmask);
328 unsigned long hbase, unsigned long hmask,
332 unsigned long hbase, unsigned long hmask,
335 unsigned long hbase, unsigned long hmask,
339 unsigned long hbase, unsigned long hmask);
/linux-6.12.1/include/linux/
inetdevice.h
216 __u32 hmask; in bad_mask() local
219 hmask = ntohl(mask); in bad_mask()
220 if (hmask & (hmask+1)) in bad_mask()
298 __u32 hmask = ntohl(mask); in inet_mask_len() local
299 if (!hmask) in inet_mask_len()
301 return 32 - ffz(~hmask); in inet_mask_len()
pagewalk.h
71 int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
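Note: in the bad_mask()/inet_mask_len() lines above, hmask is the host-order value of an IPv4 netmask (in bad_mask() the value has already been complemented before this point). x & (x + 1) == 0 checks that x is of the form 2^k - 1, i.e. that the netmask is contiguous, and 32 - ffz(~hmask) counts the leading one bits to get the CIDR prefix length. A standalone sketch of both bit tricks; prefix_len() and is_low_ones() are illustrative names, and __builtin_ctz stands in for the kernel's ffz(~x):

    #include <stdint.h>
    #include <stdio.h>

    /* Number of leading one bits in a host-order IPv4 netmask (the CIDR
     * prefix length): 32 minus the number of trailing zero bits. */
    static int prefix_len(uint32_t hmask)
    {
            if (!hmask)
                    return 0;
            return 32 - __builtin_ctz(hmask);
    }

    /* x & (x + 1) == 0 exactly when x looks like 0b000...0111...1. */
    static int is_low_ones(uint32_t x)
    {
            return (x & (x + 1)) == 0;
    }

    int main(void)
    {
            uint32_t mask = 0xFFFFFF00u;            /* 255.255.255.0 */

            printf("/%d\n", prefix_len(mask));              /* 24 */
            printf("contiguous: %d\n", is_low_ones(~mask)); /* 1 -> valid mask */
            return 0;
    }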
/linux-6.12.1/arch/powerpc/mm/book3s32/
mmu.c
398 unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE); in MMU_init_hw_patch() local
417 modify_instruction_site(&patch__hash_page_B, 0xffff, hmask); in MMU_init_hw_patch()
418 modify_instruction_site(&patch__hash_page_C, 0xffff, hmask); in MMU_init_hw_patch()
426 modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask); in MMU_init_hw_patch()
/linux-6.12.1/drivers/net/wan/
fsl_ucc_hdlc.h
56 __be16 hmask; member
105 unsigned short hmask; member
fsl_ucc_hdlc.c
282 iowrite16be(priv->hmask, &priv->ucc_pram->hmask); in uhdlc_init()
1214 if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask)) in ucc_hdlc_probe()
1215 uhdlc_priv->hmask = DEFAULT_ADDR_MASK; in ucc_hdlc_probe()
/linux-6.12.1/include/net/netns/
xfrm.h
16 unsigned int hmask; member
/linux-6.12.1/mm/
pagewalk.c
268 unsigned long hmask = huge_page_mask(h); in walk_hugetlb_range() local
277 pte = hugetlb_walk(vma, addr & hmask, sz); in walk_hugetlb_range()
279 err = ops->hugetlb_entry(pte, hmask, addr, next, walk); in walk_hugetlb_range()
hmm.c
468 static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask, in hmm_vma_walk_hugetlb_entry() argument
509 pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT); in hmm_vma_walk_hugetlb_entry()
mincore.c
25 static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr, in mincore_hugetlb() argument
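Note: in the hugetlb walkers above hmask is huge_page_mask(), i.e. ~(huge_page_size - 1): addr & hmask rounds an address down to the start of its huge page, and (start & ~hmask) >> PAGE_SHIFT (as in hmm.c) turns the offset inside the huge page into a pfn offset. A standalone sketch with illustrative 4 KiB base pages and a 2 MiB huge page:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT      12                      /* 4 KiB base pages (illustrative) */
    #define HPAGE_SIZE      (2ULL << 20)            /* 2 MiB huge page (illustrative) */
    #define HPAGE_MASK      (~(HPAGE_SIZE - 1))

    int main(void)
    {
            uint64_t addr  = 0x12345678;
            uint64_t hmask = HPAGE_MASK;

            /* start of the huge page containing addr */
            uint64_t hpage_start = addr & hmask;

            /* base-page offset of addr within its huge page; adding this to
             * the huge page's first pfn gives the pfn of addr itself */
            uint64_t pfn_offset = (addr & ~hmask) >> PAGE_SHIFT;

            printf("hpage_start=%#llx pfn_offset=%llu\n",
                   (unsigned long long)hpage_start,
                   (unsigned long long)pfn_offset);
            return 0;
    }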
/linux-6.12.1/drivers/gpu/drm/tegra/
drm.h
48 unsigned int hmask, vmask; member
/linux-6.12.1/include/media/tpg/
v4l2-tpg.h
195 unsigned hmask[TPG_MAX_PLANES]; member
442 return ((x / tpg->hdownsampling[plane]) & tpg->hmask[plane]) * in tpg_hdiv()
/linux-6.12.1/mm/damon/
vaddr.c
369 static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask, in damon_mkold_hugetlb_entry() argument
503 static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask, in damon_young_hugetlb_entry() argument
/linux-6.12.1/drivers/net/ethernet/netronome/nfp/abm/
cls.c
45 if (knode->sel->hoff || knode->sel->hmask) { in nfp_abm_u32_check_knode()
/linux-6.12.1/drivers/net/ethernet/intel/ixgbe/
ixgbe_x550.c
3705 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; in ixgbe_acquire_swfw_sync_x550em_a() local
3711 if (hmask) in ixgbe_acquire_swfw_sync_x550em_a()
3712 status = ixgbe_acquire_swfw_sync_X540(hw, hmask); in ixgbe_acquire_swfw_sync_x550em_a()
3721 if (hmask) in ixgbe_acquire_swfw_sync_x550em_a()
3722 ixgbe_release_swfw_sync_X540(hw, hmask); in ixgbe_acquire_swfw_sync_x550em_a()
3740 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; in ixgbe_release_swfw_sync_x550em_a() local
3745 if (hmask) in ixgbe_release_swfw_sync_x550em_a()
3746 ixgbe_release_swfw_sync_X540(hw, hmask); in ixgbe_release_swfw_sync_x550em_a()
/linux-6.12.1/tools/include/uapi/linux/
pkt_cls.h
182 __be32 hmask; member
/linux-6.12.1/fs/proc/
task_mmu.c
1004 static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask, in smaps_hugetlb_range() argument
1862 static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask, in pagemap_hugetlb_range() argument
1892 ((addr & ~hmask) >> PAGE_SHIFT); in pagemap_hugetlb_range()
2550 static int pagemap_scan_hugetlb_entry(pte_t *ptep, unsigned long hmask, in pagemap_scan_hugetlb_entry() argument
2971 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask, in gather_hugetlb_stats() argument
2989 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask, in gather_hugetlb_stats() argument
