/linux-6.12.1/drivers/gpu/drm/xe/ |
D | xe_bo_evict.c |
    62  spin_lock(&xe->pinned.lock);  in xe_bo_evict_all()
    64  bo = list_first_entry_or_null(&xe->pinned.external_vram,  in xe_bo_evict_all()
    70  spin_unlock(&xe->pinned.lock);  in xe_bo_evict_all()
    77  spin_lock(&xe->pinned.lock);  in xe_bo_evict_all()
    79  &xe->pinned.external_vram);  in xe_bo_evict_all()
    80  spin_unlock(&xe->pinned.lock);  in xe_bo_evict_all()
    84  spin_lock(&xe->pinned.lock);  in xe_bo_evict_all()
    86  list_splice_tail(&still_in_list, &xe->pinned.external_vram);  in xe_bo_evict_all()
    87  spin_unlock(&xe->pinned.lock);  in xe_bo_evict_all()
    96  spin_lock(&xe->pinned.lock);  in xe_bo_evict_all()
    [all …]
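The xe_bo_evict_all() matches above show a common kernel pattern: drain a spinlock-protected list by popping one entry at a time, dropping the lock for the blocking work, and splicing the processed entries back at the end. A minimal generic sketch of that pattern follows; the structure and helper names (my_dev, my_obj, process_one) are hypothetical, not the xe driver's API.

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct my_obj {
        struct list_head link;
    };

    struct my_dev {
        spinlock_t lock;
        struct list_head objects;
    };

    static int drain_objects(struct my_dev *d, int (*process_one)(struct my_obj *))
    {
        LIST_HEAD(still_in_list);
        struct my_obj *obj;
        int ret = 0;

        spin_lock(&d->lock);
        for (;;) {
            obj = list_first_entry_or_null(&d->objects, struct my_obj, link);
            if (!obj)
                break;

            /* Park the entry so this loop never sees it twice. */
            list_move_tail(&obj->link, &still_in_list);
            spin_unlock(&d->lock);

            ret = process_one(obj);    /* may sleep: the lock is dropped */

            spin_lock(&d->lock);
            if (ret)
                break;
        }
        /* Return every parked entry to the main list. */
        list_splice_tail(&still_in_list, &d->objects);
        spin_unlock(&d->lock);

        return ret;
    }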
|
/linux-6.12.1/rust/kernel/init/ |
D | macros.rs |
    573  @pinned(),
    596  @pinned($($pinned:tt)*),
    618  @pinned($($pinned)* $($accum)* $field: ::core::marker::PhantomPinned,),
    636  @pinned($($pinned:tt)*),
    653  @pinned($($pinned)* $($accum)* $field: $type,),
    671  @pinned($($pinned:tt)*),
    688  @pinned($($pinned)*),
    706  @pinned($($pinned:tt)*),
    724  @pinned($($pinned)*),
    744  @pinned($($pinned:tt)*),
    [all …]
|
/linux-6.12.1/drivers/infiniband/core/ |
D | umem.c |
    153  int pinned, ret;  in ib_umem_get() local
    213  pinned = pin_user_pages_fast(cur_base,  in ib_umem_get()
    218  if (pinned < 0) {  in ib_umem_get()
    219  ret = pinned;  in ib_umem_get()
    223  cur_base += pinned * PAGE_SIZE;  in ib_umem_get()
    224  npages -= pinned;  in ib_umem_get()
    226  &umem->sgt_append, page_list, pinned, 0,  in ib_umem_get()
    227  pinned << PAGE_SHIFT, ib_dma_max_seg_size(device),  in ib_umem_get()
    230  unpin_user_pages_dirty_lock(page_list, pinned, 0);  in ib_umem_get()
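The ib_umem_get() fragments above illustrate the standard pin_user_pages_fast() loop: the call may pin fewer pages than requested, so the caller advances the user address and retries, and must unpin everything already pinned on error. A condensed sketch of just that loop, with a hypothetical helper name and without ib_umem_get()'s locking and accounting, might look like this:

    #include <linux/errno.h>
    #include <linux/mm.h>

    static int pin_user_range(unsigned long addr, unsigned long npages,
                              struct page **pages)
    {
        unsigned long done = 0;
        int pinned;

        while (npages) {
            pinned = pin_user_pages_fast(addr, npages,
                                         FOLL_WRITE | FOLL_LONGTERM,
                                         pages + done);
            if (pinned <= 0) {
                /* Release whatever was already pinned before failing. */
                unpin_user_pages(pages, done);
                return pinned ? pinned : -EFAULT;
            }
            done += pinned;
            addr += (unsigned long)pinned * PAGE_SIZE;
            npages -= pinned;
        }
        return 0;
    }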
|
D | umem_dmabuf.c |
    220  umem_dmabuf->pinned = 1;  in ib_umem_dmabuf_get_pinned_with_dma_device()
    256  if (umem_dmabuf->pinned) {  in ib_umem_dmabuf_revoke()
    258  umem_dmabuf->pinned = 0;  in ib_umem_dmabuf_revoke()
|
/linux-6.12.1/drivers/infiniband/hw/hfi1/ |
D | pin_system.c |
    119  int pinned, cleared;  in pin_system_pages() local
    138  pinned = hfi1_acquire_user_pages(current->mm, start_address, npages, 0,  in pin_system_pages()
    141  if (pinned < 0) {  in pin_system_pages()
    143  SDMA_DBG(req, "pinned %d", pinned);  in pin_system_pages()
    144  return pinned;  in pin_system_pages()
    146  if (pinned != npages) {  in pin_system_pages()
    147  unpin_vector_pages(current->mm, pages, node->npages, pinned);  in pin_system_pages()
    148  SDMA_DBG(req, "npages %u pinned %d", npages, pinned);  in pin_system_pages()
    155  atomic_add(pinned, &pq->n_locked);  in pin_system_pages()
    156  SDMA_DBG(req, "done. pinned %d", pinned);  in pin_system_pages()
|
D | user_exp_rcv.c |
    161  int pinned;  in pin_rcv_pages() local
    187  pinned = hfi1_acquire_user_pages(current->mm, vaddr, npages, true, pages);  in pin_rcv_pages()
    188  if (pinned <= 0) {  in pin_rcv_pages()
    190  return pinned;  in pin_rcv_pages()
    193  fd->tid_n_pinned += pinned;  in pin_rcv_pages()
    194  return pinned;  in pin_rcv_pages()
    249  int ret = 0, need_group = 0, pinned;  in hfi1_user_exp_rcv_setup() local
    288  pinned = pin_rcv_pages(fd, tidbuf);  in hfi1_user_exp_rcv_setup()
    289  if (pinned <= 0) {  in hfi1_user_exp_rcv_setup()
    290  ret = (pinned < 0) ? pinned : -ENOSPC;  in hfi1_user_exp_rcv_setup()
    [all …]
|
/linux-6.12.1/include/trace/events/ |
D | xen.h |
    287  TP_PROTO(struct mm_struct *mm, unsigned long pfn, unsigned level, bool pinned),
    288  TP_ARGS(mm, pfn, level, pinned),
    293  __field(bool, pinned)
    298  __entry->pinned = pinned),
    301  __entry->pinned ? "" : "un")
    305  TP_PROTO(unsigned long pfn, unsigned level, bool pinned),
    306  TP_ARGS(pfn, level, pinned),
    310  __field(bool, pinned)
    314  __entry->pinned = pinned),
    317  __entry->pinned ? "" : "un")
|
/linux-6.12.1/drivers/fpga/ |
D | dfl-afu-dma-region.c |
    39  int ret, pinned;  in afu_dma_pin_pages() local
    51  pinned = pin_user_pages_fast(region->user_addr, npages, FOLL_WRITE,  in afu_dma_pin_pages()
    53  if (pinned < 0) {  in afu_dma_pin_pages()
    54  ret = pinned;  in afu_dma_pin_pages()
    56  } else if (pinned != npages) {  in afu_dma_pin_pages()
    61  dev_dbg(dev, "%d pages pinned\n", pinned);  in afu_dma_pin_pages()
    66  unpin_user_pages(region->pages, pinned);  in afu_dma_pin_pages()
|
/linux-6.12.1/drivers/gpu/drm/i915/gem/ |
D | i915_gem_userptr.c |
    239  int pinned, ret;  in i915_gem_object_userptr_submit_init() local
    267  pinned = 0;  in i915_gem_object_userptr_submit_init()
    268  while (pinned < num_pages) {  in i915_gem_object_userptr_submit_init()
    269  ret = pin_user_pages_fast(obj->userptr.ptr + pinned * PAGE_SIZE,  in i915_gem_object_userptr_submit_init()
    270  num_pages - pinned, gup_flags,  in i915_gem_object_userptr_submit_init()
    271  &pvec[pinned]);  in i915_gem_object_userptr_submit_init()
    275  pinned += ret;  in i915_gem_object_userptr_submit_init()
    303  unpin_user_pages(pvec, pinned);  in i915_gem_object_userptr_submit_init()
|
/linux-6.12.1/tools/bpf/bpftool/Documentation/ |
D | bpftool-map.rst |
    47   | *MAP* := { **id** *MAP_ID* | **pinned** *FILE* | **name** *MAP_NAME* }
    49   | *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* | **name** *PROG_NAME* }
    166  Show file names of pinned maps.
    216  | **# bpftool map del pinned /sys/fs/bpf/map key 13 00 07 00**
    224  processing. Note that the prog array map MUST be pinned into the BPF virtual
    238  pinned /sys/fs/bpf/foo/xdp
    242  pinned /sys/fs/bpf/foo/process
    246  pinned /sys/fs/bpf/foo/debug
    258  | **# bpftool map dump pinned /sys/fs/bpf/bar**
    265  | **# bpftool map update pinned /sys/fs/bpf/bar key 0 0 0 0 value pinned /sys/fs/bpf/foo/debug**
    [all …]
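For context on the "pinned FILE" handle used throughout these bpftool examples: a map or program fd is attached to a path under the BPF virtual filesystem, and any process can later reopen the object by that path. A minimal userspace sketch using libbpf follows; the map parameters and the /sys/fs/bpf/demo_map path are illustrative, not taken from the documentation above.

    #include <stdio.h>
    #include <bpf/bpf.h>

    int main(void)
    {
        int map_fd, again_fd;

        map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "demo_map",
                                sizeof(__u32), sizeof(__u64), 4, NULL);
        if (map_fd < 0)
            return 1;

        /* Pin under /sys/fs/bpf so "bpftool map dump pinned ..." can find it. */
        if (bpf_obj_pin(map_fd, "/sys/fs/bpf/demo_map"))
            return 1;

        /* Any process may now reopen the same map by path. */
        again_fd = bpf_obj_get("/sys/fs/bpf/demo_map");
        printf("reopened pinned map: fd=%d\n", again_fd);
        return 0;
    }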
|
/linux-6.12.1/tools/testing/selftests/bpf/ |
D | test_flow_dissector.sh |
    30  if ! unshare --net $bpftool prog attach pinned \
    36  $bpftool prog attach pinned /sys/fs/bpf/flow/_dissect \
    39  if unshare --net $bpftool prog attach pinned \
    45  if ! $bpftool prog detach pinned \
|
/linux-6.12.1/drivers/virt/acrn/ |
D | mm.c |
    164  int ret, pinned;  in acrn_vm_ram_map() local
    242  pinned = pin_user_pages_fast(memmap->vma_base,  in acrn_vm_ram_map()
    245  if (pinned < 0) {  in acrn_vm_ram_map()
    246  ret = pinned;  in acrn_vm_ram_map()
    248  } else if (pinned != nr_pages) {  in acrn_vm_ram_map()
    343  for (i = 0; i < pinned; i++)  in acrn_vm_ram_map()
|
/linux-6.12.1/drivers/gpu/drm/i915/gt/ |
D | intel_gt_buffer_pool.c |
    107  if (node->pinned) {  in pool_retire()
    112  node->pinned = false;  in pool_retire()
    129  if (node->pinned)  in intel_gt_buffer_pool_mark_used()
    135  node->pinned = true;  in intel_gt_buffer_pool_mark_used()
    153  node->pinned = false;  in node_create()
|
/linux-6.12.1/Documentation/core-api/ |
D | pin_user_pages.rst |
    35   In other words, use pin_user_pages*() for DMA-pinned pages, and
    89   Tracking dma-pinned pages
    92   Some of the key design constraints, and solutions, for tracking dma-pinned
    98   * False positives (reporting that a page is dma-pinned, when in fact it is not)
    105  the upper bits in that field for a dma-pinned count. "Sort of", means that,
    109  on it 1024 times, then it will appear to have a single dma-pinned count.
    121  * Callers must specifically request "dma-pinned tracking of pages". In other
    152  NOTE: Some pages, such as DAX pages, cannot be pinned with longterm pins. That's
    202  The whole point of marking folios as "DMA-pinned" or "gup-pinned" is to be able
    203  to query, "is this folio DMA-pinned?" That allows code such as folio_mkclean()
    [all …]
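The pin_user_pages.rst excerpts above describe the FOLL_PIN accounting that lets other code ask "is this folio DMA-pinned?". A minimal kernel-side sketch of that round trip, assuming a hypothetical helper name, is:

    #include <linux/mm.h>

    static bool pin_and_check(unsigned long uaddr, struct page **page)
    {
        bool maybe_pinned;

        /* One FOLL_PIN reference; it must be paired with unpin_user_page(). */
        if (pin_user_pages_fast(uaddr, 1, FOLL_WRITE, page) != 1)
            return false;

        /*
         * "Maybe", because the count is approximate: false positives are
         * acceptable (a heavily shared page can look pinned), false
         * negatives are not.
         */
        maybe_pinned = folio_maybe_dma_pinned(page_folio(*page));

        unpin_user_page(*page);
        return maybe_pinned;
    }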
|
/linux-6.12.1/arch/powerpc/mm/book3s64/ |
D | iommu_api.c |
    61   long i, ret, locked_entries = 0, pinned = 0;  in mm_iommu_do_alloc() local
    110  pinned += n;  in mm_iommu_do_alloc()
    114  pinned += ret;  in mm_iommu_do_alloc()
    118  if (pinned != entries) {  in mm_iommu_do_alloc()
    175  unpin_user_pages(mem->hpages, pinned);  in mm_iommu_do_alloc()
|
/linux-6.12.1/arch/arm64/mm/ |
D | context.c |
    179  if (refcount_read(&mm->context.pinned))  in new_context()
    285  if (refcount_inc_not_zero(&mm->context.pinned))  in arm64_mm_context_get()
    304  refcount_set(&mm->context.pinned, 1);  in arm64_mm_context_get()
    329  if (refcount_dec_and_test(&mm->context.pinned)) {  in arm64_mm_context_put()
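The arm64 ASID code above uses a refcount to pin a context: the fast path bumps an existing count with refcount_inc_not_zero(), the slow path initialises the count to 1 under a lock, and the last put releases the resource. A generic sketch of that pattern follows; the structure, lock, and reserve/release placeholders are hypothetical, not the arm64 implementation.

    #include <linux/refcount.h>
    #include <linux/spinlock.h>

    struct pinned_ctx {
        refcount_t pinned;
    };

    static DEFINE_SPINLOCK(ctx_lock);

    static void ctx_pin(struct pinned_ctx *ctx)
    {
        /* Fast path: someone already holds a pin. */
        if (refcount_inc_not_zero(&ctx->pinned))
            return;

        spin_lock(&ctx_lock);
        if (!refcount_inc_not_zero(&ctx->pinned)) {
            /* First pin: reserve the underlying resource here, then publish. */
            refcount_set(&ctx->pinned, 1);
        }
        spin_unlock(&ctx_lock);
    }

    static void ctx_unpin(struct pinned_ctx *ctx)
    {
        spin_lock(&ctx_lock);
        if (refcount_dec_and_test(&ctx->pinned)) {
            /* Last pin dropped: release the underlying resource here. */
        }
        spin_unlock(&ctx_lock);
    }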
|
/linux-6.12.1/Documentation/arch/powerpc/ |
D | pmu-ebb.rst |
    53   existing "pinned" and "exclusive" attributes of perf_events. This means EBB
    54   events will be given priority over other events, unless they are also pinned.
    55   If an EBB event and a regular event are both pinned, then whichever is enabled
    70   An EBB event must be created with the "pinned" and "exclusive" attributes set.
    100  This behaviour occurs because the EBB event is pinned and exclusive. When the
    101  EBB event is enabled it will force all other non-pinned events off the PMU. In
    103  pinned on the PMU then the enable will not be successful.
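In perf_event_attr terms, the two attributes named above are single-bit fields. A minimal userspace sketch of opening an event with both bits set follows; the raw config value is a placeholder, and a real EBB event additionally needs the PMU-specific EBB configuration this document describes.

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    int main(void)
    {
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_RAW;
        attr.config = 0x1001e;      /* placeholder raw event code */
        attr.pinned = 1;            /* keep the event on the PMU whenever enabled */
        attr.exclusive = 1;         /* schedule no other events alongside it */

        /* Count for the calling task on any CPU. */
        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }
        close(fd);
        return 0;
    }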
|
/linux-6.12.1/Documentation/infiniband/ |
D | user_verbs.rst |
    54  amount of memory pinned in the process's pinned_vm, and checks that
    57  Pages that are pinned multiple times are counted each time they are
    58  pinned, so the value of pinned_vm may be an overestimate of the
    59  number of pages pinned by a process.
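A kernel-side sketch of the accounting described above, assuming the pattern commonly used by drivers (the helper names are hypothetical): pinned pages are added to mm->pinned_vm, checked against RLIMIT_MEMLOCK, and counted once per pin even when the same page is pinned repeatedly.

    #include <linux/capability.h>
    #include <linux/mm.h>
    #include <linux/sched/signal.h>

    static int account_pinned(struct mm_struct *mm, unsigned long npages)
    {
        unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        unsigned long new_pinned;

        new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
        if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
            atomic64_sub(npages, &mm->pinned_vm);
            return -ENOMEM;
        }
        return 0;
    }

    static void unaccount_pinned(struct mm_struct *mm, unsigned long npages)
    {
        atomic64_sub(npages, &mm->pinned_vm);
    }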
|
/linux-6.12.1/drivers/s390/cio/ |
D | vfio_ccw_cp.c |
    133  int pinned = 0, npage = 1;  in page_array_pin() local
    136  while (pinned < pa->pa_nr) {  in page_array_pin()
    137  dma_addr_t *first = &pa->pa_iova[pinned];  in page_array_pin()
    140  if (pinned + npage < pa->pa_nr &&  in page_array_pin()
    149  &pa->pa_page[pinned]);  in page_array_pin()
    153  pinned += ret;  in page_array_pin()
    157  pinned += npage;  in page_array_pin()
    164  page_array_unpin(pa, vdev, pinned, unaligned);  in page_array_pin()
|
/linux-6.12.1/drivers/gpu/drm/etnaviv/ |
D | etnaviv_gem.c |
    645  int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;  in etnaviv_gem_userptr_get_pages() local
    661  unsigned num_pages = npages - pinned;  in etnaviv_gem_userptr_get_pages()
    662  uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;  in etnaviv_gem_userptr_get_pages()
    663  struct page **pages = pvec + pinned;  in etnaviv_gem_userptr_get_pages()
    667  unpin_user_pages(pvec, pinned);  in etnaviv_gem_userptr_get_pages()
    672  pinned += ret;  in etnaviv_gem_userptr_get_pages()
    674  } while (pinned < npages);  in etnaviv_gem_userptr_get_pages()
|
/linux-6.12.1/samples/bpf/ |
D | tcp_bpf.readme |
    15  bpftool cgroup attach /tmp/cgroupv2/foo sock_ops pinned /sys/fs/bpf/tcp_prog
    28  bpftool cgroup detach /tmp/cgroupv2/foo sock_ops pinned /sys/fs/bpf/tcp_prog
|
/linux-6.12.1/tools/bpf/bpftool/bash-completion/ |
D | bpftool |
    286   file|pinned|-B|--base-btf)
    331   local MAP_TYPE='id pinned name'
    332   local PROG_TYPE='id pinned tag name'
    412   pinned)
    437   pinned)
    508   pinned|pinmaps)
    537   pinned)
    630   pinned)
    888   local MAP_TYPE='id pinned name'
    1056  # "id|pinned|tag|name" (we already checked for
    [all …]
|
/linux-6.12.1/drivers/vhost/ |
D | vdpa.c |
    926   unsigned long pfn, pinned;  in vhost_vdpa_pa_unmap() local
    929   pinned = PFN_DOWN(map->size);  in vhost_vdpa_pa_unmap()
    931   pinned > 0; pfn++, pinned--) {  in vhost_vdpa_pa_unmap()
    1106  long pinned;  in vhost_vdpa_pa_map() local
    1137  pinned = pin_user_pages(cur_base, sz2pin,  in vhost_vdpa_pa_map()
    1139  if (sz2pin != pinned) {  in vhost_vdpa_pa_map()
    1140  if (pinned < 0) {  in vhost_vdpa_pa_map()
    1141  ret = pinned;  in vhost_vdpa_pa_map()
    1143  unpin_user_pages(page_list, pinned);  in vhost_vdpa_pa_map()
    1153  for (i = 0; i < pinned; i++) {  in vhost_vdpa_pa_map()
    [all …]
|
/linux-6.12.1/drivers/gpu/drm/radeon/ |
D | radeon_ttm.c |
    328  unsigned pinned = 0;  in radeon_ttm_tt_pin_userptr() local
    349  unsigned num_pages = ttm->num_pages - pinned;  in radeon_ttm_tt_pin_userptr()
    350  uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;  in radeon_ttm_tt_pin_userptr()
    351  struct page **pages = ttm->pages + pinned;  in radeon_ttm_tt_pin_userptr()
    358  pinned += r;  in radeon_ttm_tt_pin_userptr()
    360  } while (pinned < ttm->num_pages);  in radeon_ttm_tt_pin_userptr()
    381  release_pages(ttm->pages, pinned);  in radeon_ttm_tt_pin_userptr()
|
/linux-6.12.1/security/loadpin/ |
D | Kconfig |
    8   can be pinned to the first filesystem used for loading. When
    38  parameter. The file must be located on the pinned root and
|