/linux-6.12.1/tools/testing/selftests/resctrl/ |
D | cat_test.c | 1 // SPDX-License-Identifier: GPL-2.0 19 * test with n bits is MIN_DIFF_PERCENT_PER_BIT * (n - 1). With e.g. 5 vs 4 21 * MIN_DIFF_PERCENT_PER_BIT * (4 - 1) = 3 percent. 44 float delta = (__s64)(avg_llc_val - *prev_avg_llc_val); in show_results_info() 81 fp = fopen(param->filename, "r"); in check_results() 85 return -1; in check_results() 115 MIN_DIFF_PERCENT_PER_BIT * (bits - 1), in check_results() 137 * cat_test - Execute CAT benchmark and measure cache misses 169 if (strcmp(param->filename, "") == 0) in cat_test() 170 sprintf(param->filename, "stdio"); in cat_test() [all …]
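
The excerpt's pass criterion scales the allowed miss-count delta with the number of cache ways under test: MIN_DIFF_PERCENT_PER_BIT * (n - 1). A minimal userspace sketch of that check, assuming MIN_DIFF_PERCENT_PER_BIT is 1 (consistent with the "5 vs 4 bits = 3 percent" example) and ignoring the sign handling the real selftest performs:

    #include <stdbool.h>
    #include <stdlib.h>

    /* Hypothetical restatement of the threshold check, not the selftest's code. */
    #define MIN_DIFF_PERCENT_PER_BIT 1UL

    static bool cat_result_ok(long long avg_llc_val, long long prev_avg_llc_val,
                              unsigned int bits)
    {
            long long delta;
            double diff_percent;

            if (!prev_avg_llc_val)          /* first run: nothing to compare against */
                    return true;

            delta = avg_llc_val - prev_avg_llc_val;
            /* change relative to the previous run, in percent */
            diff_percent = (double)llabs(delta) * 100.0 / (double)llabs(prev_avg_llc_val);

            return diff_percent >= (double)(MIN_DIFF_PERCENT_PER_BIT * (bits - 1));
    }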
|
/linux-6.12.1/drivers/gpu/drm/ |
D | drm_fb_dma_helper.c | 1 // SPDX-License-Identifier: GPL-2.0-or-later 6 * Author: Lars-Peter Clausen <lars@metafoo.de> 20 #include <linux/dma-mapping.h> 26 * Provides helper functions for creating a DMA-contiguous framebuffer. 28 * Depending on the platform, the buffers may be physically non-contiguous and 30 * physically-contiguous memory (using, for instance, CMA or a pool of memory 35 * callback function to create a DMA-contiguous framebuffer. 39 * drm_fb_dma_get_gem_obj() - Get DMA GEM object for framebuffer 61 * drm_fb_dma_get_gem_addr() - Get DMA (bus) address for framebuffer, for pixel 78 u32 block_w = drm_format_info_block_width(fb->format, plane); in drm_fb_dma_get_gem_addr() [all …]
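
drm_fb_dma_get_gem_obj() and drm_fb_dma_get_gem_addr() are the helpers this file exports; a hedged sketch of how a simple KMS driver's plane update might use the latter (the scanout register write is hypothetical):

    #include <drm/drm_fb_dma_helper.h>
    #include <drm/drm_framebuffer.h>
    #include <drm/drm_plane.h>

    static void example_plane_update(struct drm_plane *plane,
                                     struct drm_plane_state *state)
    {
            struct drm_framebuffer *fb = state->fb;
            dma_addr_t paddr;

            if (!fb)
                    return;

            /* DMA (bus) address of the first visible pixel of plane 0,
             * taking the plane state's source offsets into account.
             */
            paddr = drm_fb_dma_get_gem_addr(fb, state, 0);

            /* program "paddr" into the (hypothetical) scanout base register */
    }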
|
/linux-6.12.1/Documentation/core-api/ |
D | swiotlb.rst | 1 .. SPDX-License-Identifier: GPL-2.0 19 These APIs use the device DMA attributes and kernel-wide settings to determine 30 --------------- 33 only provide 32-bit DMA addresses. By allocating bounce buffer memory below 40 directed to guest memory that is unencrypted. CoCo VMs set a kernel-wide option 54 IOMMU access control is per-granule, the untrusted device can gain access to 60 ------------------ 64 buffer memory is physically contiguous. The expectation is that the DMA layer 85 ------------------------------ 89 pre-allocated at boot time (but see Dynamic swiotlb below). Because swiotlb [all …]
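
Bounce buffering is invisible to drivers: they map a buffer with the streaming DMA API and the core decides whether swiotlb is needed. A minimal sketch, assuming a driver-owned buffer and a device that may be unable to address it directly (32-bit device, untrusted device, or CoCo guest memory):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    static int example_tx(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t dma;

            /* May transparently bounce "buf" through swiotlb. */
            dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, dma))
                    return -ENOMEM;

            /* ... hand "dma" to the device and wait for completion ... */

            dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
            return 0;
    }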
|
D | dma-api.rst | 8 of the API (and actual examples), see Documentation/core-api/dma-api-howto.rst. 11 Part II describes extensions for supporting non-consistent memory 13 non-consistent platforms (this is usually only legacy platforms) you 16 Part I - dma_API 17 ---------------- 19 To get the dma_API, you must #include <linux/dma-mapping.h>. This 27 Part Ia - Using large DMA-coherent buffers 28 ------------------------------------------ 76 Part Ib - Using small DMA-coherent buffers 77 ------------------------------------------ [all …]
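
Part Ia's "large DMA-coherent buffer" interface is dma_alloc_coherent()/dma_free_coherent(); a sketch under the assumption of a made-up 4 KiB descriptor ring:

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    #define RING_BYTES 4096         /* illustrative size */

    static void *example_alloc_ring(struct device *dev, dma_addr_t *dma)
    {
            /* Returns a kernel virtual address; *dma is the device-visible
             * address of the same physically contiguous buffer.
             */
            return dma_alloc_coherent(dev, RING_BYTES, dma, GFP_KERNEL);
    }

    static void example_free_ring(struct device *dev, void *ring, dma_addr_t dma)
    {
            dma_free_coherent(dev, RING_BYTES, ring, dma);
    }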
|
/linux-6.12.1/Documentation/driver-api/dmaengine/ |
D | provider.rst | 20 DMA-eligible devices to the controller itself. Whenever the device 44 transfer into smaller sub-transfers. 47 that involve a single contiguous block of data. However, some of the 49 non-contiguous buffers to a contiguous buffer, which is called 50 scatter-gather. 53 scatter-gather. So we're left with two cases here: either we have a 56 that implements in hardware scatter-gather. 79 These were just the general memory-to-memory (also called mem2mem) or 80 memory-to-device (mem2dev) kind of transfers. Most devices often 98 documentation file in Documentation/crypto/async-tx-api.rst. [all …]
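
On the client side, scatter-gather shows up as dmaengine_prep_slave_sg(): one transaction described by a scatterlist instead of a single contiguous block. A hedged sketch (the channel name "tx" and the already-DMA-mapped scatterlist are assumptions):

    #include <linux/dmaengine.h>
    #include <linux/err.h>
    #include <linux/errno.h>
    #include <linux/scatterlist.h>

    static int example_sg_tx(struct device *dev, struct scatterlist *sgl,
                             unsigned int nents)
    {
            struct dma_chan *chan;
            struct dma_async_tx_descriptor *desc;

            chan = dma_request_chan(dev, "tx");     /* name from the DT/ACPI binding */
            if (IS_ERR(chan))
                    return PTR_ERR(chan);

            /* "sgl" must already be mapped with dma_map_sg() for this device. */
            desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
                                           DMA_PREP_INTERRUPT);
            if (!desc) {
                    dma_release_channel(chan);
                    return -EINVAL;
            }

            dmaengine_submit(desc);
            dma_async_issue_pending(chan);
            return 0;
    }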
|
/linux-6.12.1/kernel/dma/ |
D | Kconfig | 1 # SPDX-License-Identifier: GPL-2.0-only 99 pools as needed. To reduce run-time kernel memory requirements, you 123 <Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt> 128 # Should be selected if we can mmap non-coherent mappings to userspace. 161 bool "DMA Contiguous Memory Allocator" 164 This enables the Contiguous Memory Allocator which allows drivers 165 to allocate big physically-contiguous blocks of memory for use with 166 hardware components that do not support I/O map nor scatter-gather. 171 For more information see <kernel/dma/contiguous.c>. 177 bool "Enable separate DMA Contiguous Memory Area for NUMA Node" [all …]
|
/linux-6.12.1/Documentation/mm/ |
D | memory-model.rst | 1 .. SPDX-License-Identifier: GPL-2.0 9 spans a contiguous range up to the maximal address. It could be, 11 for the CPU. Then there could be several contiguous ranges at 23 Regardless of the selected memory model, there exists one-to-one 35 non-NUMA systems with contiguous, or mostly contiguous, physical 54 straightforward: `PFN - ARCH_PFN_OFFSET` is an index to the 65 as hot-plug and hot-remove of the physical memory, alternative memory 66 maps for non-volatile memory devices and deferred initialization of 85 NR\_MEM\_SECTIONS = 2 ^ {(MAX\_PHYSMEM\_BITS - SECTION\_SIZE\_BITS)} 87 The `mem_section` objects are arranged in a two-dimensional array [all …]
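
As a worked example of the section arithmetic: with MAX_PHYSMEM_BITS = 46 and SECTION_SIZE_BITS = 27 (typical x86_64 values with 4-level paging), there are 2^19 = 524288 sections of 128 MiB each. A sketch of the PFN-to-section mapping using those assumed values:

    /* Illustrative constants; the real ones are per-architecture. */
    #define EX_PAGE_SHIFT           12
    #define EX_SECTION_SIZE_BITS    27
    #define EX_MAX_PHYSMEM_BITS     46
    #define EX_PFN_SECTION_SHIFT    (EX_SECTION_SIZE_BITS - EX_PAGE_SHIFT)
    #define EX_NR_MEM_SECTIONS      (1UL << (EX_MAX_PHYSMEM_BITS - EX_SECTION_SIZE_BITS))

    /* Which mem_section a page frame number falls into. */
    static inline unsigned long example_pfn_to_section_nr(unsigned long pfn)
    {
            return pfn >> EX_PFN_SECTION_SHIFT;
    }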
|
/linux-6.12.1/arch/x86/kernel/cpu/resctrl/ |
D | ctrlmondata.c | 1 // SPDX-License-Identifier: GPL-2.0-only 4 * - Cache Allocation code. 40 if (!r->membw.delay_linear && r->membw.arch_needs_linear) { in bw_validate() 41 rdt_last_cmd_puts("No support for non-linear MB domains\n"); in bw_validate() 57 if (bw < r->membw.min_bw || bw > r->default_ctrl) { in bw_validate() 59 bw, r->membw.min_bw, r->default_ctrl); in bw_validate() 63 *data = roundup(bw, (unsigned long)r->membw.bw_gran); in bw_validate() 71 u32 closid = data->rdtgrp->closid; in parse_bw() 72 struct rdt_resource *r = s->res; in parse_bw() 75 cfg = &d->staged_config[s->conf_type]; in parse_bw() [all …]
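
The bandwidth parsing above boils down to a range check against min_bw/default_ctrl plus a round-up to the hardware granularity. A standalone restatement of that logic (the limits here are illustrative, not the hardware's):

    #include <stdbool.h>

    static unsigned long example_roundup(unsigned long x, unsigned long to)
    {
            return ((x + to - 1) / to) * to;
    }

    static bool example_bw_validate(unsigned long bw, unsigned long min_bw,
                                    unsigned long max_bw, unsigned long bw_gran,
                                    unsigned long *out)
    {
            if (bw < min_bw || bw > max_bw)
                    return false;   /* "MB value ... out of range" in the real code */

            *out = example_roundup(bw, bw_gran);
            return true;
    }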
|
/linux-6.12.1/arch/nios2/ |
D | Kconfig | 1 # SPDX-License-Identifier: GPL-2.0 51 int "Order of maximal physically contiguous allocations" 55 contiguous allocations. The limit is called MAX_PAGE_ORDER and it 57 allocated as a single contiguous block. This option allows 59 large blocks of physically contiguous memory is required. 82 2 or 4. Any non-aligned load/store instructions will be trapped and 99 some command-line options at build time by entering them here. In 120 bool "Passed kernel command line from u-boot" 122 Use bootargs env variable from u-boot for kernel command line.
|
/linux-6.12.1/drivers/gpu/drm/exynos/ |
D | exynos_drm_gem.c | 1 // SPDX-License-Identifier: GPL-2.0-or-later 9 #include <linux/dma-buf.h> 27 struct drm_device *dev = exynos_gem->base.dev; in exynos_drm_alloc_buf() 30 if (exynos_gem->dma_addr) { in exynos_drm_alloc_buf() 36 * if EXYNOS_BO_CONTIG, fully physically contiguous memory in exynos_drm_alloc_buf() 37 * region will be allocated else physically contiguous in exynos_drm_alloc_buf() 40 if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG)) in exynos_drm_alloc_buf() 47 if (exynos_gem->flags & EXYNOS_BO_WC || in exynos_drm_alloc_buf() 48 !(exynos_gem->flags & EXYNOS_BO_CACHABLE)) in exynos_drm_alloc_buf() 55 exynos_gem->dma_attrs = attr; in exynos_drm_alloc_buf() [all …]
|
D | exynos_drm_fb.c | 1 // SPDX-License-Identifier: GPL-2.0-or-later 8 * Seung-Woo Kim <sw0312.kim@samsung.com> 37 flags = exynos_gem->flags; in check_fb_gem_memory_type() 40 * Physically non-contiguous memory type for framebuffer is not in check_fb_gem_memory_type() 44 DRM_DEV_ERROR(drm_dev->dev, in check_fb_gem_memory_type() 45 "Non-contiguous GEM memory is not supported.\n"); in check_fb_gem_memory_type() 46 return -EINVAL; in check_fb_gem_memory_type() 69 return ERR_PTR(-ENOMEM); in exynos_drm_framebuffer_init() 76 fb->obj[i] = &exynos_gem[i]->base; in exynos_drm_framebuffer_init() 83 DRM_DEV_ERROR(dev->dev, in exynos_drm_framebuffer_init() [all …]
|
/linux-6.12.1/arch/sh/mm/ |
D | Kconfig | 1 # SPDX-License-Identifier: GPL-2.0 12 Some SH processors (such as SH-2/SH-2A) lack an MMU. In order to 15 On other systems (such as the SH-3 and 4) where an MMU exists, 26 On MMU-less systems, any of these page sizes can be selected 34 int "Order of maximal physically contiguous allocations" 41 contiguous allocations. The limit is called MAX_PAGE_ORDER and it 43 allocated as a single contiguous block. This option allows 45 large blocks of physically contiguous memory is required. 89 bool "Support 32-bit physical addressing through PMB" 95 32-bits through the SH-4A PMB. If this is not set, legacy [all …]
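
Both Kconfig entries above describe the same knob: the buddy allocator cannot return more than 2^MAX_PAGE_ORDER contiguous pages in one call. A sketch of what that means to a caller, assuming 4 KiB pages and the common default order of 10 (so at most 4 MiB per allocation):

    #include <linux/gfp.h>

    static struct page *example_alloc_big_block(void)
    {
            /* order 10 = 1024 physically contiguous pages; asking for more
             * than MAX_PAGE_ORDER fails.
             */
            return alloc_pages(GFP_KERNEL, 10);
    }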
|
/linux-6.12.1/Documentation/admin-guide/mm/ |
D | nommu-mmap.rst | 2 No-MMU memory mapping support 5 The kernel has limited support for memory mapping under no-MMU conditions, such 16 The behaviour is similar between the MMU and no-MMU cases, but not identical; 21 In the MMU case: VM regions backed by arbitrary pages; copy-on-write 24 In the no-MMU case: VM regions backed by arbitrary contiguous runs of 31 the no-MMU case doesn't support these, behaviour is identical to 39 In the no-MMU case: 41 - If one exists, the kernel will re-use an existing mapping to the 45 - If possible, the file mapping will be directly on the backing device 50 - If the backing device can't or won't permit direct sharing, [all …]
|
D | hugetlbpage.rst | 13 256M and ppc64 supports 4K and 16M. A TLB is a cache of virtual-to-physical 93 Once a number of huge pages have been pre-allocated to the kernel huge page 169 indicates the current number of pre-allocated huge pages of the default size. 180 task that modifies ``nr_hugepages``. The default for the allowed nodes--when the 181 task has default memory policy--is all on-line nodes with memory. Allowed 182 nodes with insufficient available, contiguous memory for a huge page will be 189 physically contiguous memory that is present in system at the time of the 192 allocating extra pages on other nodes with sufficient available contiguous 197 the boot process when the possibility of getting physical contiguous pages 206 requested by applications. Writing any non-zero value into this file [all …]
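
A userspace consumer of the pre-allocated pool maps huge pages with MAP_HUGETLB (or through hugetlbfs). A minimal sketch, assuming a 2 MiB default huge page size and that nr_hugepages has already been raised:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    #define LENGTH (2UL * 1024 * 1024)      /* one default-size huge page on x86 */

    int main(void)
    {
            void *addr = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

            if (addr == MAP_FAILED) {
                    perror("mmap");         /* pool empty or not configured */
                    return 1;
            }
            *(char *)addr = 0;              /* fault in the huge page */
            munmap(addr, LENGTH);
            return 0;
    }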
|
D | pagemap.rst | 12 physical frame each virtual page is mapped to. It contains one 64-bit 16 * Bits 0-54 page frame number (PFN) if present 17 * Bits 0-4 swap type if swapped 18 * Bits 5-54 swap offset if swapped 19 * Bit 55 pte is soft-dirty (see 20 Documentation/admin-guide/mm/soft-dirty.rst) 22 * Bit 57 pte is uffd-wp write-protected (since 5.13) (see 23 Documentation/admin-guide/mm/userfaultfd.rst) 24 * Bits 58-60 zero 25 * Bit 61 page is file-page or shared-anon (since 3.5) [all …]
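
Each pagemap entry is one 64-bit word per virtual page. A sketch that reads the entry for one address and decodes the fields listed above (bit 63 = present, bits 0-54 = PFN; recent kernels zero the PFN for readers without CAP_SYS_ADMIN):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            long psize = sysconf(_SC_PAGESIZE);
            uintptr_t vaddr = (uintptr_t)&psize;    /* any mapped address */
            uint64_t entry;
            int fd = open("/proc/self/pagemap", O_RDONLY);

            if (fd < 0)
                    return 1;
            if (pread(fd, &entry, sizeof(entry),
                      (off_t)(vaddr / psize) * sizeof(entry)) != sizeof(entry))
                    return 1;

            printf("present=%d pfn=0x%llx\n", (int)(entry >> 63),
                   (unsigned long long)(entry & ((1ULL << 55) - 1)));
            close(fd);
            return 0;
    }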
|
/linux-6.12.1/tools/perf/pmu-events/arch/arm64/ |
D | common-and-microarch.json | 129 "PublicDescription": "Attributable Level 1 data cache write-back", 132 "BriefDescription": "Attributable Level 1 data cache write-back" 147 "PublicDescription": "Attributable Level 2 data cache write-back", 150 "BriefDescription": "Attributable Level 2 data cache write-back" 273 "PublicDescription": "Access to another socket in a multi-socket system", 276 "BriefDescription": "Access to another socket in a multi-socket system" 303 … "PublicDescription": "Attributable memory read access to another socket in a multi-socket system", 306 … "BriefDescription": "Attributable memory read access to another socket in a multi-socket system" 309 …"PublicDescription": "Level 1 data cache long-latency read miss. The counter counts each memory r… 312 "BriefDescription": "Level 1 data cache long-latency read miss" [all …]
|
/linux-6.12.1/drivers/vfio/pci/ |
D | vfio_pci_igd.c | 1 // SPDX-License-Identifier: GPL-2.0-only 8 * Register a device specific region through which to provide read-only 34 * igd_opregion_shift_copy() - Copy OpRegion to user buffer and shift position. 44 * Return: 0 on success, -EFAULT otherwise. 55 return -EFAULT; in igd_opregion_shift_copy() 59 *remaining -= bytes; in igd_opregion_shift_copy() 68 unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS; in vfio_pci_igd_rw() 69 struct igd_opregion_vbt *opregionvbt = vdev->region[i].data; in vfio_pci_igd_rw() 73 if (pos >= vdev->region[i].size || iswrite) in vfio_pci_igd_rw() 74 return -EINVAL; in vfio_pci_igd_rw() [all …]
|
/linux-6.12.1/drivers/gpu/drm/xe/ |
D | xe_bo_doc.h | 1 /* SPDX-License-Identifier: MIT */ 25 * ---------- 32 * vmap (XE can access the memory via xe_map layer) and have contiguous physical 35 * More details of why kernel BOs are pinned and contiguous below. 38 * -------- 53 * the BO dma-resv slots / lock point to the VM's dma-resv slots / lock (all 54 * private BOs to a VM share common dma-resv slots / lock). 62 * own unique dma-resv slots / lock. An external BO will be in an array of all 90 * ---------------- 109 * dma-resv slots. [all …]
|
/linux-6.12.1/mm/ |
D | util.c | 1 // SPDX-License-Identifier: GPL-2.0-only 20 #include <linux/elf-randomize.h> 35 * kfree_const - conditionally free memory 48 * kstrdup - allocate space for and copy an existing string 72 * kstrdup_const - conditionally duplicate an existing const string 92 * kstrndup - allocate space for and copy an existing string 120 * kmemdup - duplicate region of memory 127 * result is physically contiguous. Use kfree() to free. 141 * kmemdup_array - duplicate a given array. 149 * result is physically contiguous. Use kfree() to free. [all …]
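
Typical callers use these helpers to take owned copies of caller-provided data; kstrdup()/kmemdup() return kmalloc'd (physically contiguous) memory that is freed with kfree(). A sketch with a made-up structure:

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    struct example_cfg {
            char *name;
            void *blob;
            size_t blob_len;
    };

    static int example_copy_cfg(struct example_cfg *dst, const char *name,
                                const void *blob, size_t len)
    {
            dst->name = kstrdup(name, GFP_KERNEL);
            dst->blob = kmemdup(blob, len, GFP_KERNEL);
            if (!dst->name || !dst->blob) {
                    kfree(dst->name);       /* kfree(NULL) is a no-op */
                    kfree(dst->blob);
                    return -ENOMEM;
            }
            dst->blob_len = len;
            return 0;
    }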
|
/linux-6.12.1/drivers/s390/cio/ |
D | itcw.c | 1 // SPDX-License-Identifier: GPL-2.0 20 * struct itcw - incremental tcw helper data type 23 * tcw and associated tccb, tsb, data tidaw-list plus an optional interrogate 25 * contiguous buffer provided by the user. 28 * - reset unused fields to zero 29 * - fill in required pointers 30 * - ensure required alignment for data structures 31 * - prevent data structures to cross 4k-byte boundary where required 32 * - calculate tccb-related length fields 33 * - optionally provide ready-made interrogate tcw and associated structures [all …]
|
/linux-6.12.1/Documentation/devicetree/bindings/arm/ |
D | arm,coresight-catu.yaml | 1 # SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause 3 --- 4 $id: http://devicetree.org/schemas/arm/arm,coresight-catu.yaml# 5 $schema: http://devicetree.org/meta-schemas/core.yaml# 10 - Mathieu Poirier <mathieu.poirier@linaro.org> 11 - Mike Leach <mike.leach@linaro.org> 12 - Leo Yan <leo.yan@linaro.org> 13 - Suzuki K Poulose <suzuki.poulose@arm.com> 26 translates contiguous Virtual Addresses (VAs) from an AXI master into 27 non-contiguous Physical Addresses (PAs) that are intended for system memory. [all …]
|
/linux-6.12.1/fs/ext4/ |
D | readpage.c | 1 // SPDX-License-Identifier: GPL-2.0 21 * - encountering a page which has buffers 22 * - encountering a page which has a non-hole after a hole 23 * - encountering a page with non-contiguous blocks 25 * then this code just gives up and calls the buffer_head-based read function. 26 * It does handle a page which has holes at the end - that is a common case: 27 * the end-of-file on blocksize < PAGE_SIZE setups. 44 #include <linux/backing-dev.h> 74 folio_end_read(fi.folio, bio->bi_status == 0); in __read_end_io() 75 if (bio->bi_private) in __read_end_io() [all …]
|
/linux-6.12.1/Documentation/dev-tools/ |
D | kmsan.rst | 1 .. SPDX-License-Identifier: GPL-2.0 19 ------------------- 27 -------------- 32 BUG: KMSAN: uninit-value in test_uninit_kmsan_check_memory+0x1be/0x380 [kmsan_test] 36 kunit_generic_run_threadfn_adapter+0x6d/0xc0 lib/kunit/try-catch.c:28 45 kunit_generic_run_threadfn_adapter+0x6d/0xc0 lib/kunit/try-catch.c:28 53 Bytes 4-7 of 8 are uninitialized 56 CPU: 0 PID: 6731 Comm: kunit_try_catch Tainted: G B E 5.16.0-rc3+ #104 57 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014 71 - in a condition, e.g. ``if (v) { ... }``; [all …]
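
The report above is what KMSAN prints when uninitialized bytes influence control flow or are copied out. A sketch of the kind of bug it flags (deliberately incorrect code; the names are made up):

    #include <linux/kernel.h>

    static noinline void example_kmsan_uninit(void)
    {
            struct {
                    int a;
                    int b;
            } s;

            s.a = 1;                /* s.b (bytes 4-7 of 8) is never written */

            if (s.b)                /* KMSAN: uninit-value used in a condition */
                    pr_info("b was set\n");
    }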
|
/linux-6.12.1/drivers/net/ethernet/intel/ice/ |
D | ice_dcb_lib.c | 1 // SPDX-License-Identifier: GPL-2.0 9 * ice_dcb_get_ena_tc - return bitmap of enabled TCs 43 if (vsi->tc_cfg.ena_tc & BIT(i)) in ice_is_pfc_causing_hung_q() 47 for (tc = 0; tc < num_tcs - 1; tc++) in ice_is_pfc_causing_hung_q() 48 if (ice_find_q_in_range(vsi->tc_cfg.tc_info[tc].qoffset, in ice_is_pfc_causing_hung_q() 49 vsi->tc_cfg.tc_info[tc + 1].qoffset, in ice_is_pfc_causing_hung_q() 56 up2tc = rd32(&pf->hw, PRTDCB_TUP2TC); in ice_is_pfc_causing_hung_q() 70 ref_prio_xoff[i] = pf->stats.priority_xoff_rx[i]; in ice_is_pfc_causing_hung_q() 76 if (pf->stats.priority_xoff_rx[i] > ref_prio_xoff[i]) in ice_is_pfc_causing_hung_q() 83 * ice_dcb_get_mode - gets the DCB mode [all …]
|
/linux-6.12.1/fs/ |
D | mpage.c | 1 // SPDX-License-Identifier: GPL-2.0 30 #include <linux/backing-dev.h> 37 * The mpage code never puts partial pages into a BIO (except for end-of-file). 38 * If a page does not map to a contiguous run of blocks then it simply falls 49 int err = blk_status_to_errno(bio->bi_status); in mpage_read_end_io() 60 int err = blk_status_to_errno(bio->bi_status); in mpage_write_end_io() 64 mapping_set_error(fi.folio->mapping, err); in mpage_write_end_io() 73 bio->bi_end_io = mpage_read_end_io; in mpage_bio_submit_read() 81 bio->bi_end_io = mpage_write_end_io; in mpage_bio_submit_write() 100 struct inode *inode = folio->mapping->host; in map_buffer_to_folio() [all …]
|