
Full-text search for "bounce" (results 1 – 25 of 311), sorted by relevance


/linux-6.12.1/Documentation/core-api/
swiotlb.rst
13 memory buffer. This approach is generically called "bounce buffering", and the
14 temporary memory buffer is called a "bounce buffer".
20 if bounce buffering is necessary. If so, the DMA layer manages the allocation,
21 freeing, and sync'ing of bounce buffers. Since the DMA attributes are per
22 device, some devices in a system may use bounce buffering while others do not.
24 Because the CPU copies data between the bounce buffer and the original target
25 memory buffer, doing bounce buffering is slower than doing DMA directly to the
33 only provide 32-bit DMA addresses. By allocating bounce buffer memory below
41 to force all DMA I/O to use bounce buffers, and the bounce buffer memory is set
42 up as unencrypted. The host does DMA I/O to/from the bounce buffer memory, and
[all …]
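The swiotlb document above describes behaviour that is invisible to drivers; below is a minimal sketch of the streaming-DMA calls that may be bounced transparently, assuming a hypothetical device and buffer (example_tx and its parameters are illustrative, not taken from the source)::

	#include <linux/dma-mapping.h>

	/*
	 * Hypothetical transmit path: map a driver-owned buffer for DMA.
	 * If the device cannot reach the buffer directly (32-bit DMA mask,
	 * encrypted guest memory, ...), the DMA layer bounces the data
	 * through swiotlb without the driver doing anything special.
	 */
	static int example_tx(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t dma;

		dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			return -ENOMEM;

		/* ... hand 'dma' to the device and wait for completion ... */

		dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
		return 0;
	}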
/linux-6.12.1/lib/
test_hmm.c
127 static int dmirror_bounce_init(struct dmirror_bounce *bounce, in dmirror_bounce_init() argument
131 bounce->addr = addr; in dmirror_bounce_init()
132 bounce->size = size; in dmirror_bounce_init()
133 bounce->cpages = 0; in dmirror_bounce_init()
134 bounce->ptr = vmalloc(size); in dmirror_bounce_init()
135 if (!bounce->ptr) in dmirror_bounce_init()
155 static void dmirror_bounce_fini(struct dmirror_bounce *bounce) in dmirror_bounce_fini() argument
157 vfree(bounce->ptr); in dmirror_bounce_fini()
361 unsigned long end, struct dmirror_bounce *bounce) in dmirror_do_read() argument
366 ptr = bounce->ptr + ((start - bounce->addr) & PAGE_MASK); in dmirror_do_read()
[all …]
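The test_hmm.c hits show a vmalloc()-backed bounce structure; below is a reduced sketch of the init/fini pairing those fragments imply, with the struct layout assumed from the matched lines::

	#include <linux/errno.h>
	#include <linux/vmalloc.h>

	struct dmirror_bounce {
		void		*ptr;
		unsigned long	size;
		unsigned long	addr;
		unsigned long	cpages;
	};

	/* Set up a vmalloc()-backed bounce buffer covering [addr, addr + size). */
	static int dmirror_bounce_init(struct dmirror_bounce *bounce,
				       unsigned long addr, unsigned long size)
	{
		bounce->addr = addr;
		bounce->size = size;
		bounce->cpages = 0;
		bounce->ptr = vmalloc(size);
		if (!bounce->ptr)
			return -ENOMEM;
		return 0;
	}

	/* Release the bounce buffer allocated by dmirror_bounce_init(). */
	static void dmirror_bounce_fini(struct dmirror_bounce *bounce)
	{
		vfree(bounce->ptr);
	}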
/linux-6.12.1/arch/s390/mm/
maccess.c
153 void *bounce = ptr; in xlate_dev_mem_ptr() local
165 bounce = (void *)__get_free_page(GFP_ATOMIC); in xlate_dev_mem_ptr()
166 if (!bounce) in xlate_dev_mem_ptr()
172 memcpy(bounce, ptr, size); in xlate_dev_mem_ptr()
176 memcpy(bounce, ptr, size); in xlate_dev_mem_ptr()
178 memcpy(bounce, ptr, size); in xlate_dev_mem_ptr()
183 return bounce; in xlate_dev_mem_ptr()
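The s390 xlate_dev_mem_ptr() fragments copy data into a freshly allocated bounce page; a simplified sketch of that pattern follows (the s390-specific address checks and the free path are omitted, and the helper name is made up)::

	#include <linux/gfp.h>
	#include <linux/string.h>

	/*
	 * Copy 'size' bytes from 'ptr' into a temporary bounce page so the
	 * caller never dereferences the original /dev/mem mapping directly.
	 * Returns the bounce page (freed later with free_page()) or NULL.
	 */
	static void *bounce_dev_mem(const void *ptr, size_t size)
	{
		void *bounce;

		if (size > PAGE_SIZE)
			return NULL;

		bounce = (void *)__get_free_page(GFP_ATOMIC);
		if (!bounce)
			return NULL;

		memcpy(bounce, ptr, size);
		return bounce;
	}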
/linux-6.12.1/Documentation/i2c/
dma-considerations.rst
49 bounce buffer. But you don't need to care about that detail, just use the
50 returned buffer. If NULL is returned, the threshold was not met or a bounce
54 function ensures a potentially used bounce buffer is freed::
62 The bounce buffer handling from the core is generic and simple. It will always
63 allocate a new bounce buffer. If you want a more sophisticated handling (e.g.
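The quoted i2c document refers to the core's DMA-safe buffer helpers; here is a small usage sketch from an imaginary bus-master driver, where the threshold value and the PIO fallback are illustrative::

	#include <linux/errno.h>
	#include <linux/i2c.h>

	/*
	 * Ask the I2C core for a DMA-safe buffer for this message.  The core
	 * may return msg->buf itself or a freshly allocated bounce buffer;
	 * the driver treats both the same way.
	 */
	static int example_xfer_msg(struct i2c_msg *msg)
	{
		bool xferred = false;
		u8 *dma_buf;

		dma_buf = i2c_get_dma_safe_msg_buf(msg, 8 /* illustrative threshold */);
		if (!dma_buf)
			return -EAGAIN;	/* too short or no memory: caller falls back to PIO */

		/* ... run the DMA transfer using dma_buf, set xferred on success ... */

		/*
		 * Frees the bounce buffer if one was used and, for reads, copies
		 * the received data back into msg->buf when xferred is true.
		 */
		i2c_put_dma_safe_msg_buf(dma_buf, msg, xferred);
		return 0;
	}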
/linux-6.12.1/block/
bounce.c
2 /* bounce buffer handling for block devices
72 * Simple bounce buffer support for highmem pages. Depending on the
81 * The bio of @from is created by bounce, so we can iterate in copy_to_high_bio_irq()
110 * free up bounce indirect pages used in bounce_end_io()
210 bool bounce = false; in __blk_queue_bounce() local
217 bounce = true; in __blk_queue_bounce()
219 if (!bounce) in __blk_queue_bounce()
blk-crypto-fallback.c
28 "Number of preallocated bounce pages for the blk-crypto crypto API fallback");
257 * Allocate a bounce bio for encryption, encrypt the input bio using crypto API,
258 * and replace *bio_ptr with the bounce bio. May split input bio if it's too
285 /* Allocate bounce bio for encryption */ in blk_crypto_fallback_encrypt_bio()
316 /* Encrypt each page in the bounce bio */ in blk_crypto_fallback_encrypt_bio()
478 * bounce bio for the first part, encrypts it, and updates bio_ptr to point to
479 * the bounce bio.
/linux-6.12.1/drivers/gpu/drm/
drm_cache.c
39 /* A small bounce buffer that fits on the stack. */
194 * ttm_populate_and_map_pages(), which bounce buffers so much in in drm_need_swiotlb()
226 * Bounce size is not performance tuned, but using a in memcpy_fallback()
227 * bounce buffer like this is significantly faster than in memcpy_fallback()
230 char bounce[MEMCPY_BOUNCE_SIZE]; in memcpy_fallback() local
235 memcpy_fromio(bounce, _src, MEMCPY_BOUNCE_SIZE); in memcpy_fallback()
236 memcpy_toio(_dst, bounce, MEMCPY_BOUNCE_SIZE); in memcpy_fallback()
242 memcpy_fromio(bounce, _src, MEMCPY_BOUNCE_SIZE); in memcpy_fallback()
243 memcpy_toio(_dst, bounce, MEMCPY_BOUNCE_SIZE); in memcpy_fallback()
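The drm_cache.c hit copies between two I/O mappings through a small on-stack bounce buffer; a condensed sketch of that chunked loop follows (the function name and bounce size are illustrative)::

	#include <linux/io.h>
	#include <linux/minmax.h>

	#define EXAMPLE_BOUNCE_SIZE 64	/* small enough to live on the stack */

	/*
	 * Copy 'len' bytes from one __iomem region to another via a stack
	 * bounce buffer, since memcpy_fromio()/memcpy_toio() each need one
	 * side to be ordinary kernel memory.
	 */
	static void copy_io_to_io(void __iomem *dst, const void __iomem *src,
				  size_t len)
	{
		char bounce[EXAMPLE_BOUNCE_SIZE];

		while (len) {
			size_t chunk = min_t(size_t, len, sizeof(bounce));

			memcpy_fromio(bounce, src, chunk);
			memcpy_toio(dst, bounce, chunk);
			src += chunk;
			dst += chunk;
			len -= chunk;
		}
	}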
/linux-6.12.1/fs/crypto/
crypto.c
54 * Oops, the filesystem called a function that uses the bounce in fscrypt_alloc_bounce_page()
63 * fscrypt_free_bounce_page() - free a ciphertext bounce page
64 * @bounce_page: the bounce page to free, or NULL
66 * Free a bounce page that was allocated by fscrypt_encrypt_pagecache_blocks(),
162 * This allocates a new bounce page and encrypts the given data into it. The
167 * In the bounce page, the ciphertext data will be located at the same offset at
169 * the bounce page will be left uninitialized.
173 * The bounce page allocation is mempool-backed, so it will always succeed when
178 * Return: the new encrypted bounce page on success; an ERR_PTR() on failure
345 /* No need to allocate a bounce page pool if this FS won't use it. */ in fscrypt_initialize()
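The fs/crypto fragments describe the ciphertext bounce-page helpers; below is a hedged writeback-style sketch, assuming the struct page based fscrypt_encrypt_pagecache_blocks() interface the comments refer to (bio submission is omitted and the function name is invented)::

	#include <linux/err.h>
	#include <linux/fscrypt.h>
	#include <linux/pagemap.h>

	/*
	 * Encrypt one pagecache page into a mempool-backed bounce page before
	 * writeback; the bounce page is released once the write has completed.
	 */
	static int example_encrypt_for_writeback(struct page *page)
	{
		struct page *bounce_page;

		bounce_page = fscrypt_encrypt_pagecache_blocks(page, PAGE_SIZE, 0,
							       GFP_NOFS);
		if (IS_ERR(bounce_page))
			return PTR_ERR(bounce_page);

		/* ... submit a bio whose data pages point at bounce_page ... */

		/* From the bio completion path, once the write is done: */
		fscrypt_free_bounce_page(bounce_page);
		return 0;
	}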
/linux-6.12.1/drivers/gpu/drm/vmwgfx/
vmwgfx_ioctl.c
131 void *bounce = NULL; in vmw_get_cap_3d_ioctl() local
149 bounce = vzalloc(size); in vmw_get_cap_3d_ioctl()
150 if (unlikely(bounce == NULL)) { in vmw_get_cap_3d_ioctl()
151 DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n"); in vmw_get_cap_3d_ioctl()
155 ret = vmw_devcaps_copy(dev_priv, vmw_fp->gb_aware, bounce, size); in vmw_get_cap_3d_ioctl()
159 ret = copy_to_user(buffer, bounce, size); in vmw_get_cap_3d_ioctl()
163 vfree(bounce); in vmw_get_cap_3d_ioctl()
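The vmwgfx ioctl above stages its reply in a vzalloc()'d bounce buffer before a single copy_to_user(); here is a generic sketch of that ioctl pattern, with the fill step left as a placeholder::

	#include <linux/errno.h>
	#include <linux/uaccess.h>
	#include <linux/vmalloc.h>

	/*
	 * Build a reply in a zeroed kernel bounce buffer, then hand it to
	 * userspace with a single copy_to_user() call.
	 */
	static int example_copy_caps(void __user *buffer, size_t size)
	{
		void *bounce;
		int ret = 0;

		bounce = vzalloc(size);
		if (!bounce)
			return -ENOMEM;

		/* ... fill 'bounce' with whatever the ioctl reports ... */

		if (copy_to_user(buffer, bounce, size))
			ret = -EFAULT;

		vfree(bounce);
		return ret;
	}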
/linux-6.12.1/arch/arm/kernel/
sigreturn_codes.S
98 /* ARM sigreturn restorer FDPIC bounce code snippet */
108 /* Thumb sigreturn restorer FDPIC bounce code snippet */
115 /* ARM sigreturn_rt restorer FDPIC bounce code snippet */
125 /* Thumb sigreturn_rt restorer FDPIC bounce code snippet */
/linux-6.12.1/drivers/scsi/
gvp11.c
75 /* use bounce buffer if the physical address is bad */ in dma_setup()
95 "GVP II SCSI Bounce Buffer"); in dma_setup()
106 /* copy to bounce buffer for a write */ in dma_setup()
120 "cannot map bounce buffer %p\n", in dma_setup()
141 "GVP II SCSI Bounce Buffer"); in dma_setup()
149 /* copy to bounce buffer for a write */ in dma_setup()
204 /* copy from a bounce buffer, if necessary */ in dma_stop()
a2091.c
87 /* copy to bounce buffer for a write */ in dma_setup()
97 dev_warn(hdata->dev, "cannot map bounce buffer %p\n", in dma_setup()
102 /* the bounce buffer may not be in the first 16M of physmem */ in dma_setup()
172 /* copy from a bounce buffer, if necessary */ in dma_stop()
/linux-6.12.1/fs/bcachefs/
io_read.c
153 BUG_ON(!rbio->bounce); in promote_start()
211 (*rbio)->bounce = true; in __promote_alloc()
279 bool *bounce, in promote_alloc() argument
314 *bounce = true; in promote_alloc()
356 BUG_ON(rbio->bounce && !rbio->split); in bch2_rbio_free()
362 if (rbio->bounce) in bch2_rbio_free()
583 if (rbio->bounce) { in __bch2_read_endio()
632 if (rbio->bounce) { in __bch2_read_endio()
665 if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) { in __bch2_read_endio()
849 bool bounce = false, read_full = false, narrow_crcs = false; in __bch2_read_extent() local
[all …]
/linux-6.12.1/fs/erofs/
compress.h
90 void *bounce; /* bounce buffer for inplace I/Os */ member
91 bool bounced; /* is the bounce buffer used now? */
/linux-6.12.1/arch/arm64/include/asm/
spectre.h
44 * Bounce via a slot in the hypervisor text mapping of
50 * Bounce via a slot in a special mapping of __bp_harden_hyp_vecs
56 * Bounce via a slot in a special mapping of __bp_harden_hyp_vecs
/linux-6.12.1/include/linux/
swiotlb.h
18 #define SWIOTLB_FORCE (1 << 1) /* force bounce buffering */
57 * address for bounce buffer operation.
138 * * pool address if @paddr points into a bounce buffer
139 * * NULL if @paddr does not point into a bounce buffer. As such, this function
140 * can be used to determine if @paddr denotes a swiotlb bounce buffer.
/linux-6.12.1/Documentation/devicetree/bindings/firmware/
brcm,kona-smc.yaml
7 title: Broadcom Kona family Secure Monitor bounce buffer
10 A bounce buffer used for non-secure to secure communications.
/linux-6.12.1/tools/testing/selftests/mm/
uffd-stress.c
27 * When all threads of type 3 completed the transfer, one bounce is
29 * respawned and so the bounce is immediately restarted in the
310 * bounce though: that racing UFFDIO_COPY would in userfaultfd_stress()
315 * next bounce, effectively leaving a zeropage in the in userfaultfd_stress()
329 /* bounce pass */ in userfaultfd_stress()
356 /* prepare next bounce */ in userfaultfd_stress()
/linux-6.12.1/kernel/dma/
swiotlb.c
163 pr_info("SWIOTLB bounce buffer size roundup to %luMB", in swiotlb_adjust_nareas()
226 pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20); in swiotlb_adjust_size()
326 * By default allocate the bounce buffer memory from low memory, but in swiotlb_memblock_alloc()
351 * Statically reserve bounce buffer space and initialize bounce buffer data
393 pr_info("SWIOTLB bounce buffer size adjusted %lu -> %lu slabs", in swiotlb_init_remap()
841 * original address and the bounce buffer address. High bits are preserved by
843 * padding bytes before the bounce buffer.
857 * Bounce: copy the swiotlb buffer from or back to the original dma location
1006 * @alloc_size: Total requested size of the bounce buffer,
1131 * @alloc_size: Total requested size of the bounce buffer,
[all …]
/linux-6.12.1/arch/x86/kernel/
pci-dma.c
51 * Set swiotlb to 1 so that bounce buffers are allocated and used for in pci_swiotlb_detect()
59 * bounce buffers as the hypervisor can't access arbitrary VM memory in pci_swiotlb_detect()
183 pr_info("PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n"); in pci_iommu_init()
/linux-6.12.1/Documentation/block/
inline-encryption.rst
149 on it being unmodified. Instead, blk-crypto-fallback allocates bounce pages,
150 fills a new bio with those bounce pages, encrypts the data into those bounce
151 pages, and submits that "bounce" bio. When the bounce bio completes,
153 large, multiple bounce bios may be required; see the code for details.
/linux-6.12.1/drivers/char/
mem.c
106 char *bounce; in read_mem() local
130 bounce = kmalloc(PAGE_SIZE, GFP_KERNEL); in read_mem()
131 if (!bounce) in read_mem()
159 probe = copy_from_kernel_nofault(bounce, ptr, sz); in read_mem()
164 remaining = copy_to_user(buf, bounce, sz); in read_mem()
177 kfree(bounce); in read_mem()
183 kfree(bounce); in read_mem()
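The /dev/mem read path above goes through a kmalloc()'d bounce buffer so a faulting source address cannot corrupt the copy to userspace; a trimmed sketch of one iteration of that loop follows (chunking and offset bookkeeping are omitted, the helper name is invented)::

	#include <linux/mm.h>
	#include <linux/slab.h>
	#include <linux/uaccess.h>

	/*
	 * Read up to one page from a kernel address via a bounce buffer:
	 * copy_from_kernel_nofault() absorbs faults on the source, so only
	 * data that was actually readable is copied out to userspace.
	 */
	static ssize_t example_read_chunk(char __user *buf, const void *ptr, size_t sz)
	{
		char *bounce;
		ssize_t ret;

		if (sz > PAGE_SIZE)
			return -EINVAL;

		bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!bounce)
			return -ENOMEM;

		if (copy_from_kernel_nofault(bounce, ptr, sz))
			ret = -EFAULT;
		else if (copy_to_user(buf, bounce, sz))
			ret = -EFAULT;
		else
			ret = sz;

		kfree(bounce);
		return ret;
	}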
/linux-6.12.1/drivers/net/ethernet/amazon/ena/
ena_eth_com.c
71 /* Make sure everything was written into the bounce buffer before in ena_com_write_bounce_buffer_to_dev()
72 * writing the bounce buffer to the device in ena_com_write_bounce_buffer_to_dev()
111 netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n"); in ena_com_write_header_to_bounce()
129 netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n"); in get_sq_desc_llq()
149 /* bounce buffer was used, so write it and get a new one */ in ena_com_close_bounce_buffer()
155 "Failed to write bounce buffer to device\n"); in ena_com_close_bounce_buffer()
189 "Failed to write bounce buffer to device\n"); in ena_com_sq_update_llq_tail()
/linux-6.12.1/Documentation/devicetree/bindings/leds/
leds-el15203000.txt
20 - bounce pattern
21 - inversed bounce pattern
/linux-6.12.1/kernel/module/
sysfs.c
40 char bounce[MODULE_SECT_READ_SIZE + 1]; in module_sect_read() local
52 * the NUL, we have to use a bounce buffer. in module_sect_read()
54 wrote = scnprintf(bounce, sizeof(bounce), "0x%px\n", in module_sect_read()
58 memcpy(buf, bounce, count); in module_sect_read()
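The module sysfs hit formats into an on-stack bounce buffer because scnprintf() always NUL-terminates, while the read must return exactly count bytes; a small sketch of that trick follows, with an illustrative buffer size and function name::

	#include <linux/kernel.h>
	#include <linux/string.h>

	#define EXAMPLE_READ_SIZE 19	/* fits "0x%px\n" for a 64-bit pointer */

	/*
	 * Format into a bounce buffer one byte larger than the data we may
	 * return, because scnprintf() always NUL-terminates; then copy only
	 * 'count' bytes into the caller's buffer, without the trailing NUL.
	 */
	static size_t example_sect_read(char *buf, size_t count, const void *addr)
	{
		char bounce[EXAMPLE_READ_SIZE + 1];
		size_t wrote;

		wrote = scnprintf(bounce, sizeof(bounce), "0x%px\n", addr);
		count = min(count, wrote);
		memcpy(buf, bounce, count);
		return count;
	}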
