/linux-6.12.1/kernel/
D | kexec.c |
      24  struct kexec_segment *segments, in kimage_alloc_init() argument
      47  memcpy(image->segment, segments, nr_segments * sizeof(*segments)); in kimage_alloc_init()
      92  struct kexec_segment *segments, unsigned long flags) in do_kexec_load() argument
     130  ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags); in do_kexec_load()
     243  struct kexec_segment __user *, segments, unsigned long, flags) in SYSCALL_DEFINE4()
     257  ksegments = memdup_array_user(segments, nr_segments, sizeof(ksegments[0])); in SYSCALL_DEFINE4()
     270  struct compat_kexec_segment __user *, segments, in COMPAT_SYSCALL_DEFINE4() argument
     293  result = copy_from_user(&in, &segments[i], sizeof(in)); in COMPAT_SYSCALL_DEFINE4()
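The line-257 match shows how the kexec_load() syscall pulls the user-supplied segment array into the kernel in one overflow-checked step. A minimal sketch of that pattern, not the file's exact code (the KEXEC_SEGMENT_MAX bound comes from the uapi header):

#include <linux/err.h>
#include <linux/kexec.h>	/* struct kexec_segment, KEXEC_SEGMENT_MAX */
#include <linux/string.h>	/* memdup_array_user() */

static struct kexec_segment *
dup_user_segments(const struct kexec_segment __user *segments,
		  unsigned long nr_segments)
{
	if (nr_segments > KEXEC_SEGMENT_MAX)
		return ERR_PTR(-EINVAL);

	/* checked multiply + kmalloc + copy_from_user in one call;
	 * returns ERR_PTR() on failure, free the result with kfree() */
	return memdup_array_user(segments, nr_segments, sizeof(*segments));
}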
|
/linux-6.12.1/rust/macros/ |
D | paste.rs |
       7  let mut segments = Vec::new(); in concat() localVariable
      19  segments.push((value, lit.span())); in concat()
      26  segments.push((value, ident.span())); in concat()
      33  let (mut value, sp) = segments.pop().expect("expected identifier before modifier"); in concat()
      47  segments.push((value, sp)); in concat()
      53  let pasted: String = segments.into_iter().map(|x| x.0).collect(); in concat()
|
/linux-6.12.1/block/ |
D | blk-integrity.c |
      30  unsigned int segments = 0; in blk_rq_count_integrity_sg() local
      46  segments++; in blk_rq_count_integrity_sg()
      54  return segments; in blk_rq_count_integrity_sg()
      73  unsigned int segments = 0; in blk_rq_map_integrity_sg() local
      95  segments++; in blk_rq_map_integrity_sg()
     109  BUG_ON(segments > rq->nr_integrity_segments); in blk_rq_map_integrity_sg()
     110  BUG_ON(segments > queue_max_integrity_segments(q)); in blk_rq_map_integrity_sg()
     111  return segments; in blk_rq_map_integrity_sg()
|
/linux-6.12.1/include/uapi/linux/ |
D | rpl.h |
      42  } segments; member
      45  #define rpl_segaddr segments.addr
      46  #define rpl_segdata segments.data
|
/linux-6.12.1/Documentation/arch/powerpc/ |
D | pci_iov_resource_on_powernv.rst |
      95  * It is divided into 256 segments of equal size. A table in the chip
     108  more segments.
     120  has 256 segments; however, there is no table for mapping a segment
     135  trick, to match to those giant segments.
     144  - We cannot "group" segments in HW, so if a device ends up using more
     153  PEs" that are used for the remaining M64 segments.
     189  equally-sized segments. The finest granularity possible is a 256MB
     190  window with 1MB segments. VF BARs that are 1MB or larger could be
     196  BARs span several segments.
     202  like the M32 window, but the segments can't be individually mapped to
[all …]
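Illustrative arithmetic only, using the numbers quoted in the excerpt (a window is always cut into 256 equal segments, so the finest case mentioned at lines 189-190 works out to 1MB per segment):

/* 256MB M64 window, 256 equal segments -> 1MB per segment, so a VF BAR
 * of 1MB or more covers at least one whole segment; the macro names are
 * invented for this example. */
#define M64_WINDOW_BYTES	(256UL << 20)
#define M64_SEGMENT_COUNT	256UL
#define M64_SEGMENT_BYTES	(M64_WINDOW_BYTES / M64_SEGMENT_COUNT)	/* 1MB */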
|
/linux-6.12.1/arch/arm/mm/ |
D | proc-arm940.S |
     117  mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
     123  bcs 1b @ segments 3 to 0
     172  mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
     178  bcs 1b @ segments 7 to 0
     195  mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
     201  bcs 1b @ segments 7 to 0
     218  mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
     224  bcs 1b @ segments 7 to 0
     241  mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
     252  bcs 1b @ segments 7 to 0
|
/linux-6.12.1/drivers/net/ethernet/sfc/ |
D | ef100_tx.c |
     383  unsigned int segments; in __ef100_enqueue_skb() local
     392  segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0; in __ef100_enqueue_skb()
     393  if (segments == 1) in __ef100_enqueue_skb()
     394  segments = 0; /* Don't use TSO/GSO for a single segment. */ in __ef100_enqueue_skb()
     395  if (segments && !ef100_tx_can_tso(tx_queue, skb)) { in __ef100_enqueue_skb()
     448  rc = efx_tx_map_data(tx_queue, skb, segments); in __ef100_enqueue_skb()
     451  ef100_tx_make_descriptors(tx_queue, skb, segments, efv); in __ef100_enqueue_skb()
     489  if (segments) { in __ef100_enqueue_skb()
     491  tx_queue->tso_packets += segments; in __ef100_enqueue_skb()
     492  tx_queue->tx_packets += segments; in __ef100_enqueue_skb()
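The matches at 392-394 are a recurring idiom, repeated verbatim in tx.c and the siena copies below: a GSO skb that would produce only one segment is handed to the plain transmit path so the TSO machinery is skipped. A minimal sketch of just that test, outside any real driver (the function name is invented for illustration):

#include <linux/skbuff.h>

/* returns the TSO segment count to account for, or 0 for the plain path */
static unsigned int tx_segment_count(struct sk_buff *skb)
{
	unsigned int segments;

	segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
	if (segments == 1)
		segments = 0;	/* don't use TSO/GSO for a single segment */
	return segments;
}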
|
D | tx.c |
     327  unsigned int segments; in __efx_enqueue_skb() local
     332  segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0; in __efx_enqueue_skb()
     333  if (segments == 1) in __efx_enqueue_skb()
     334  segments = 0; /* Don't use TSO for a single segment. */ in __efx_enqueue_skb()
     340  if (segments) { in __efx_enqueue_skb()
     379  if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments))) in __efx_enqueue_skb()
     390  if (segments) { in __efx_enqueue_skb()
     392  tx_queue->tso_packets += segments; in __efx_enqueue_skb()
     393  tx_queue->tx_packets += segments; in __efx_enqueue_skb()
|
D | tx_common.c |
     453  struct sk_buff *segments, *next; in efx_tx_tso_fallback() local
     455  segments = skb_gso_segment(skb, 0); in efx_tx_tso_fallback()
     456  if (IS_ERR(segments)) in efx_tx_tso_fallback()
     457  return PTR_ERR(segments); in efx_tx_tso_fallback()
     461  skb_list_walk_safe(segments, skb, next) { in efx_tx_tso_fallback()
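efx_tx_tso_fallback() above segments the skb in software when hardware TSO cannot be used, then queues each resulting segment individually (the siena copy below is the same shape). A hedged sketch of the walk; xmit_one() is a hypothetical placeholder for the per-segment transmit call, not a real API:

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void xmit_one(struct sk_buff *skb);	/* hypothetical */

static int tso_fallback(struct sk_buff *skb)
{
	struct sk_buff *segments, *next;

	segments = skb_gso_segment(skb, 0);	/* software GSO */
	if (IS_ERR(segments))
		return PTR_ERR(segments);

	dev_consume_skb_any(skb);		/* original skb now redundant */

	skb_list_walk_safe(segments, skb, next) {
		skb_mark_not_on_list(skb);	/* detach before queuing */
		xmit_one(skb);
	}
	return 0;
}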
|
/linux-6.12.1/drivers/net/ethernet/sfc/siena/ |
D | tx.c |
     145  unsigned int segments; in __efx_siena_enqueue_skb() local
     150  segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0; in __efx_siena_enqueue_skb()
     151  if (segments == 1) in __efx_siena_enqueue_skb()
     152  segments = 0; /* Don't use TSO for a single segment. */ in __efx_siena_enqueue_skb()
     158  if (segments) { in __efx_siena_enqueue_skb()
     173  if (!data_mapped && (efx_siena_tx_map_data(tx_queue, skb, segments))) in __efx_siena_enqueue_skb()
|
D | tx_common.c |
     434  struct sk_buff *segments, *next; in efx_siena_tx_tso_fallback() local
     436  segments = skb_gso_segment(skb, 0); in efx_siena_tx_tso_fallback()
     437  if (IS_ERR(segments)) in efx_siena_tx_tso_fallback()
     438  return PTR_ERR(segments); in efx_siena_tx_tso_fallback()
     442  skb_list_walk_safe(segments, skb, next) { in efx_siena_tx_tso_fallback()
|
/linux-6.12.1/Documentation/ABI/testing/ |
D | sysfs-fs-nilfs2 |
     183  What: /sys/fs/nilfs2/<device>/segments/segments_number
     187  Show number of segments on a volume.
     189  What: /sys/fs/nilfs2/<device>/segments/blocks_per_segment
     195  What: /sys/fs/nilfs2/<device>/segments/clean_segments
     199  Show count of clean segments.
     201  What: /sys/fs/nilfs2/<device>/segments/dirty_segments
     205  Show count of dirty segments.
     207  What: /sys/fs/nilfs2/<device>/segments/README
     211  Describe attributes of /sys/fs/nilfs2/<device>/segments
|
D | sysfs-driver-jz4780-efuse |
       6  split into segments. The driver supports read only.
       7  The segments are:
|
/linux-6.12.1/drivers/gpu/drm/ |
D | drm_panic_qr.rs |
     211  fn from_segments(segments: &[&Segment<'_>]) -> Option<Version> { in from_segments()
     213  if v.max_data() * 8 >= segments.iter().map(|s| s.total_size_bits(v)).sum() { in from_segments()
     482  fn new<'a, 'b>(segments: &[&Segment<'b>], data: &'a mut [u8]) -> Option<EncodedMsg<'a>> { in new()
     483  let version = Version::from_segments(segments)?; in new()
     504  em.encode(segments); in new()
     539  fn add_segments(&mut self, segments: &[&Segment<'_>]) { in add_segments()
     542  for s in segments.iter() { in add_segments()
     591  fn encode(&mut self, segments: &[&Segment<'_>]) { in encode()
     592  self.add_segments(segments); in encode()
     963  let segments = &[ in drm_panic_qr_generate() localVariable
[all …]
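from_segments() at line 211 picks the smallest QR version whose data capacity covers the summed bit size of all segments at that version. A hedged C restatement of that selection loop; struct segment, seg_total_bits() and version_max_data() are stand-ins for the Rust types and methods, not real APIs:

struct segment;					/* opaque stand-in */
unsigned int seg_total_bits(const struct segment *s, int version);
unsigned int version_max_data(int version);	/* capacity in bytes */

static int version_from_segments(const struct segment **segs, int nsegs)
{
	for (int v = 1; v <= 40; v++) {		/* QR versions run 1..40 */
		unsigned int need_bits = 0;

		for (int i = 0; i < nsegs; i++)
			need_bits += seg_total_bits(segs[i], v);
		if (version_max_data(v) * 8 >= need_bits)
			return v;		/* smallest fitting version */
	}
	return -1;				/* nothing fits */
}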
|
/linux-6.12.1/drivers/dma/xilinx/ |
D | xilinx_dma.c |
     373  struct list_head segments; member
     649  seg = list_first_entry(&desc->segments, in xilinx_dma_get_metadata_ptr()
     841  INIT_LIST_HEAD(&desc->segments); in xilinx_dma_alloc_tx_descriptor()
     864  list_for_each_entry_safe(segment, next, &desc->segments, node) { in xilinx_dma_free_tx_descriptor()
     870  &desc->segments, node) { in xilinx_dma_free_tx_descriptor()
     876  &desc->segments, node) { in xilinx_dma_free_tx_descriptor()
     882  &desc->segments, node) { in xilinx_dma_free_tx_descriptor()
     992  list_for_each(entry, &desc->segments) { in xilinx_dma_get_residue()
    1428  list_for_each_entry(segment, &desc->segments, node) { in xilinx_vdma_start_transfer()
    1483  tail_segment = list_last_entry(&tail_desc->segments, in xilinx_cdma_start_transfer()
[all …]
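The matches trace one pattern: each transaction descriptor keeps its hardware segments on a standard kernel list (member at 373, init at 841, walks and head/tail lookups elsewhere). A simplified sketch of that layout using stand-in structs, not the driver's real ones:

#include <linux/list.h>

struct hw_segment {			/* stand-in per-hardware segment */
	struct list_head node;
};

struct tx_descriptor {			/* stand-in descriptor */
	struct list_head segments;
};

static void desc_init(struct tx_descriptor *d)
{
	INIT_LIST_HEAD(&d->segments);
}

static void desc_add_segment(struct tx_descriptor *d, struct hw_segment *s)
{
	list_add_tail(&s->node, &d->segments);
	/* head/tail lookups as in the 649/1483 matches:
	 *   list_first_entry(&d->segments, struct hw_segment, node)
	 *   list_last_entry(&d->segments, struct hw_segment, node)
	 */
}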
|
/linux-6.12.1/drivers/bus/mhi/host/ |
D | boot.c |
     318  int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1; in mhi_alloc_bhie_table() local
     328  img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf), in mhi_alloc_bhie_table()
     335  for (i = 0; i < segments; i++, mhi_buf++) { in mhi_alloc_bhie_table()
     339  if (i == segments - 1) in mhi_alloc_bhie_table()
     350  img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf; in mhi_alloc_bhie_table()
     351  img_info->entries = segments; in mhi_alloc_bhie_table()
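The sizing at line 318 reads: one buffer per seg_size chunk of the firmware image, plus one extra entry that becomes the vector table (bhi_vec, taken from the last buffer at line 350). As a small self-contained sketch (function name invented for illustration):

#include <linux/kernel.h>	/* DIV_ROUND_UP() */

static int bhie_segment_count(size_t alloc_size, size_t seg_size)
{
	/* round the image up to whole segments, +1 for the vector table */
	return DIV_ROUND_UP(alloc_size, seg_size) + 1;
}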
|
/linux-6.12.1/Documentation/i2c/muxes/ |
D | i2c-mux-gpio.rst |
      10  i2c-mux-gpio is an i2c mux driver providing access to I2C bus segments
      34  bus, the number of bus segments to create and the GPIO pins used
      37  E.G. something like this for a MUX providing 4 bus segments
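The example the excerpt introduces at line 37 is cut off here. As an assumption-labeled sketch of what such platform data can look like; the field names follow include/linux/platform_data/i2c-mux-gpio.h as I understand it, so verify against the header before relying on this:

#include <linux/kernel.h>			/* ARRAY_SIZE() */
#include <linux/platform_data/i2c-mux-gpio.h>	/* assumed layout */

/* hypothetical board code: 4 bus segments behind parent bus 1 */
static const unsigned myboard_i2cmux_values[] = { 0, 1, 2, 3 };

static struct i2c_mux_gpio_platform_data myboard_i2cmux_data = {
	.parent   = 1,				/* parent bus number */
	.base_nr  = 2,				/* first child bus number */
	.values   = myboard_i2cmux_values,	/* mux setting per segment */
	.n_values = ARRAY_SIZE(myboard_i2cmux_values),
};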
|
/linux-6.12.1/drivers/block/xen-blkback/ |
D | blkback.c |
     707  struct grant_page **pages = req->segments; in xen_blkbk_unmap_and_respond()
     906  rc = xen_blkbk_map(pending_req->ring, pending_req->segments, in xen_blkbk_map_seg()
     921  struct blkif_request_segment *segments = NULL; in xen_blkbk_parse_indirect() local
     939  if (segments) in xen_blkbk_parse_indirect()
     940  kunmap_atomic(segments); in xen_blkbk_parse_indirect()
     941  segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page); in xen_blkbk_parse_indirect()
     945  pending_req->segments[n]->gref = segments[i].gref; in xen_blkbk_parse_indirect()
     947  first_sect = READ_ONCE(segments[i].first_sect); in xen_blkbk_parse_indirect()
     948  last_sect = READ_ONCE(segments[i].last_sect); in xen_blkbk_parse_indirect()
     960  if (segments) in xen_blkbk_parse_indirect()
[all …]
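Lines 921-960 walk indirect descriptors one mapped page at a time: remap whenever the loop crosses into the next page of SEGS_PER_INDIRECT_FRAME entries, then unmap the final page. A simplified skeleton of that walk; grant_page_stub and the parameters are stand-ins, not the driver's real types:

#include <linux/highmem.h>		/* kmap_atomic()/kunmap_atomic() */
#include <xen/interface/io/blkif.h>	/* struct blkif_request_segment */

struct grant_page_stub { struct page *page; };	/* stand-in */

static void walk_indirect(struct grant_page_stub **pages, unsigned int nseg,
			  unsigned int segs_per_frame)
{
	struct blkif_request_segment *segments = NULL;
	unsigned int n;

	for (n = 0; n < nseg; n++) {
		if ((n % segs_per_frame) == 0) {
			if (segments)
				kunmap_atomic(segments);  /* finished page */
			segments = kmap_atomic(pages[n / segs_per_frame]->page);
		}
		/* consume segments[n % segs_per_frame] here */
	}
	if (segments)
		kunmap_atomic(segments);
}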
|
/linux-6.12.1/drivers/media/dvb-frontends/ |
D | mb86a20s.c |
    1344  static u32 interpolate_value(u32 value, const struct linear_segments *segments, in interpolate_value() argument
    1351  if (value >= segments[0].x) in interpolate_value()
    1352  return segments[0].y; in interpolate_value()
    1353  if (value < segments[len-1].x) in interpolate_value()
    1354  return segments[len-1].y; in interpolate_value()
    1358  if (value == segments[i].x) in interpolate_value()
    1359  return segments[i].y; in interpolate_value()
    1360  if (value > segments[i].x) in interpolate_value()
    1365  dy = segments[i].y - segments[i - 1].y; in interpolate_value()
    1366  dx = segments[i - 1].x - segments[i].x; in interpolate_value()
[all …]
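interpolate_value() is a piecewise-linear table lookup; the table is sorted by descending x, hence the clamp tests at 1351/1353. A standalone restatement of the same idea, not the driver's exact arithmetic:

struct linear_segments { unsigned int x, y; };

/* table sorted by descending x; clamps outside the covered range */
static unsigned int interpolate(unsigned int value,
				const struct linear_segments *s,
				unsigned int len)
{
	long long dx, dy;
	unsigned int i;

	if (value >= s[0].x)
		return s[0].y;			/* above the table */
	if (value < s[len - 1].x)
		return s[len - 1].y;		/* below the table */

	for (i = 1; i < len - 1; i++)		/* first entry with x <= value */
		if (value >= s[i].x)
			break;

	/* interpolate between s[i] (lower x) and s[i - 1] (higher x) */
	dy = (long long)s[i - 1].y - s[i].y;
	dx = (long long)s[i - 1].x - s[i].x;
	return s[i].y + (long long)(value - s[i].x) * dy / dx;
}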
|
/linux-6.12.1/Documentation/networking/ |
D | tcp_ao.rst |
       8  segments between trusted peers. It adds a new TCP header option with
      49  |replayed TCP segments | |Extension (SNE) and |
     131  of segments with TCP-AO but that do not match an MKT. The initial default
     135  Alternately, the configuration can be changed to discard segments with
     141  segments with TCP-AO are not discarded solely because they include
     145  segments with unknown key signatures are discarded with warnings logged.
     153  >> All TCP segments MUST be checked against the set of MKTs for matching
     175  by TCP-AO when processing received TCP segments as discussed in the segment
     179  segments are received out of order, and is considered a feature of TCP-AO,
     217  that would allow accepting segments without a sign (which would be insecure).
[all …]
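For context on the MKT matching above: keys are installed per socket. An assumption-labeled user-space sketch of adding one TCP-AO key; the struct fields follow include/uapi/linux/tcp.h as I understand it, so verify against your kernel headers before relying on this:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>		/* struct tcp_ao_add, TCP_AO_ADD_KEY */

/* hypothetical helper: install one MKT for an IPv4 peer */
static int add_ao_key(int fd, const struct sockaddr_in *peer)
{
	struct tcp_ao_add ao = {};

	memcpy(&ao.addr, peer, sizeof(*peer));
	strcpy(ao.alg_name, "hmac(sha256)");
	ao.prefix = 32;			/* match the whole peer address */
	ao.sndid = 100;			/* RFC 5925 SendID/RecvID */
	ao.rcvid = 100;
	ao.keylen = 4;
	memcpy(ao.key, "test", 4);

	return setsockopt(fd, IPPROTO_TCP, TCP_AO_ADD_KEY, &ao, sizeof(ao));
}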
|
/linux-6.12.1/drivers/gpu/drm/arm/ |
D | malidp_crtc.c |
      99  } segments[MALIDP_COEFFTAB_NUM_COEFFS] = { variable
     136  delta_in = segments[i].end - segments[i].start; in malidp_generate_gamma_table()
     138  out_start = drm_color_lut_extract(lut[segments[i].start].green, in malidp_generate_gamma_table()
     140  out_end = drm_color_lut_extract(lut[segments[i].end].green, 12); in malidp_generate_gamma_table()
|
/linux-6.12.1/drivers/scsi/mpi3mr/ |
D | mpi3mr_fw.c |
     508  struct segments *segments = op_reply_q->q_segments; in mpi3mr_get_reply_desc() local
     512  segments[reply_ci / op_reply_q->segment_qd].segment; in mpi3mr_get_reply_desc()
    1770  struct segments *segments; in mpi3mr_free_op_req_q_segments() local
    1772  segments = mrioc->req_qinfo[q_idx].q_segments; in mpi3mr_free_op_req_q_segments()
    1773  if (!segments) in mpi3mr_free_op_req_q_segments()
    1790  if (!segments[j].segment) in mpi3mr_free_op_req_q_segments()
    1793  size, segments[j].segment, segments[j].segment_dma); in mpi3mr_free_op_req_q_segments()
    1794  segments[j].segment = NULL; in mpi3mr_free_op_req_q_segments()
    1814  struct segments *segments; in mpi3mr_free_op_reply_q_segments() local
    1816  segments = mrioc->op_reply_qinfo[q_idx].q_segments; in mpi3mr_free_op_reply_q_segments()
[all …]
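The 508/512 matches show how a reply descriptor is located in a segmented queue: the circular index splits into a segment number and an offset within that segment. A simplified sketch with a stand-in struct (the driver's struct segments holds more than this):

#include <linux/types.h>

struct queue_segment {		/* stand-in for the driver's struct segments */
	void *segment;		/* one contiguous chunk of queue entries */
};

static void *segmented_queue_entry(struct queue_segment *segments,
				   unsigned int ci, unsigned int segment_qd,
				   size_t entry_size)
{
	char *seg = segments[ci / segment_qd].segment;

	return seg + (size_t)(ci % segment_qd) * entry_size;
}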
|
/linux-6.12.1/arch/powerpc/platforms/cell/ |
D | iommu.c |
     303  unsigned long segments, stab_size; in cell_iommu_setup_stab() local
     305  segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT; in cell_iommu_setup_stab()
     308  __func__, iommu->nid, segments); in cell_iommu_setup_stab()
     311  stab_size = segments * sizeof(unsigned long); in cell_iommu_setup_stab()
     324  unsigned long reg, segments, pages_per_segment, ptab_size, in cell_iommu_alloc_ptab() local
     328  segments = size >> IO_SEGMENT_SHIFT; in cell_iommu_alloc_ptab()
     334  ptab_size = segments * pages_per_segment * sizeof(unsigned long); in cell_iommu_alloc_ptab()
     365  for (i = start_seg; i < (start_seg + segments); i++) { in cell_iommu_alloc_ptab()
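The sizing math in the two functions above: the mapped range is cut into 2^IO_SEGMENT_SHIFT-byte segments, the segment table holds one word per segment, and the page table one word per page of every segment. Restated as a sketch (function names invented; pages_per_segment taken as a given, see the file for its derivation):

static unsigned long stab_bytes(unsigned long size, unsigned int seg_shift)
{
	return (size >> seg_shift) * sizeof(unsigned long);
}

static unsigned long ptab_bytes(unsigned long size, unsigned int seg_shift,
				unsigned long pages_per_segment)
{
	unsigned long segments = size >> seg_shift;

	return segments * pages_per_segment * sizeof(unsigned long);
}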
|
/linux-6.12.1/drivers/gpu/drm/amd/display/dc/optc/dcn32/ |
D | dcn32_optc.c |
     103  int segments; in optc32_get_odm_combine_segments() local
     105  REG_GET(OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, &segments); in optc32_get_odm_combine_segments()
     107  switch (segments) { in optc32_get_odm_combine_segments()
|
/linux-6.12.1/Documentation/filesystems/ |
D | nilfs2.rst |
     116  segments. This ioctl is used in lssu,
     121  segments. This ioctl is used by
     123  cleaning operation of segments and reduce
     149  NILFS_IOCTL_SET_ALLOC_RANGE Define lower limit of segments in bytes and
     150  upper limit of segments in bytes. This ioctl
     191  A nilfs2 volume is equally divided into a number of segments except
     240  3) Segment usage file (sufile) -- Stores allocation state of segments
|