Searched refs:slices (Results 1 – 25 of 48) sorted by relevance

/linux-6.12.1/block/partitions/
sysv68.c
51 int i, slices; in sysv68_partition() local
68 slices = be16_to_cpu(b->dk_ios.ios_slccnt); in sysv68_partition()
76 slices -= 1; /* last slice is the whole disk */ in sysv68_partition()
77 snprintf(tmp, sizeof(tmp), "sysV68: %s(s%u)", state->name, slices); in sysv68_partition()
80 for (i = 0; i < slices; i++, slice++) { in sysv68_partition()
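
The sysv68_partition() hits above read a big-endian 16-bit slice count from the disk label, drop the final entry (the last slice covers the whole disk), and loop over what remains. A minimal userspace sketch of that arithmetic, with a made-up count and ntohs() standing in for be16_to_cpu():

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>          /* ntohs(), htons() */

    int main(void)
    {
        uint16_t ios_slccnt_be = htons(5);   /* pretend on-disk, big-endian count */
        int slices = ntohs(ios_slccnt_be);   /* be16_to_cpu() equivalent */

        slices -= 1;                         /* last slice is the whole disk */
        printf("sysV68: sda(s%d)\n", slices);

        for (int i = 0; i < slices; i++)
            printf("slice %d would become a partition\n", i);
        return 0;
    }
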
/linux-6.12.1/drivers/hte/
hte-tegra194.c
120 u32 slices; member
326 .slices = 3,
335 .slices = 3,
342 .slices = 11,
349 .slices = 17,
689 u32 i, slices, val = 0; in tegra_hte_probe() local
709 ret = of_property_read_u32(dev->of_node, "nvidia,slices", &slices); in tegra_hte_probe()
711 slices = hte_dev->prov_data->slices; in tegra_hte_probe()
713 dev_dbg(dev, "slices:%d\n", slices); in tegra_hte_probe()
714 nlines = slices << 5; in tegra_hte_probe()
[all …]
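
The tegra_hte_probe() hits above pick a per-SoC default slice count (3, 11 or 17 in the match data), let a "nvidia,slices" device-tree property override it, and derive the line count as slices << 5, i.e. 32 lines per slice. A minimal sketch of that logic, with a stand-in for of_property_read_u32():

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for of_property_read_u32(); pretends the property is absent. */
    static int fake_read_u32(const char *prop, uint32_t *val)
    {
        (void)prop;
        (void)val;
        return -1;
    }

    int main(void)
    {
        const uint32_t default_slices = 3;   /* one of the per-SoC defaults above */
        uint32_t slices, nlines;

        if (fake_read_u32("nvidia,slices", &slices))
            slices = default_slices;         /* property missing: use SoC default */

        nlines = slices << 5;                /* 32 lines per slice */
        printf("slices:%u -> %u lines\n", slices, nlines);
        return 0;
    }
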
/linux-6.12.1/drivers/phy/lantiq/
phy-lantiq-vrx200-pcie.c
202 static const struct reg_default slices[] = { in ltq_vrx200_pcie_phy_apply_workarounds() local
218 for (i = 0; i < ARRAY_SIZE(slices); i++) { in ltq_vrx200_pcie_phy_apply_workarounds()
220 regmap_update_bits(priv->phy_regmap, slices[i].reg, in ltq_vrx200_pcie_phy_apply_workarounds()
221 slices[i].def, slices[i].def); in ltq_vrx200_pcie_phy_apply_workarounds()
226 regmap_update_bits(priv->phy_regmap, slices[i].reg, in ltq_vrx200_pcie_phy_apply_workarounds()
227 slices[i].def, 0x0); in ltq_vrx200_pcie_phy_apply_workarounds()
/linux-6.12.1/Documentation/admin-guide/perf/
qcom_l3_pmu.rst
6 Centriq SoCs. The L3 cache on these SOCs is composed of multiple slices, shared
9 for aggregating across slices.
/linux-6.12.1/drivers/crypto/intel/qat/qat_common/
adf_admin.c
334 struct icp_qat_fw_init_admin_slice_cnt *slices) in adf_send_admin_rl_init() argument
347 memcpy(slices, &resp.slices, sizeof(*slices)); in adf_send_admin_rl_init()
521 memcpy(slice_count, &resp.slices, sizeof(*slice_count)); in adf_send_admin_tl_start()
adf_admin.h
18 struct icp_qat_fw_init_admin_slice_cnt *slices);
adf_rl.c
565 avail_slice_cycles *= device_data->slices.pke_cnt; in adf_rl_calculate_slice_tokens()
568 avail_slice_cycles *= device_data->slices.cph_cnt; in adf_rl_calculate_slice_tokens()
571 avail_slice_cycles *= device_data->slices.dcpr_cnt; in adf_rl_calculate_slice_tokens()
623 sla_to_bytes *= device_data->slices.dcpr_cnt - in adf_rl_calculate_pci_bw()
1139 ret = adf_rl_send_admin_init_msg(accel_dev, &rl_hw_data->slices); in adf_rl_start()
adf_rl.h
97 struct rl_slice_cnt slices; member
icp_qat_fw_init_admin.h
160 struct icp_qat_fw_init_admin_slice_cnt slices; member
/linux-6.12.1/drivers/net/dsa/
bcm_sf2_cfp.c
29 u8 slices[UDFS_PER_SLICE]; member
44 .slices = {
69 .slices = {
93 .slices = {
149 if (memcmp(slice_layout->slices, zero_slice, in bcm_sf2_get_slice_number()
165 core_writel(priv, layout->udfs[slice_num].slices[i], in bcm_sf2_cfp_udf_set()
410 num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices); in bcm_sf2_cfp_ipv4_rule_set()
668 num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices); in bcm_sf2_cfp_ipv6_rule_set()
774 num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices); in bcm_sf2_cfp_ipv6_rule_set()
/linux-6.12.1/Documentation/scheduler/
sched-eevdf.rst
21 allows latency-sensitive tasks with shorter time slices to be prioritized,
31 can request specific time slices using the new sched_setattr() system call,
schedutil.rst
15 individual tasks to task-group slices to CPU runqueues. As the basis for this
31 Note that blocked tasks still contribute to the aggregates (task-group slices
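
The sched-eevdf.rst hit notes that tasks can request specific time slices through sched_setattr(). A hypothetical userspace sketch of such a request; it assumes (this is not shown in the listing) that the slice is passed in nanoseconds via sched_attr.sched_runtime for a normal-policy task, and it simply reports failure on kernels that reject the request:

    #define _GNU_SOURCE
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    /* Local definition, as in the sched_setattr(2) man page example. */
    struct sched_attr {
        uint32_t size;
        uint32_t sched_policy;
        uint64_t sched_flags;
        int32_t  sched_nice;
        uint32_t sched_priority;
        uint64_t sched_runtime;      /* assumed: requested slice, in nanoseconds */
        uint64_t sched_deadline;
        uint64_t sched_period;
    };

    int main(void)
    {
        struct sched_attr attr = {
            .size          = sizeof(attr),
            .sched_policy  = 0,                  /* SCHED_OTHER */
            .sched_runtime = 3 * 1000 * 1000ULL, /* ask for roughly a 3 ms slice */
        };

        if (syscall(SYS_sched_setattr, 0 /* current thread */, &attr, 0) != 0) {
            perror("sched_setattr");             /* older kernels reject this */
            return 1;
        }
        puts("custom slice requested");
        return 0;
    }
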
/linux-6.12.1/drivers/misc/cxl/
pci.c
1304 CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices); in cxl_read_vsec()
1318 adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices; in cxl_read_vsec()
1376 if (!adapter->slices) { in cxl_vsec_looks_ok()
1554 for (slice = 0; slice < adapter->slices; slice++) { in cxl_stop_trace_psl8()
1753 for (slice = 0; slice < adapter->slices; slice++) { in cxl_probe()
1777 for (i = 0; i < adapter->slices; i++) { in cxl_remove()
1838 for (i = 0; i < adapter->slices; i++) { in cxl_pci_error_detected()
1932 for (i = 0; i < adapter->slices; i++) { in cxl_pci_error_detected()
1986 for (i = 0; i < adapter->slices; i++) { in cxl_pci_slot_reset()
2069 for (i = 0; i < adapter->slices; i++) { in cxl_pci_resume()
of.c
279 for (afu = 0; afu < adapter->slices; afu++) in cxl_of_remove()
323 adapter->slices = 0; in cxl_of_probe()
guest.c
273 for (i = 0; i < adapter->slices; i++) { in guest_reset()
282 for (i = 0; i < adapter->slices; i++) { in guest_reset()
943 adapter->slices++; in cxl_guest_init_afu()
1117 adapter->slices = 0; in cxl_guest_init_adapter()
main.c
89 for (slice = 0; slice < adapter->slices; slice++) { in cxl_slbia_core()
/linux-6.12.1/drivers/gpu/drm/i915/gt/
intel_sseu.c
662 u8 slices, subslices; in intel_sseu_make_rpcs() local
679 slices = hweight8(req_sseu->slice_mask); in intel_sseu_make_rpcs()
708 slices == 1 && in intel_sseu_make_rpcs()
713 slices *= 2; in intel_sseu_make_rpcs()
723 u32 mask, val = slices; in intel_sseu_make_rpcs()
/linux-6.12.1/drivers/gpu/drm/i915/gem/selftests/
i915_gem_context.c
1137 __check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected, in __check_rpcs() argument
1140 if (slices == expected) in __check_rpcs()
1143 if (slices < 0) { in __check_rpcs()
1145 name, prefix, slices, suffix); in __check_rpcs()
1146 return slices; in __check_rpcs()
1150 name, prefix, slices, expected, suffix); in __check_rpcs()
1153 rpcs, slices, in __check_rpcs()
1169 unsigned int slices = hweight32(ce->engine->sseu.slice_mask); in __sseu_finish() local
1186 ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!"); in __sseu_finish()
/linux-6.12.1/drivers/gpu/drm/i915/display/
skl_watermark.c
631 if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe]) in intel_crtc_dbuf_weights()
667 dbuf_slice_mask = new_dbuf_state->slices[pipe]; in skl_crtc_allocate_ddb()
684 if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] && in skl_crtc_allocate_ddb()
707 old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe], in skl_crtc_allocate_ddb()
2483 enabled_slices |= dbuf_state->slices[pipe]; in intel_dbuf_enabled_slices()
2533 new_dbuf_state->slices[pipe] = in skl_compute_ddb()
2537 if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe]) in skl_compute_ddb()
2989 u8 slices; in skl_wm_get_hw_state() local
3021 slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes, in skl_wm_get_hw_state()
3023 mbus_offset = mbus_ddb_offset(i915, slices); in skl_wm_get_hw_state()
[all …]
skl_watermark.h
62 u8 slices[I915_MAX_PIPES]; member
/linux-6.12.1/fs/bfs/
Kconfig
13 to "UnixWare slices support", below. More information about the BFS
/linux-6.12.1/arch/arm64/kvm/
guest.c
654 const unsigned int slices = vcpu_sve_slices(vcpu); in num_sve_regs() local
662 return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */) in num_sve_regs()
669 const unsigned int slices = vcpu_sve_slices(vcpu); in copy_sve_reg_indices() local
689 for (i = 0; i < slices; i++) { in copy_sve_reg_indices()
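
For scale on the num_sve_regs() hit: the arm64 UAPI headers define SVE_NUM_ZREGS as 32 and SVE_NUM_PREGS as 16, so each slice accounts for 32 + 16 + 1 (FFR) = 49 register IDs, and the total grows linearly with the slice count.
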
/linux-6.12.1/drivers/usb/dwc2/
hcd_queue.c
521 int slices = DIV_ROUND_UP(qh->device_us, DWC2_US_PER_SLICE); in dwc2_ls_pmap_schedule() local
541 DWC2_LS_SCHEDULE_FRAMES, slices, in dwc2_ls_pmap_schedule()
560 int slices = DIV_ROUND_UP(qh->device_us, DWC2_US_PER_SLICE); in dwc2_ls_pmap_unschedule() local
568 DWC2_LS_SCHEDULE_FRAMES, slices, qh->device_interval, in dwc2_ls_pmap_unschedule()
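
The dwc2_ls_pmap_schedule() hits convert a transfer's per-period bus time into whole scheduler slices by rounding up. A minimal sketch of that rounding; the slice width here is a placeholder, since the real DWC2_US_PER_SLICE value is not shown in this listing:

    #include <stdio.h>

    #define US_PER_SLICE 25                      /* placeholder slice width, in us */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        int device_us = 53;                      /* hypothetical per-period bus time */
        int slices = DIV_ROUND_UP(device_us, US_PER_SLICE);

        printf("%d us -> %d slice(s)\n", device_us, slices);  /* 53 us -> 3 slice(s) */
        return 0;
    }
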
/linux-6.12.1/drivers/accel/qaic/
qaic.h
189 struct list_head slices; member
/linux-6.12.1/Documentation/filesystems/
bfs.rst
12 know the partition number and the kernel must support UnixWare disk slices
