Lines Matching has-builtin-dma
1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
6 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
55 p_slc->sz_k = 128 << slc_cfg.sz; in read_decode_cache_bcr_arcv2()
56 l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64; in read_decode_cache_bcr_arcv2()
57 n += scnprintf(buf + n, len - n, in read_decode_cache_bcr_arcv2()
59 p_slc->sz_k, p_slc->line_len, IS_USED_RUN(slc_enable)); in read_decode_cache_bcr_arcv2()
69 * only ZONE_NORMAL (low mem) and any dma transactions outside this in read_decode_cache_bcr_arcv2()
72 * bounce_buffer to handle dma transactions to HIGHMEM. in read_decode_cache_bcr_arcv2()
88 /* HS 3.0 has limit and strict-ordering fields */ in read_decode_cache_bcr_arcv2()
90 perip_end = (vol.limit << 28) - 1; in read_decode_cache_bcr_arcv2()
93 n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n", in read_decode_cache_bcr_arcv2()
95 IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) ")); in read_decode_cache_bcr_arcv2()
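The two lines above decode the SLC build config register: the size field expands as 128 << sz KB and the line-length bit selects 128 or 64 bytes. Below is a standalone sketch of that arithmetic with a made-up slc_cfg value; the struct is a stand-in, not the real AUX register layout.

#include <stdio.h>

/* stand-in for the decoded SLC_CFG bitfields; values are illustrative */
struct slc_cfg_fields { unsigned int sz, lsz; };

int main(void)
{
	struct slc_cfg_fields slc_cfg = { .sz = 2, .lsz = 0 };

	unsigned int sz_k     = 128 << slc_cfg.sz;               /* 128K, 256K, 512K, ... */
	unsigned int line_len = (slc_cfg.lsz == 0) ? 128 : 64;   /* bytes per SLC line */

	printf("SLC\t\t: %uK, %uB Line\n", sz_k, line_len);      /* -> "SLC: 512K, 128B Line" */
	return 0;
}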
118 p_ic->line_len = 8 << ibcr.line_len; in arc_cache_mumbojumbo()
119 p_ic->sz_k = 1 << (ibcr.sz - 1); in arc_cache_mumbojumbo()
120 p_ic->colors = p_ic->sz_k/assoc/TO_KB(PAGE_SIZE); in arc_cache_mumbojumbo()
122 n += scnprintf(buf + n, len - n, in arc_cache_mumbojumbo()
123 "I-Cache\t\t: %uK, %dway/set, %uB Line, VIPT%s%s\n", in arc_cache_mumbojumbo()
124 p_ic->sz_k, assoc, p_ic->line_len, in arc_cache_mumbojumbo()
125 p_ic->colors > 1 ? " aliasing" : "", in arc_cache_mumbojumbo()
137 p_dc->colors = p_dc->sz_k/assoc/TO_KB(PAGE_SIZE); in arc_cache_mumbojumbo()
141 p_dc->colors = 1; /* PIPT so can't VIPT alias */ in arc_cache_mumbojumbo()
144 p_dc->line_len = 16 << dbcr.line_len; in arc_cache_mumbojumbo()
145 p_dc->sz_k = 1 << (dbcr.sz - 1); in arc_cache_mumbojumbo()
147 n += scnprintf(buf + n, len - n, in arc_cache_mumbojumbo()
148 "D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s\n", in arc_cache_mumbojumbo()
149 p_dc->sz_k, assoc, p_dc->line_len, in arc_cache_mumbojumbo()
155 n += read_decode_cache_bcr_arcv2(c, buf + n, len - n); in arc_cache_mumbojumbo()
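arc_cache_mumbojumbo() derives `colors` as way-size divided by page size; more than one color means a VIPT cache can alias. A userspace sketch of that computation follows; the 32K/64K geometries are illustrative, not tied to any particular core.

#include <stdio.h>

#define TO_KB(bytes)	((bytes) >> 10)

static unsigned int cache_colors(unsigned int sz_k, unsigned int assoc,
				 unsigned int page_sz)
{
	/* colors = (way size) / (page size); > 1 means VIPT aliasing is possible */
	return sz_k / assoc / TO_KB(page_sz);
}

int main(void)
{
	/* 32K 4-way cache with 8K pages: one color, no aliasing */
	printf("32K/4-way/8K pages -> %u color(s)\n", cache_colors(32, 4, 8192));
	/* 64K 4-way cache with 4K pages: four colors, aliasing possible */
	printf("64K/4-way/4K pages -> %u color(s)\n", cache_colors(64, 4, 4096));
	return 0;
}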
161 * Line Operation on {I,D}-Cache
175 * - vaddr in {I,D}C_IV?L
176 * - paddr in {I,D}C_PTAG
179 * Programming model is different for aliasing vs. non-aliasing I$
180 * - D$ / Non-aliasing I$: only paddr in {I,D}C_IV?L
181 * - Aliasing I$: same as ARC700 above (so MMUv3 routine used for MMUv4 I$)
183 * - If PAE40 is enabled, independent of aliasing considerations, the higher
202 /* Ensure we properly floor/ceil the non-line aligned/sized requests in __cache_line_loop_v3()
203 * and have @paddr - aligned to cache line and integral @num_lines. in __cache_line_loop_v3()
205 * -@paddr will be cache-line aligned already (being page aligned) in __cache_line_loop_v3()
206 * -@sz will be integral multiple of line size (being page sized). in __cache_line_loop_v3()
217 * if V-P const for loop, PTAG can be written once outside loop in __cache_line_loop_v3()
224 * Special work for HS38 aliasing I-cache configuration with PAE40 in __cache_line_loop_v3()
225 * - upper 8 bits of paddr need to be written into PTAG_HI in __cache_line_loop_v3()
226 * - (and needs to be written before the lower 32 bits) in __cache_line_loop_v3()
232 while (num_lines-- > 0) { in __cache_line_loop_v3()
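The floor/ceil comments above boil down to aligning the start address down to a line boundary and rounding the byte count up to whole lines. Here is a small sketch with deliberately unaligned inputs; the kernel path normally receives page-aligned values, so the numbers are purely illustrative.

#include <stdio.h>

#define CACHE_LINE_SZ	64UL	/* illustrative line size, not read from hardware */

int main(void)
{
	unsigned long paddr = 0x80001234;	/* deliberately mis-aligned start */
	unsigned long sz    = 100;		/* deliberately not a multiple of line size */

	/* floor the start address to a line boundary ... */
	unsigned long slack = paddr & (CACHE_LINE_SZ - 1);
	paddr &= ~(CACHE_LINE_SZ - 1);

	/* ... and ceil the byte count so the last partial line is covered too */
	unsigned long num_lines = (sz + slack + CACHE_LINE_SZ - 1) / CACHE_LINE_SZ;

	printf("start %#lx, %lu line(s)\n", paddr, num_lines);	/* 0x80001200, 3 lines */
	return 0;
}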
257 /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */ in __cache_line_loop_v4()
261 /* Ensure we properly floor/ceil the non-line aligned/sized requests in __cache_line_loop_v4()
262 * and have @paddr - aligned to cache line and integral @num_lines. in __cache_line_loop_v4()
264 * -@paddr will be cache-line aligned already (being page aligned) in __cache_line_loop_v4()
265 * -@sz will be integral multiple of line size (being page sized). in __cache_line_loop_v4()
276 * - upper 8 bits of paddr need to be written into PTAG_HI in __cache_line_loop_v4()
277 * - (and needs to be written before the lower 32 bits) in __cache_line_loop_v4()
282 * Non aliasing I-cache in HS38, in __cache_line_loop_v4()
283 * aliasing I-cache handled in __cache_line_loop_v3() in __cache_line_loop_v4()
290 while (num_lines-- > 0) { in __cache_line_loop_v4()
307 /* Only for Non aliasing I-cache in HS38 */ in __cache_line_loop_v4()
325 sz += L1_CACHE_BYTES - 1; in __cache_line_loop_v4()
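For the PAE40 case noted above, the physical address is wider than 32 bits and the tag is programmed in two pieces, upper bits first. A sketch of that split follows; the 40-bit address is made up, and in the real code the two halves go to the PTAG_HI/PTAG aux registers.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t paddr = 0x1234567890ULL;	/* illustrative 40-bit physical address */

	uint32_t ptag_hi = (uint32_t)(paddr >> 32);	/* upper 8 bits, written first */
	uint32_t ptag_lo = (uint32_t)paddr;		/* lower 32 bits, written second */

	printf("PTAG_HI=%#x then PTAG=%#x\n", ptag_hi, ptag_lo);
	return 0;
}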
354 * Machine specific helpers for Entire D-Cache or Per Line ops
366 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE in __before_dc_op()
367 * flush-n-inv is achieved by INV cmd but with IM=1 in __before_dc_op()
368 * So toggle INV sub-mode depending on op request and default in __before_dc_op()
389 * combined Flush-n-invalidate uses DC_CTRL.IM = 1 set above in __before_dc_op()
407 /* flush / flush-n-inv both wait */ in __after_dc_op()
418 * Operation on Entire D-Cache
429 if (op & OP_INV) /* Inv or flush-n-inv use same cmd reg */ in __dc_entire_op()
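The __before_dc_op()/__after_dc_op() lines describe a single command whose INV behaviour is modulated by an IM bit: IM=0 discards, IM=1 flushes dirty data before invalidating. Below is a sketch of that toggle on a plain variable; the OP_* values and the IM bit position are stand-ins for the real kernel definitions.

#include <stdio.h>

/* stand-ins for the kernel's op flags and the DC_CTRL.IM bit; values are illustrative */
#define OP_INV			0x1
#define OP_FLUSH		0x2
#define OP_FLUSH_N_INV		(OP_INV | OP_FLUSH)
#define DC_CTRL_INV_MODE_FLUSH	0x40

static unsigned int before_dc_op(unsigned int dc_ctrl, int op)
{
	/* INV sub-mode: flush-before-invalidate only for the combined op */
	if (op == OP_FLUSH_N_INV)
		dc_ctrl |= DC_CTRL_INV_MODE_FLUSH;
	else
		dc_ctrl &= ~DC_CTRL_INV_MODE_FLUSH;
	return dc_ctrl;
}

int main(void)
{
	printf("INV only    : ctrl=%#x\n", before_dc_op(0, OP_INV));
	printf("FLUSH_N_INV : ctrl=%#x\n", before_dc_op(0, OP_FLUSH_N_INV));
	return 0;
}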
458 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
522 __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz); in __ic_line_inv_vaddr_helper()
565 * - b'000 (default) is Flush, in slc_op_rgn()
566 * - b'001 is Invalidate if CTRL.IM == 0 in slc_op_rgn()
567 * - b'001 is Flush-n-Invalidate if CTRL.IM == 1 in slc_op_rgn()
578 ctrl |= SLC_CTRL_RGN_OP_INV; /* Inv or flush-n-inv */ in slc_op_rgn()
587 * END can't be same as START, so add (l2_line_sz - 1) to sz in slc_op_rgn()
589 end = paddr + sz + l2_line_sz - 1; in slc_op_rgn()
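slc_op_rgn() programs a [START, END] region and selects the operation via the CTRL bits described above; END must lie beyond START, hence the extra line added to the size. The sketch below shows that bookkeeping; the bit value and the 128-byte SLC line are assumptions for illustration only.

#include <stdio.h>

#define L2_LINE_SZ		128UL	/* illustrative SLC line size */
#define SLC_CTRL_RGN_OP_INV	0x200	/* stand-in bit for the invalidate region op */

int main(void)
{
	unsigned long paddr = 0x80000000;
	unsigned long sz    = 4096;
	unsigned int  ctrl  = 0;	/* b'000 region op: plain flush */
	int inv = 1;

	if (inv)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* inv, or flush-n-inv when CTRL.IM is set */

	/* END can't equal START, so cover one extra line's worth */
	unsigned long end = paddr + sz + L2_LINE_SZ - 1;

	printf("CTRL=%#x RGN_START=%#lx RGN_END=%#lx\n", ctrl, paddr, end);
	return 0;
}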
621 const unsigned long SLC_LINE_MASK = ~(l2_line_sz - 1); in slc_op_line()
645 while (num_lines-- > 0) { in slc_op_line()
674 if (op & OP_INV) /* Inv or flush-n-inv use same cmd reg */ in slc_entire_op()
707 clear_bit(PG_dc_clean, &folio->flags); in flush_dcache_folio()
719 * DMA ops for systems with L1 cache only
738 * DMA ops for systems with both L1 and L2 caches, but without IOC
760 * Exported DMA API
796 tot_sz = kend - kstart; in flush_icache_range()
807 * given the callers for this case: kprobe/kgdb in built-in in flush_icache_range()
810 __sync_icache_dcache(kstart, kstart, kend - kstart); in flush_icache_range()
830 sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off); in flush_icache_range()
833 tot_sz -= sz; in flush_icache_range()
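flush_icache_range() walks the range one page at a time: the first chunk is capped at the distance to the next page boundary, later chunks at PAGE_SIZE. A userspace sketch of that chunking follows; the addresses and PAGE_SIZE are made up.

#include <stdio.h>

#define PAGE_SIZE	4096UL	/* illustrative */

int main(void)
{
	unsigned long kstart = 0xc0001f00;	/* straddles a page boundary */
	unsigned long kend   = 0xc0002300;
	unsigned long tot_sz = kend - kstart;

	while (tot_sz > 0) {
		unsigned long off = kstart & (PAGE_SIZE - 1);
		unsigned long sz  = tot_sz < (PAGE_SIZE - off) ? tot_sz : (PAGE_SIZE - off);

		/* each chunk stays within one page, mirroring the per-page cache ops */
		printf("op on %#lx..%#lx (%lu bytes)\n", kstart, kstart + sz - 1, sz);

		kstart += sz;
		tot_sz -= sz;
	}
	return 0;
}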
843 * builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
845 * builtin kernel page will not have any virtual mappings.
892 clear_bit(PG_dc_clean, &dst->flags); in copy_user_highpage()
893 clear_bit(PG_dc_clean, &src->flags); in copy_user_highpage()
903 clear_bit(PG_dc_clean, &folio->flags); in clear_user_page()
919 * IO-Coherency (IOC) setup rules:
922 * Non-Masters need not be accessing caches at that time
923 * - They are either HALT_ON_RESET and kick started much later or
924 * - if run on reset, need to ensure that arc_platform_smp_wait_to_boot()
930 * 3. All Caches need to be disabled when setting up IOC to elide any in-flight
941 * reenabling IOC when DMA might be potentially active is tricky business. in arc_ioc_setup()
972 write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, order_base_2(mem_sz >> 10) - 2); in arc_ioc_setup()
984 /* Re-enable L1 dcache */ in arc_ioc_setup()
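The aperture-size write above encodes memory size as order_base_2(mem_sz in KB) - 2, which I read as an aperture of 2^(SIZE+2) KB; treat that decode as an assumption. A worked example for a hypothetical 512 MB system, with a minimal order_base_2 stand-in:

#include <stdio.h>

/* minimal order_base_2 stand-in, exact for power-of-2 sizes */
static unsigned int order_base_2(unsigned long n)
{
	unsigned int order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned long mem_sz = 512UL << 20;	/* illustrative: 512 MB of DDR */

	unsigned int ap_size = order_base_2(mem_sz >> 10) - 2;

	/* aperture covers 2^(ap_size + 2) KB, i.e. all of memory */
	printf("AP0_SIZE=%u -> %lu KB aperture\n", ap_size, 1UL << (ap_size + 2));
	return 0;
}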
990 * - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES)
993 * - IOC setup / dma callbacks only need to be done once
1000 if (!ic->line_len) in arc_cache_init_master()
1001 panic("cache support enabled but non-existent cache\n"); in arc_cache_init_master()
1003 if (ic->line_len != L1_CACHE_BYTES) in arc_cache_init_master()
1005 ic->line_len, L1_CACHE_BYTES); in arc_cache_init_master()
1011 if (is_isa_arcv2() && ic->colors > 1) in arc_cache_init_master()
1020 if (!dc->line_len) in arc_cache_init_master()
1021 panic("cache support enabled but non-existent cache\n"); in arc_cache_init_master()
1023 if (dc->line_len != L1_CACHE_BYTES) in arc_cache_init_master()
1025 dc->line_len, L1_CACHE_BYTES); in arc_cache_init_master()
1027 /* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */ in arc_cache_init_master()
1028 if (is_isa_arcompact() && dc->colors > 1) { in arc_cache_init_master()
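arc_cache_init_master() cross-checks the probed geometry against what the kernel was built for and panics on mismatch. Below is a userspace sketch of those checks; struct cache_geom and check_geometry() are hypothetical stand-ins, and the real code reads the BCRs and calls panic() rather than exit().

#include <stdio.h>
#include <stdlib.h>

#define L1_CACHE_BYTES	64	/* what the kernel was built for (illustrative) */

/* hypothetical probed geometry, standing in for the BCR decode */
struct cache_geom { unsigned int line_len, colors; };

static void check_geometry(const struct cache_geom *dc)
{
	if (!dc->line_len) {
		fprintf(stderr, "cache support enabled but non-existent cache\n");
		exit(1);
	}
	if (dc->line_len != L1_CACHE_BYTES) {
		fprintf(stderr, "line size %u != kernel's %u\n",
			dc->line_len, L1_CACHE_BYTES);
		exit(1);
	}
	if (dc->colors > 1)
		fprintf(stderr, "aliasing VIPT cache: extra handling needed\n");
}

int main(void)
{
	struct cache_geom dc = { .line_len = 64, .colors = 1 };

	check_geometry(&dc);
	printf("geometry OK\n");
	return 0;
}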
1062 * called at all for devices using coherent DMA. in arc_cache_init_master()
1063 * arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*() in arc_cache_init_master()
1076 * And even if PAE is not enabled in kernel, the upper 32-bits still need in arc_cache_init()