Lines matching "dma" / "related" in drivers/net/ipa/ipa_table.c
1 // SPDX-License-Identifier: GPL-2.0
3 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
4 * Copyright (C) 2018-2024 Linaro Ltd.
10 #include <linux/dma-mapping.h>
27 * The IPA has tables defined in its local (IPA-resident) memory that define
29 * endian 64-bit "slot" that holds the address of a rule definition. (The
30 * size of these slots is 64 bits regardless of the host DMA address size.)
38 * an object (such as a route or filter table) in IPA-resident memory must
39  * be 128-byte aligned. An object in system memory (such as a route or filter
40 * rule) must be at an 8-byte aligned address. We currently only place
43 * A rule consists of a contiguous block of 32-bit values terminated with
49 * not all TX endpoints support filtering. The first 64-bit slot in a
53 * v5.0, the low-order bit (bit 0) in this bitmap represents a special
59 * position defines the endpoint ID--i.e. if bit 1 is set in the endpoint
79 * ----------------------
81 * |--------------------|
82 * 1st endpoint | 0x000123456789abc0 | DMA address for modem endpoint 2 rule
83 * |--------------------|
84 * 2nd endpoint | 0x000123456789abf0 | DMA address for AP endpoint 5 rule
85 * |--------------------|
87 * |--------------------|
89 * |--------------------|
91 * ----------------------
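As an illustrative aside (not part of the matched source), the endpoint bitmap held in that first filter-table slot can be built as below; the pre-v5.0 shift reflects the special meaning of bit 0 described above.

/* Minimal sketch, assuming the usual kernel byte-order helpers: build the
 * little-endian bitmap written into the first filter table slot.  Before
 * IPA v5.0 bit 0 is reserved, so endpoint N maps to bit N + 1; from v5.0
 * on, endpoint N maps to bit N directly.
 */
static __le64 example_filter_map(u64 filtered, bool before_v5_0)
{
	return cpu_to_le64(before_v5_0 ? filtered << 1 : filtered);
}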
100 * ----------------------
101 * 1st modem route | 0x0001234500001100 | DMA address for first route rule
102 * |--------------------|
103 * 2nd modem route | 0x0001234500001140 | DMA address for second route rule
104 * |--------------------|
106 * |--------------------|
107 * Last modem route| 0x0001234500002280 | DMA address for Nth route rule
108 * |--------------------|
109 * 1st AP route | 0x0001234500001100 | DMA address for route rule (N+1)
110 * |--------------------|
111 * 2nd AP route | 0x0001234500001140 | DMA address for next route rule
112 * |--------------------|
114 * |--------------------|
115 * Last AP route | 0x0001234500002280 | DMA address for last route rule
116 * ----------------------
119 /* Filter or route rules consist of a set of 32-bit values followed by a
120 * 32-bit all-zero rule list terminator. The "zero rule" is simply an
121 * all-zero rule followed by the list terminator.
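A brief illustration (an assumption drawn from the wording above, not from the matched lines): because a rule is a run of 32-bit words ended by an all-zero 32-bit terminator, the smallest possible "zero rule" is a single 64-bit block of zeroed memory.

/* Minimal sketch: an empty (all-zero) rule plus its all-zero 32-bit list
 * terminator occupies exactly one 64-bit block.
 */
static const __le32 example_zero_rule[2];	/* statically zeroed */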
128 /* Filter and route tables contain DMA addresses that refer in ipa_table_validate_build()
130 * is 64 bits regardless of what the size of an AP DMA address in ipa_table_validate_build()
138 * It is a 64-bit block of zeroed memory. Code in ipa_table_init() in ipa_table_validate_build()
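The ipa_table_validate_build() lines above describe compile-time assumptions; here is a hedged sketch of checks matching that description (the real function may differ in detail):

static void example_table_validate_build(void)
{
	/* A table slot is 64 bits wide regardless of the AP's DMA address
	 * size, so any dma_addr_t must fit in a __le64 slot.
	 */
	BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(__le64));

	/* The "zero rule" is one 64-bit block of zeroed memory, written
	 * through a __le64 pointer in ipa_table_init().
	 */
	BUILD_BUG_ON(2 * sizeof(__le32) != sizeof(__le64));
}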
164 return ipa->version != IPA_VERSION_4_2; in ipa_table_hash_support()
169 struct device *dev = ipa->dev; in ipa_filtered_valid()
179 if (count > ipa->filter_count) { in ipa_filtered_valid()
181 count, ipa->filter_count); in ipa_filtered_valid()
197 WARN_ON(count > max_t(u32, ipa->filter_count, ipa->route_count)); in ipa_table_addr()
202 return ipa->table_addr + skip * sizeof(*ipa->table_virt); in ipa_table_addr()
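Putting the two matched ipa_table_addr() lines in context, a hedged reconstruction follows; the skip values are an assumption based on the layout described later in the file (zero rule at the base, then the filter bitmap, then the per-entry slots):

/* Minimal sketch: return the DMA address of the first table slot a caller
 * should use.  A filter table starts at the bitmap slot; a route table (or
 * a block of zero-rule slots) starts just past it.
 */
static dma_addr_t example_table_addr(struct ipa *ipa, bool filter_mask,
				     u16 count)
{
	u32 skip;

	if (!count)
		return 0;

	WARN_ON(count > max_t(u32, ipa->filter_count, ipa->route_count));

	/* Assumed layout: slot 0 = zero rule, slot 1 = filter bitmap */
	skip = filter_mask ? 1 : 2;

	return ipa->table_addr + skip * sizeof(*ipa->table_virt);
}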
208 struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); in ipa_table_reset_add()
216 if (!mem || !mem->size) in ipa_table_reset_add()
222 offset = mem->offset + first * sizeof(__le64); in ipa_table_reset_add()
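A hedged sketch tying the matched ipa_table_reset_add() lines together: compute the byte offset of the first slot to clear, then queue a DMA command that rewrites those slots with the address of the zero rule (the exact shape of the ipa_cmd_dma_shared_mem_add() call is an assumption):

/* Minimal sketch: add a command to "trans" that makes table slots
 * [first, first + count) in IPA local memory point at the zero rule.
 */
static void example_table_reset_add(struct gsi_trans *trans, u16 first,
				    u16 count, const struct ipa_mem *mem)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	dma_addr_t addr;
	u32 offset;
	u16 size;

	if (!mem || !mem->size)
		return;			/* region not present */

	offset = mem->offset + first * sizeof(__le64);
	size = count * sizeof(__le64);
	addr = ipa_table_addr(ipa, false, count);	/* zero-rule slots */

	ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, true);
}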
231 * for the IPv4 and IPv6 non-hashed and hashed filter tables.
236 u64 ep_mask = ipa->filtered; in ipa_filter_reset_table()
242 dev_err(ipa->dev, "no transaction for %s filter reset\n", in ipa_filter_reset_table()
244 return -EBUSY; in ipa_filter_reset_table()
254 endpoint = &ipa->endpoint[endpoint_id]; in ipa_filter_reset_table()
255 if (endpoint->ee_id != ee_id) in ipa_filter_reset_table()
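A hedged sketch of the bitmap walk implied by the matched ipa_filter_reset_table() lines: visit each filtered endpoint in bit order, skip those owned by the other execution environment, and reset one slot per remaining endpoint (slot 0 holds the bitmap, so endpoint slots start at 1). The helper called here is the hypothetical one sketched above.

static void example_filter_reset(struct ipa *ipa, struct gsi_trans *trans,
				 const struct ipa_mem *mem,
				 enum gsi_ee_id ee_id)
{
	u64 ep_mask = ipa->filtered;
	u16 slot = 1;			/* slot 0 is the endpoint bitmap */

	while (ep_mask) {
		u32 endpoint_id = __ffs64(ep_mask);
		struct ipa_endpoint *endpoint;

		ep_mask ^= BIT_ULL(endpoint_id);

		endpoint = &ipa->endpoint[endpoint_id];
		if (endpoint->ee_id == ee_id)
			example_table_reset_add(trans, slot, 1, mem);
		slot++;
	}
}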
291 * won't exceed the per-transaction command limit.
296 u32 modem_route_count = ipa->modem_route_count; in ipa_route_reset()
303 dev_err(ipa->dev, "no transaction for %s route reset\n", in ipa_route_reset()
305 return -EBUSY; in ipa_route_reset()
313 count = ipa->route_count - modem_route_count; in ipa_route_reset()
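The modem/AP split in the matched ipa_route_reset() lines follows from the route table layout shown earlier (modem routes occupy the leading slots, AP routes the rest); a hedged sketch of that range selection:

/* Minimal sketch: choose which contiguous range of route slots to reset. */
static void example_route_reset_range(struct ipa *ipa, bool modem,
				      u16 *first, u16 *count)
{
	u32 modem_route_count = ipa->modem_route_count;

	if (modem) {
		*first = 0;
		*count = modem_route_count;
	} else {
		*first = modem_route_count;
		*count = ipa->route_count - modem_route_count;
	}
}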
331 struct device *dev = ipa->dev; in ipa_table_reset()
360 dev_err(ipa->dev, "no transaction for hash flush\n"); in ipa_table_hash_flush()
361 return -EBUSY; in ipa_table_hash_flush()
364 if (ipa->version < IPA_VERSION_5_0) { in ipa_table_hash_flush()
388 struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); in ipa_table_init_add()
407 /* The non-hashed region will exist (see ipa_table_mem_valid()) */ in ipa_table_init_add()
410 hash_offset = hash_mem ? hash_mem->offset : 0; in ipa_table_init_add()
417 * table is either the same as the non-hashed one, or zero. in ipa_table_init_add()
419 count = 1 + hweight64(ipa->filtered); in ipa_table_init_add()
420 hash_count = hash_mem && hash_mem->size ? count : 0; in ipa_table_init_add()
425 count = mem->size / sizeof(__le64); in ipa_table_init_add()
426 hash_count = hash_mem ? hash_mem->size / sizeof(__le64) : 0; in ipa_table_init_add()
434 ipa_cmd_table_init_add(trans, opcode, size, mem->offset, addr, in ipa_table_init_add()
440 zero_offset = mem->offset + size; in ipa_table_init_add()
441 zero_size = mem->size - size; in ipa_table_init_add()
443 ipa->zero_addr, true); in ipa_table_init_add()
449 zero_size = hash_mem->size - hash_size; in ipa_table_init_add()
451 ipa->zero_addr, true); in ipa_table_init_add()
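A worked example of the size arithmetic in the matched ipa_table_init_add() lines (numbers are illustrative only):

/* Illustrative arithmetic: with three filtering endpoints and a 64-byte
 * non-hashed filter region, the init command writes 4 slots (the bitmap
 * plus one per endpoint, 32 bytes) and the remaining 32 bytes are cleared
 * with a DMA command sourced from ipa->zero_addr.
 */
static void example_filter_sizes(const struct ipa_mem *mem, u32 filter_ep_count)
{
	u32 count = 1 + filter_ep_count;	/* bitmap + endpoint slots */
	u32 size = count * sizeof(__le64);
	u32 zero_offset = mem->offset + size;
	u32 zero_size = mem->size - size;

	pr_debug("init %u bytes at %#x, zero %u bytes at %#x\n",
		 size, mem->offset, zero_size, zero_offset);
}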
459 * - IPv4: in ipa_table_setup()
460 * - One for route table initialization (non-hashed and hashed) in ipa_table_setup()
461 * - One for filter table initialization (non-hashed and hashed) in ipa_table_setup()
462 * - One to zero unused entries in the non-hashed filter table in ipa_table_setup()
463 * - One to zero unused entries in the hashed filter table in ipa_table_setup()
464 * - IPv6: in ipa_table_setup()
465 * - One for route table initialization (non-hashed and hashed) in ipa_table_setup()
466 * - One for filter table initialization (non-hashed and hashed) in ipa_table_setup()
467 * - One to zero unused entries in the non-hashed filter table in ipa_table_setup()
468 * - One to zero unused entries in the hashed filter table in ipa_table_setup()
473 dev_err(ipa->dev, "no transaction for table setup\n"); in ipa_table_setup()
474 return -EBUSY; in ipa_table_setup()
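The comment above enumerates eight immediate commands (four per IP version); a hedged sketch of the surrounding allocate/add/commit pattern, where example_table_init_add() is a hypothetical helper standing in for the driver's per-table setup and the four calls are assumed to queue those eight commands between them:

static int example_table_setup(struct ipa *ipa)
{
	struct gsi_trans *trans;

	trans = ipa_cmd_trans_alloc(ipa, 8);
	if (!trans) {
		dev_err(ipa->dev, "no transaction for table setup\n");
		return -EBUSY;
	}

	example_table_init_add(trans, false, false);	/* IPv4 route */
	example_table_init_add(trans, false, true);	/* IPv6 route */
	example_table_init_add(trans, true, false);	/* IPv4 filter */
	example_table_init_add(trans, true, true);	/* IPv6 filter */

	gsi_trans_commit_wait(trans);

	return 0;
}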
488 * ipa_filter_tuple_zero() - Zero an endpoint's hashed filter tuple
496 u32 endpoint_id = endpoint->endpoint_id; in ipa_filter_tuple_zero()
497 struct ipa *ipa = endpoint->ipa; in ipa_filter_tuple_zero()
502 if (ipa->version < IPA_VERSION_5_0) { in ipa_filter_tuple_zero()
506 val = ioread32(endpoint->ipa->reg_virt + offset); in ipa_filter_tuple_zero()
508 /* Zero all filter-related fields, preserving the rest */ in ipa_filter_tuple_zero()
515 /* Zero all filter-related fields */ in ipa_filter_tuple_zero()
519 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_filter_tuple_zero()
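A hedged sketch of the read-modify-write shown by the matched ipa_filter_tuple_zero() lines; the register offset and field mask below are made-up placeholders, since the real names and values differ between IPA versions:

/* Placeholder register offset and field mask (made-up values) */
#define EXAMPLE_HASH_TUPLE_OFFSET(ep)	(0x1000 + (ep) * 0x100)
#define EXAMPLE_FILTER_TUPLE_FMASK	GENMASK(6, 0)

static void example_filter_tuple_zero(struct ipa *ipa, u32 endpoint_id)
{
	u32 offset = EXAMPLE_HASH_TUPLE_OFFSET(endpoint_id);
	u32 val;

	val = ioread32(ipa->reg_virt + offset);
	val &= ~EXAMPLE_FILTER_TUPLE_FMASK;	/* clear filter fields only */
	iowrite32(val, ipa->reg_virt + offset);
}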
526 u64 ep_mask = ipa->filtered; in ipa_filter_config()
537 endpoint = &ipa->endpoint[endpoint_id]; in ipa_filter_config()
538 if (endpoint->ee_id == ee_id) in ipa_filter_config()
545 return route_id < ipa->modem_route_count; in ipa_route_id_modem()
549 * ipa_route_tuple_zero() - Zero a hashed route table entry tuple
561 if (ipa->version < IPA_VERSION_5_0) { in ipa_route_tuple_zero()
565 val = ioread32(ipa->reg_virt + offset); in ipa_route_tuple_zero()
567 /* Zero all route-related fields, preserving the rest */ in ipa_route_tuple_zero()
574 /* Zero all route-related fields */ in ipa_route_tuple_zero()
578 iowrite32(val, ipa->reg_virt + offset); in ipa_route_tuple_zero()
589 for (route_id = 0; route_id < ipa->route_count; route_id++) in ipa_route_config()
614 /* IPv4 and IPv6 non-hashed tables are expected to be defined and in ipa_table_mem_valid()
626 if (mem_ipv4->size != mem_ipv6->size) in ipa_table_mem_valid()
630 count = mem_ipv4->size / sizeof(__le64); in ipa_table_mem_valid()
634 ipa->filter_count = count - 1; /* Filter map in first entry */ in ipa_table_mem_valid()
636 ipa->route_count = count; in ipa_table_mem_valid()
647 if (count < 1 + hweight64(ipa->filtered)) in ipa_table_mem_valid()
653 if (count < ipa->modem_route_count + 1) in ipa_table_mem_valid()
658 * and have the same size as non-hashed tables. If hashing is not in ipa_table_mem_valid()
664 if (!mem_hashed || mem_hashed->size != mem_ipv4->size) in ipa_table_mem_valid()
667 if (mem_hashed && mem_hashed->size) in ipa_table_mem_valid()
674 if (!mem_hashed || mem_hashed->size != mem_ipv6->size) in ipa_table_mem_valid()
677 if (mem_hashed && mem_hashed->size) in ipa_table_mem_valid()
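A worked example of the counts checked in the matched ipa_table_mem_valid() lines (sizes are illustrative): a 56-byte non-hashed filter region holds seven 8-byte slots, the first of which is the endpoint bitmap, leaving six filter slots; a route region must additionally leave at least one slot for the AP beyond the modem's.

/* Illustrative arithmetic only; region sizes are hypothetical */
static bool example_mem_counts_ok(const struct ipa *ipa, u32 filter_size,
				  u32 route_size)
{
	u32 filter_count = filter_size / sizeof(__le64) - 1;	/* minus bitmap */
	u32 route_count = route_size / sizeof(__le64);

	if (filter_count < hweight64(ipa->filtered))
		return false;			/* not enough filter slots */

	return route_count > ipa->modem_route_count;	/* AP needs >= 1 */
}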
684 /* Initialize a coherent DMA allocation containing initialized filter and
691 * entries are 64 bits wide, and (other than the bitmap) contain the DMA
696 * Each entry in a route table is the DMA address of a routing rule. For
697 * routing there is also a 64-bit "zero rule" that means no routing, and
701 * +-------------------+
702 * --> | zero rule |
703 * / |-------------------|
705 * |\ |-------------------|
706 * | ---- zero rule address | \
707 * |\ |-------------------| |
708 * | ---- zero rule address | | Max IPA filter count
709 * | |-------------------| > or IPA route count,
711 * \ |-------------------| |
712 * ---- zero rule address | /
713 * +-------------------+
717 struct device *dev = ipa->dev; in ipa_table_init()
726 count = max_t(u32, ipa->filter_count, ipa->route_count); in ipa_table_init()
729 * aligned on a 128-byte boundary. We put the "zero rule" at the in ipa_table_init()
730 * base of the table area allocated here. The DMA address returned in ipa_table_init()
731 * by dma_alloc_coherent() is guaranteed to be a power-of-2 number in ipa_table_init()
737 return -ENOMEM; in ipa_table_init()
739 ipa->table_virt = virt; in ipa_table_init()
740 ipa->table_addr = addr; in ipa_table_init()
751 if (ipa->version < IPA_VERSION_5_0) in ipa_table_init()
752 *virt++ = cpu_to_le64(ipa->filtered << 1); in ipa_table_init()
754 *virt++ = cpu_to_le64(ipa->filtered); in ipa_table_init()
756 /* All the rest contain the DMA address of the zero rule */ in ipa_table_init()
758 while (count--) in ipa_table_init()
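A hedged reconstruction of the sequence the matched ipa_table_init() lines come from, assuming the layout described above (zero rule at the base of the coherent area, then the endpoint bitmap, then one slot per possible filter or route entry):

static int example_table_init(struct ipa *ipa)
{
	u32 count = max_t(u32, ipa->filter_count, ipa->route_count);
	struct device *dev = ipa->dev;
	dma_addr_t addr;
	__le64 *virt;
	size_t size;

	/* zero rule + filter bitmap + one slot per filter/route entry */
	size = (2 + count) * sizeof(__le64);
	virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	ipa->table_virt = virt;
	ipa->table_addr = addr;

	*virt++ = 0;				/* the zero rule itself */

	/* Endpoint bitmap; pre-v5.0 hardware reserves bit 0 */
	if (ipa->version < IPA_VERSION_5_0)
		*virt++ = cpu_to_le64(ipa->filtered << 1);
	else
		*virt++ = cpu_to_le64(ipa->filtered);

	/* Remaining slots hold the DMA address of the zero rule (the base) */
	while (count--)
		*virt++ = cpu_to_le64(addr);

	return 0;
}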
766 u32 count = max_t(u32, 1 + ipa->filter_count, ipa->route_count); in ipa_table_exit()
767 struct device *dev = ipa->dev; in ipa_table_exit()
772 dma_free_coherent(dev, size, ipa->table_virt, ipa->table_addr); in ipa_table_exit()
773 ipa->table_addr = 0; in ipa_table_exit()
774 ipa->table_virt = NULL; in ipa_table_exit()