/*
 * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: qdf_dev
 * QCA driver framework (QDF) device management APIs
 */

#if !defined(__I_QDF_DEV_H)
#define __I_QDF_DEV_H

/* Include Files */
#include <qdf_types.h>
#include "qdf_util.h"
#include <linux/irq.h>
#ifdef CONFIG_SCHED_CORE_CTL
#include <linux/sched/core_ctl.h>
#endif

struct qdf_cpu_mask;
struct qdf_devm;
struct qdf_dev;

#define __qdf_cpumask_pr_args(maskp) cpumask_pr_args(maskp)
#define __qdf_for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
#define __qdf_for_each_online_cpu(cpu) for_each_online_cpu(cpu)
#define __qdf_for_each_cpu(cpu, maskp) \
for_each_cpu(cpu, maskp)

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 2, 0))
#define __qdf_for_each_cpu_not(cpu, maskp) \
for_each_cpu_andnot(cpu, cpu_possible_mask, maskp)
#else
#define __qdf_for_each_cpu_not(cpu, maskp) \
for_each_cpu_not(cpu, maskp)
#endif
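
/*
 * Usage sketch (illustrative only, not part of the QDF API): the macro
 * above papers over the kernel 6.2 removal of for_each_cpu_not() by
 * iterating cpu_possible_mask with for_each_cpu_andnot() on newer
 * kernels. A hypothetical caller walking the CPUs absent from a mask,
 * assuming the mask is a plain kernel cpumask_t, might look like:
 *
 *	cpumask_t excluded;
 *	unsigned int cpu;
 *
 *	cpumask_clear(&excluded);
 *	cpumask_set_cpu(0, &excluded);
 *	__qdf_for_each_cpu_not(cpu, &excluded)
 *		pr_debug("cpu%u outside mask\n", cpu);
 */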

/**
 * __qdf_dev_alloc_mem() - allocate memory
 * @qdfdev: Device handle
 * @mrptr: Pointer to the allocated memory
 * @reqsize: Allocation request in bytes
 * @mask: Property mask to be associated with the allocated memory
 *
 * This function will allocate memory to be associated with a device
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static inline QDF_STATUS
__qdf_dev_alloc_mem(struct qdf_dev *qdfdev, struct qdf_devm **mrptr,
		    uint32_t reqsize, uint32_t mask)
{
	*mrptr = devm_kzalloc((struct device *)qdfdev, reqsize, mask);

	if (!*mrptr)
		return QDF_STATUS_E_NOMEM;

	return QDF_STATUS_SUCCESS;
}

/**
 * __qdf_dev_release_mem() - release memory
 * @qdfdev: Device handle
 * @mrptr: Pointer to the allocated memory
 *
 * This function will release memory that was allocated for a device
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static inline QDF_STATUS
__qdf_dev_release_mem(struct qdf_dev *qdfdev, struct qdf_devm *mrptr)
{
	devm_kfree((struct device *)qdfdev, mrptr);

	return QDF_STATUS_SUCCESS;
}
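
/*
 * Usage sketch (illustrative only): __qdf_dev_alloc_mem() hands back
 * devm-managed, zeroed memory, so it is released automatically when the
 * underlying struct device is detached; __qdf_dev_release_mem() is only
 * needed for an early, explicit free. The variable names and sizes
 * below are hypothetical.
 *
 *	struct qdf_devm *buf;
 *
 *	if (__qdf_dev_alloc_mem(qdfdev, &buf, 128, GFP_KERNEL) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_NOMEM;
 *	(use buf...)
 *	__qdf_dev_release_mem(qdfdev, buf);	(optional early release)
 */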

/**
 * __qdf_dev_modify_irq_status() - modify irq
 * @irnum: irq number
 * @cmask: Bitmap to be cleared for the property mask
 * @smask: Bitmap to be set for the property mask
 *
 * This function will modify the properties of the irq associated with a device
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static inline QDF_STATUS
__qdf_dev_modify_irq_status(uint32_t irnum, unsigned long cmask,
			    unsigned long smask)
{
	irq_modify_status(irnum, cmask, smask);

	return QDF_STATUS_SUCCESS;
}

/**
 * __qdf_dev_set_irq_affinity() - set irq affinity
 * @irnum: irq number
 * @cpmask: cpu affinity bitmap
 *
 * This function will set the CPU affinity hint for an irq
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static inline QDF_STATUS
__qdf_dev_set_irq_affinity(uint32_t irnum, struct qdf_cpu_mask *cpmask)
{
	int ret;

	ret = irq_set_affinity_hint(irnum, (struct cpumask *)cpmask);
	return qdf_status_from_os_return(ret);
}
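
/*
 * Usage sketch (illustrative only): steer a device interrupt towards a
 * set of CPUs. This only installs a hint via irq_set_affinity_hint();
 * the kernel or userspace irqbalance may still move the irq. The irq
 * number and mask below are hypothetical.
 *
 *	cpumask_t perf_mask;
 *
 *	cpumask_clear(&perf_mask);
 *	cpumask_set_cpu(4, &perf_mask);
 *	__qdf_dev_set_irq_affinity(irnum, (struct qdf_cpu_mask *)&perf_mask);
 */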

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0))
/**
 * __qdf_topology_physical_package_id() - API to retrieve the
 * cluster info
 * @cpu: cpu core
 *
 * This function returns the cluster information for the given
 * cpu core
 *
 * Return: Cluster ID of the CPU
 */
static inline int __qdf_topology_physical_package_id(unsigned int cpu)
{
	return topology_cluster_id(cpu);
}
#else
/**
 * __qdf_topology_physical_package_id() - API to retrieve the
 * cluster info
 * @cpu: cpu core
 *
 * This function returns the cluster information for the given
 * cpu core
 *
 * Return: Cluster ID of the CPU
 */
static inline int __qdf_topology_physical_package_id(unsigned int cpu)
{
	return topology_physical_package_id(cpu);
}
#endif

/**
 * __qdf_cpumask_subset() - API to check for subset in cpumasks
 * @srcp1: first cpu mask
 * @srcp2: second cpu mask
 *
 * This checks whether (*srcp1 & ~*srcp2) is empty
 *
 * Return: 1 if srcp1 is a subset of srcp2 else 0
 */
static inline int __qdf_cpumask_subset(qdf_cpu_mask *srcp1,
				       const qdf_cpu_mask *srcp2)
{
	return cpumask_subset(srcp1, srcp2);
}

/**
 * __qdf_cpumask_intersects() - API to check if cpumasks
 * intersect
 * @srcp1: first cpu mask
 * @srcp2: second cpu mask
 *
 * This checks for (*srcp1 & *srcp2) != 0
 *
 * Return: 1 if srcp1 and srcp2 intersect else 0
 */
static inline int __qdf_cpumask_intersects(qdf_cpu_mask *srcp1,
					   const qdf_cpu_mask *srcp2)
{
	return cpumask_intersects(srcp1, srcp2);
}

#ifdef CONFIG_SCHED_CORE_CTL
/**
 * __qdf_core_ctl_set_boost() - This API is used to move tasks
 * to CPUs with higher capacity
 * @boost: value to set
 *
 * This function moves tasks to higher capacity CPUs than those
 * where the tasks would have normally ended up. This is
 * applicable only to defconfig builds.
 *
 * Return: 0 on success
 */
static inline int __qdf_core_ctl_set_boost(bool boost)
{
	return core_ctl_set_boost(boost);
}
#else
static inline int __qdf_core_ctl_set_boost(bool boost)
{
	return 0;
}
#endif
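
/*
 * Usage sketch (illustrative only): a latency-sensitive window is
 * typically bracketed by a boost/unboost pair; on kernels built without
 * CONFIG_SCHED_CORE_CTL both calls are harmless no-ops returning 0.
 *
 *	__qdf_core_ctl_set_boost(true);
 *	(latency-critical work...)
 *	__qdf_core_ctl_set_boost(false);
 */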

/**
 * __qdf_dev_set_irq_status_flags() - set irq status flags
 * @irnum: irq number
 * @set: status flag to set
 *
 * This function will set status flags for an irq
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static inline QDF_STATUS
__qdf_dev_set_irq_status_flags(unsigned int irnum, unsigned long set)
{
	irq_set_status_flags(irnum, set);

	return QDF_STATUS_SUCCESS;
}

/**
 * __qdf_dev_clear_irq_status_flags() - clear irq status flags
 * @irnum: irq number
 * @clr: status flag to clear
 *
 * This function will clear status flags for an irq
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static inline QDF_STATUS
__qdf_dev_clear_irq_status_flags(unsigned int irnum, unsigned long clr)
{
	irq_clear_status_flags(irnum, clr);

	return QDF_STATUS_SUCCESS;
}
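
/*
 * Usage sketch (illustrative only): the set/clear helpers above take the
 * kernel's IRQ_* status flags from <linux/irq.h>. For example, a driver
 * that wants to keep an irq away from the irq balancer could do:
 *
 *	__qdf_dev_set_irq_status_flags(irnum, IRQ_NOBALANCING);
 *	(later, restore default balancing behaviour...)
 *	__qdf_dev_clear_irq_status_flags(irnum, IRQ_NOBALANCING);
 */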

#endif /* __I_QDF_DEV_H */