// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Endpoint *Controller* (EPC) library
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/pci-ep-cfs.h>

static const struct class pci_epc_class = {
	.name = "pci_epc",
};

static void devm_pci_epc_release(struct device *dev, void *res)
{
	struct pci_epc *epc = *(struct pci_epc **)res;

	pci_epc_destroy(epc);
}

static int devm_pci_epc_match(struct device *dev, void *res, void *match_data)
{
	struct pci_epc **epc = res;

	return *epc == match_data;
}

/**
 * pci_epc_put() - release the PCI endpoint controller
 * @epc: epc returned by pci_epc_get()
 *
 * release the refcount the caller obtained by invoking pci_epc_get()
 */
void pci_epc_put(struct pci_epc *epc)
{
	if (IS_ERR_OR_NULL(epc))
		return;

	module_put(epc->ops->owner);
	put_device(&epc->dev);
}
EXPORT_SYMBOL_GPL(pci_epc_put);

/**
 * pci_epc_get() - get the PCI endpoint controller
 * @epc_name: device name of the endpoint controller
 *
 * Invoke to get struct pci_epc * corresponding to the device name of the
 * endpoint controller
 */
struct pci_epc *pci_epc_get(const char *epc_name)
{
	int ret = -EINVAL;
	struct pci_epc *epc;
	struct device *dev;
	struct class_dev_iter iter;

	class_dev_iter_init(&iter, &pci_epc_class, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter))) {
		if (strcmp(epc_name, dev_name(dev)))
			continue;

		epc = to_pci_epc(dev);
		if (!try_module_get(epc->ops->owner)) {
			ret = -EINVAL;
			goto err;
		}

		class_dev_iter_exit(&iter);
		get_device(&epc->dev);
		return epc;
	}

err:
	class_dev_iter_exit(&iter);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pci_epc_get);
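
/*
 * Example (illustrative sketch only, not part of this library): a caller such
 * as the EPF core typically looks up the controller by its device name and
 * drops the reference when it is done with it. The controller name used below
 * is a made-up placeholder.
 *
 *	struct pci_epc *epc;
 *
 *	epc = pci_epc_get("e65c0000.pcie-ep");
 *	if (IS_ERR(epc))
 *		return PTR_ERR(epc);
 *
 *	// ... use the controller through the pci_epc_*() helpers ...
 *
 *	pci_epc_put(epc);
 */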

/**
 * pci_epc_get_first_free_bar() - helper to get first unreserved BAR
 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
 *
 * Invoke to get the first unreserved BAR that can be used by the endpoint
 * function.
 */
enum pci_barno
pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features)
{
	return pci_epc_get_next_free_bar(epc_features, BAR_0);
}
EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);

/**
 * pci_epc_get_next_free_bar() - helper to get unreserved BAR starting from @bar
 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
 * @bar: the starting BAR number from where unreserved BAR should be searched
 *
 * Invoke to get the next unreserved BAR starting from @bar that can be used
 * for endpoint function.
 */
enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
					 *epc_features, enum pci_barno bar)
{
	int i;

	if (!epc_features)
		return BAR_0;

	/* If 'bar - 1' is a 64-bit BAR, move to the next BAR */
	if (bar > 0 && epc_features->bar[bar - 1].only_64bit)
		bar++;

	for (i = bar; i < PCI_STD_NUM_BARS; i++) {
		/* If the BAR is not reserved, return it. */
		if (epc_features->bar[i].type != BAR_RESERVED)
			return i;
	}

	return NO_BAR;
}
EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
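
/*
 * Example (illustrative sketch only): walking the unreserved BARs advertised
 * by a controller, e.g. from an EPF driver's ->bind() callback. func_no and
 * vfunc_no are assumed to be valid for the given controller.
 *
 *	const struct pci_epc_features *features;
 *	enum pci_barno bar;
 *
 *	features = pci_epc_get_features(epc, func_no, vfunc_no);
 *	if (!features)
 *		return -EOPNOTSUPP;
 *
 *	for (bar = pci_epc_get_first_free_bar(features); bar != NO_BAR;
 *	     bar = pci_epc_get_next_free_bar(features, bar + 1))
 *		dev_dbg(&epc->dev, "BAR%d is usable\n", bar);
 */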

/**
 * pci_epc_get_features() - get the features supported by EPC
 * @epc: the features supported by *this* EPC device will be returned
 * @func_no: the features supported by the EPC device specific to the
 *	     endpoint function with func_no will be returned
 * @vfunc_no: the features supported by the EPC device specific to the
 *	     virtual endpoint function with vfunc_no will be returned
 *
 * Invoke to get the features provided by the EPC which may be
 * specific to an endpoint function. Returns pci_epc_features on success
 * and NULL for any failures.
 */
const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
						    u8 func_no, u8 vfunc_no)
{
	const struct pci_epc_features *epc_features;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return NULL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return NULL;

	if (!epc->ops->get_features)
		return NULL;

	mutex_lock(&epc->lock);
	epc_features = epc->ops->get_features(epc, func_no, vfunc_no);
	mutex_unlock(&epc->lock);

	return epc_features;
}
EXPORT_SYMBOL_GPL(pci_epc_get_features);

/**
 * pci_epc_stop() - stop the PCI link
 * @epc: the link of the EPC device that has to be stopped
 *
 * Invoke to stop the PCI link
 */
void pci_epc_stop(struct pci_epc *epc)
{
	if (IS_ERR(epc) || !epc->ops->stop)
		return;

	mutex_lock(&epc->lock);
	epc->ops->stop(epc);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_stop);

/**
 * pci_epc_start() - start the PCI link
 * @epc: the link of *this* EPC device has to be started
 *
 * Invoke to start the PCI link
 */
int pci_epc_start(struct pci_epc *epc)
{
	int ret;

	if (IS_ERR(epc))
		return -EINVAL;

	if (!epc->ops->start)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->start(epc);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_start);

/**
 * pci_epc_raise_irq() - interrupt the host system
 * @epc: the EPC device which has to interrupt the host
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @type: specify the type of interrupt; INTX, MSI or MSI-X
 * @interrupt_num: the MSI or MSI-X interrupt number with range (1-N)
 *
 * Invoke to raise an INTX, MSI or MSI-X interrupt
 */
int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		      unsigned int type, u16 interrupt_num)
{
	int ret;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->raise_irq)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->raise_irq(epc, func_no, vfunc_no, type, interrupt_num);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
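
/*
 * Example (illustrative sketch only): raising the first MSI vector towards the
 * host once the link is up, assuming the IRQ type encoding used by the EPF
 * drivers under drivers/pci/endpoint/functions (PCI_IRQ_MSI from <linux/pci.h>).
 *
 *	ret = pci_epc_raise_irq(epc, func_no, vfunc_no, PCI_IRQ_MSI, 1);
 *	if (ret)
 *		dev_err(&epc->dev, "failed to raise MSI\n");
 */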

/**
 * pci_epc_map_msi_irq() - Map physical address to MSI address and return
 *                         MSI data
 * @epc: the EPC device which has the MSI capability
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @phys_addr: the physical address of the outbound region
 * @interrupt_num: the MSI interrupt number with range (1-N)
 * @entry_size: Size of Outbound address region for each interrupt
 * @msi_data: the data that should be written in order to raise MSI interrupt
 *            with interrupt number as 'interrupt num'
 * @msi_addr_offset: Offset of MSI address from the aligned outbound address
 *                   to which the MSI address is mapped
 *
 * Invoke to map physical address to MSI address and return MSI data. The
 * physical address should be an address in the outbound region. This is
 * required to implement doorbell functionality of NTB wherein EPC on either
 * side of the interface (primary and secondary) can directly write to the
 * physical address (in outbound region) of the other interface to ring
 * doorbell.
 */
int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size,
			u32 *msi_data, u32 *msi_addr_offset)
{
	int ret;

	if (IS_ERR_OR_NULL(epc))
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->map_msi_irq)
		return -EINVAL;

	mutex_lock(&epc->lock);
	ret = epc->ops->map_msi_irq(epc, func_no, vfunc_no, phys_addr,
				    interrupt_num, entry_size, msi_data,
				    msi_addr_offset);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_map_msi_irq);
/**
 * pci_epc_get_msi() - get the number of MSI interrupts allocated
 * @epc: the EPC device to which MSI interrupts were requested
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 *
 * Invoke to get the number of MSI interrupts allocated by the RC
 */
int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	int interrupt;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return 0;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return 0;

	if (!epc->ops->get_msi)
		return 0;

	mutex_lock(&epc->lock);
	interrupt = epc->ops->get_msi(epc, func_no, vfunc_no);
	mutex_unlock(&epc->lock);

	if (interrupt < 0)
		return 0;

	interrupt = 1 << interrupt;

	return interrupt;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msi);

/**
 * pci_epc_set_msi() - set the number of MSI interrupts required
 * @epc: the EPC device on which MSI has to be configured
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @interrupts: number of MSI interrupts required by the EPF
 *
 * Invoke to set the required number of MSI interrupts.
 */
int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
{
	int ret;
	u8 encode_int;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    interrupts < 1 || interrupts > 32)
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->set_msi)
		return 0;

	encode_int = order_base_2(interrupts);

	mutex_lock(&epc->lock);
	ret = epc->ops->set_msi(epc, func_no, vfunc_no, encode_int);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_msi);
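
/*
 * Example (illustrative sketch only): an endpoint function driver commonly
 * requests a number of MSI vectors at bind time and, once the link is up,
 * queries how many the host actually allocated before raising interrupts.
 *
 *	ret = pci_epc_set_msi(epc, func_no, vfunc_no, 16);
 *	if (ret)
 *		return ret;
 *
 *	// later, e.g. after the link_up event:
 *	nr_irqs = pci_epc_get_msi(epc, func_no, vfunc_no);
 *	if (!nr_irqs)
 *		dev_info(&epc->dev, "host did not enable MSI\n");
 */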

/**
 * pci_epc_get_msix() - get the number of MSI-X interrupts allocated
 * @epc: the EPC device to which MSI-X interrupts were requested
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 *
 * Invoke to get the number of MSI-X interrupts allocated by the RC
 */
int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	int interrupt;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return 0;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return 0;

	if (!epc->ops->get_msix)
		return 0;

	mutex_lock(&epc->lock);
	interrupt = epc->ops->get_msix(epc, func_no, vfunc_no);
	mutex_unlock(&epc->lock);

	if (interrupt < 0)
		return 0;

	return interrupt + 1;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msix);

/**
 * pci_epc_set_msix() - set the number of MSI-X interrupts required
 * @epc: the EPC device on which MSI-X has to be configured
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @interrupts: number of MSI-X interrupts required by the EPF
 * @bir: BAR where the MSI-X table resides
 * @offset: Offset pointing to the start of MSI-X table
 *
 * Invoke to set the required number of MSI-X interrupts.
 */
int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		     u16 interrupts, enum pci_barno bir, u32 offset)
{
	int ret;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    interrupts < 1 || interrupts > 2048)
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->set_msix)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->set_msix(epc, func_no, vfunc_no, interrupts - 1, bir,
				 offset);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_msix);

/**
 * pci_epc_unmap_addr() - unmap CPU address from PCI address
 * @epc: the EPC device on which address is allocated
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @phys_addr: physical address of the local system
 *
 * Invoke to unmap the CPU address from PCI address.
 */
void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			phys_addr_t phys_addr)
{
	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return;

	if (!epc->ops->unmap_addr)
		return;

	mutex_lock(&epc->lock);
	epc->ops->unmap_addr(epc, func_no, vfunc_no, phys_addr);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);

/**
 * pci_epc_map_addr() - map CPU address to PCI address
 * @epc: the EPC device on which address is allocated
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @phys_addr: physical address of the local system
 * @pci_addr: PCI address to which the physical address should be mapped
 * @size: the size of the allocation
 *
 * Invoke to map CPU address with PCI address.
 */
int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		     phys_addr_t phys_addr, u64 pci_addr, size_t size)
{
	int ret;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->map_addr)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->map_addr(epc, func_no, vfunc_no, phys_addr, pci_addr,
				 size);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_map_addr);
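
/*
 * Example (illustrative sketch only): mapping a host (RC) PCI address into the
 * controller's outbound window so the endpoint can access host memory.
 * host_pci_addr is a hypothetical PCI address previously communicated by the
 * host; pci_epc_mem_alloc_addr()/pci_epc_mem_free_addr() are the EPC
 * address-space management helpers.
 *
 *	void __iomem *vaddr;
 *	phys_addr_t phys;
 *
 *	vaddr = pci_epc_mem_alloc_addr(epc, &phys, SZ_4K);
 *	if (!vaddr)
 *		return -ENOMEM;
 *
 *	ret = pci_epc_map_addr(epc, func_no, vfunc_no, phys, host_pci_addr, SZ_4K);
 *	if (ret)
 *		goto free_addr;
 *
 *	// ... read/write host memory through vaddr ...
 *
 *	pci_epc_unmap_addr(epc, func_no, vfunc_no, phys);
 * free_addr:
 *	pci_epc_mem_free_addr(epc, phys, vaddr, SZ_4K);
 */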

/**
 * pci_epc_clear_bar() - reset the BAR
 * @epc: the EPC device for which the BAR has to be cleared
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @epf_bar: the struct epf_bar that contains the BAR information
 *
 * Invoke to reset the BAR of the endpoint device.
 */
void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		       struct pci_epf_bar *epf_bar)
{
	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    (epf_bar->barno == BAR_5 &&
	     epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
		return;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return;

	if (!epc->ops->clear_bar)
		return;

	mutex_lock(&epc->lock);
	epc->ops->clear_bar(epc, func_no, vfunc_no, epf_bar);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_clear_bar);

/**
 * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
 * @epc: the EPC device on which BAR has to be configured
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @epf_bar: the struct epf_bar that contains the BAR information
 *
 * Invoke to configure the BAR of the endpoint device.
 */
int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		    struct pci_epf_bar *epf_bar)
{
	int ret;
	int flags = epf_bar->flags;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    (epf_bar->barno == BAR_5 &&
	     flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
	    (flags & PCI_BASE_ADDRESS_SPACE_IO &&
	     flags & PCI_BASE_ADDRESS_IO_MASK) ||
	    (upper_32_bits(epf_bar->size) &&
	     !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->set_bar)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->set_bar(epc, func_no, vfunc_no, epf_bar);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_bar);
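
/*
 * Example (illustrative sketch only): advertising BAR 0 to the host from an
 * EPF driver. The pci_epf_bar (barno, phys_addr, size, flags) is assumed to
 * have been populated already, typically by pci_epf_alloc_space().
 *
 *	struct pci_epf_bar *epf_bar = &epf->bar[BAR_0];
 *
 *	ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar);
 *	if (ret)
 *		return ret;
 *
 *	// and on teardown:
 *	pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar);
 */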

/**
 * pci_epc_write_header() - write standard configuration header
 * @epc: the EPC device to which the configuration header should be written
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @header: standard configuration header fields
 *
 * Invoke to write the configuration header to the endpoint controller. Every
 * endpoint controller will have a dedicated location to which the standard
 * configuration header would be written. The callback function should write
 * the header fields to this dedicated location.
 */
int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			 struct pci_epf_header *header)
{
	int ret;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	/* Only Virtual Function #1 has deviceID */
	if (vfunc_no > 1)
		return -EINVAL;

	if (!epc->ops->write_header)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->write_header(epc, func_no, vfunc_no, header);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_write_header);
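
/*
 * Example (illustrative sketch only): writing the configuration header for a
 * function, typically from an EPF driver's ->bind() path. The IDs below are
 * placeholders; real drivers take them from their struct pci_epf_header.
 *
 *	static struct pci_epf_header my_header = {
 *		.vendorid	= PCI_ANY_ID,
 *		.deviceid	= PCI_ANY_ID,
 *		.baseclass_code	= PCI_CLASS_OTHERS,
 *		.interrupt_pin	= PCI_INTERRUPT_INTA,
 *	};
 *
 *	ret = pci_epc_write_header(epc, func_no, vfunc_no, &my_header);
 */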

/**
 * pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller
 * @epc: the EPC device to which the endpoint function should be added
 * @epf: the endpoint function to be added
 * @type: Identifies if the EPC is connected to the primary or secondary
 *        interface of EPF
 *
 * A PCI endpoint device can have one or more functions. In the case of PCIe,
 * the specification allows up to 8 PCIe endpoint functions. Invoke
 * pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller.
 */
int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
		    enum pci_epc_interface_type type)
{
	struct list_head *list;
	u32 func_no;
	int ret = 0;

	if (IS_ERR_OR_NULL(epc) || epf->is_vf)
		return -EINVAL;

	if (type == PRIMARY_INTERFACE && epf->epc)
		return -EBUSY;

	if (type == SECONDARY_INTERFACE && epf->sec_epc)
		return -EBUSY;

	mutex_lock(&epc->list_lock);
	func_no = find_first_zero_bit(&epc->function_num_map,
				      BITS_PER_LONG);
	if (func_no >= BITS_PER_LONG) {
		ret = -EINVAL;
		goto ret;
	}

	if (func_no > epc->max_functions - 1) {
		dev_err(&epc->dev, "Exceeding max supported Function Number\n");
		ret = -EINVAL;
		goto ret;
	}

	set_bit(func_no, &epc->function_num_map);
	if (type == PRIMARY_INTERFACE) {
		epf->func_no = func_no;
		epf->epc = epc;
		list = &epf->list;
	} else {
		epf->sec_epc_func_no = func_no;
		epf->sec_epc = epc;
		list = &epf->sec_epc_list;
	}

	list_add_tail(list, &epc->pci_epf);
ret:
	mutex_unlock(&epc->list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_add_epf);

/**
 * pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller
 * @epc: the EPC device from which the endpoint function should be removed
 * @epf: the endpoint function to be removed
 * @type: identifies if the EPC is connected to the primary or secondary
 *        interface of EPF
 *
 * Invoke to remove PCI endpoint function from the endpoint controller.
 */
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
			enum pci_epc_interface_type type)
{
	struct list_head *list;
	u32 func_no = 0;

	if (IS_ERR_OR_NULL(epc) || !epf)
		return;

	if (type == PRIMARY_INTERFACE) {
		func_no = epf->func_no;
		list = &epf->list;
	} else {
		func_no = epf->sec_epc_func_no;
		list = &epf->sec_epc_list;
	}

	mutex_lock(&epc->list_lock);
	clear_bit(func_no, &epc->function_num_map);
	list_del(list);
	epf->epc = NULL;
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_remove_epf);

/**
 * pci_epc_linkup() - Notify the EPF device that EPC device has established a
 *		      connection with the Root Complex.
 * @epc: the EPC device which has established link with the host
 *
 * Invoke to notify the EPF device that the EPC device has established a
 * connection with the Root Complex.
 */
void pci_epc_linkup(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->link_up)
			epf->event_ops->link_up(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_linkup);

/**
 * pci_epc_linkdown() - Notify the EPF device that EPC device has dropped the
 *			connection with the Root Complex.
 * @epc: the EPC device which has dropped the link with the host
 *
 * Invoke to notify the EPF device that the EPC device has dropped the
 * connection with the Root Complex.
 */
void pci_epc_linkdown(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->link_down)
			epf->event_ops->link_down(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_linkdown);

/**
 * pci_epc_init_notify() - Notify the EPF device that EPC device initialization
 *                         is completed.
 * @epc: the EPC device whose initialization is completed
 *
 * Invoke to notify the EPF device that the EPC device's initialization
 * is completed.
 */
void pci_epc_init_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->epc_init)
			epf->event_ops->epc_init(epf);
		mutex_unlock(&epf->lock);
	}
	epc->init_complete = true;
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_init_notify);

/**
 * pci_epc_notify_pending_init() - Notify the pending EPC device initialization
 *                                 complete to the EPF device
 * @epc: the EPC device whose initialization is pending to be notified
 * @epf: the EPF device to be notified
 *
 * Invoke to deliver a pending EPC initialization-complete notification to the
 * EPF device. This is used when the EPC initialization completed before the
 * EPF driver was bound.
 */
void pci_epc_notify_pending_init(struct pci_epc *epc, struct pci_epf *epf)
{
	if (epc->init_complete) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->epc_init)
			epf->event_ops->epc_init(epf);
		mutex_unlock(&epf->lock);
	}
}
EXPORT_SYMBOL_GPL(pci_epc_notify_pending_init);

/**
 * pci_epc_deinit_notify() - Notify the EPF device about EPC deinitialization
 * @epc: the EPC device whose deinitialization is completed
 *
 * Invoke to notify the EPF device that the EPC deinitialization is completed.
 */
void pci_epc_deinit_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->epc_deinit)
			epf->event_ops->epc_deinit(epf);
		mutex_unlock(&epf->lock);
	}
	epc->init_complete = false;
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_deinit_notify);

/**
 * pci_epc_bus_master_enable_notify() - Notify the EPF device that the EPC
 *					device has received the Bus Master
 *					Enable event from the Root Complex
 * @epc: the EPC device that received the Bus Master Enable event
 *
 * Notify the EPF device that the EPC device has generated the Bus Master
 * Enable event due to the host setting the Bus Master Enable bit in the
 * Command register.
 */
void pci_epc_bus_master_enable_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->bus_master_enable)
			epf->event_ops->bus_master_enable(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_bus_master_enable_notify);

/**
 * pci_epc_destroy() - destroy the EPC device
 * @epc: the EPC device that has to be destroyed
 *
 * Invoke to destroy the PCI EPC device
 */
void pci_epc_destroy(struct pci_epc *epc)
{
	pci_ep_cfs_remove_epc_group(epc->group);
	device_unregister(&epc->dev);

#ifdef CONFIG_PCI_DOMAINS_GENERIC
	pci_bus_release_domain_nr(&epc->dev, epc->domain_nr);
#endif
}
EXPORT_SYMBOL_GPL(pci_epc_destroy);

/**
 * devm_pci_epc_destroy() - destroy the EPC device
 * @dev: device that wants to destroy the EPC
 * @epc: the EPC device that has to be destroyed
 *
 * Invoke to destroy the devres associated with this
 * pci_epc and destroy the EPC device.
 */
void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
{
	int r;

	r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match,
			   epc);
	dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
}
EXPORT_SYMBOL_GPL(devm_pci_epc_destroy);

static void pci_epc_release(struct device *dev)
{
	kfree(to_pci_epc(dev));
}

/**
 * __pci_epc_create() - create a new endpoint controller (EPC) device
 * @dev: device that is creating the new EPC
 * @ops: function pointers for performing EPC operations
 * @owner: the owner of the module that creates the EPC device
 *
 * Invoke to create a new EPC device and add it to pci_epc class.
 */
struct pci_epc *
__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
		 struct module *owner)
{
	int ret;
	struct pci_epc *epc;

	if (WARN_ON(!dev)) {
		ret = -EINVAL;
		goto err_ret;
	}

	epc = kzalloc(sizeof(*epc), GFP_KERNEL);
	if (!epc) {
		ret = -ENOMEM;
		goto err_ret;
	}

	mutex_init(&epc->lock);
	mutex_init(&epc->list_lock);
	INIT_LIST_HEAD(&epc->pci_epf);

	device_initialize(&epc->dev);
	epc->dev.class = &pci_epc_class;
	epc->dev.parent = dev;
	epc->dev.release = pci_epc_release;
	epc->ops = ops;

#ifdef CONFIG_PCI_DOMAINS_GENERIC
	epc->domain_nr = pci_bus_find_domain_nr(NULL, dev);
#else
	/*
	 * TODO: If the architecture doesn't support generic PCI
	 * domains, then a custom implementation has to be used.
	 */
	WARN_ONCE(1, "This architecture doesn't support generic PCI domains\n");
#endif

	ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
	if (ret)
		goto put_dev;

	ret = device_add(&epc->dev);
	if (ret)
		goto put_dev;

	epc->group = pci_ep_cfs_add_epc_group(dev_name(dev));

	return epc;

put_dev:
	put_device(&epc->dev);

err_ret:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__pci_epc_create);

/**
 * __devm_pci_epc_create() - create a new endpoint controller (EPC) device
 * @dev: device that is creating the new EPC
 * @ops: function pointers for performing EPC operations
 * @owner: the owner of the module that creates the EPC device
 *
 * Invoke to create a new EPC device and add it to pci_epc class.
 * In addition, it associates the EPC with @dev using devres. On driver
 * detach, the release function is invoked on the devres data and the
 * devres data is then freed.
 */
struct pci_epc *
__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
		      struct module *owner)
{
	struct pci_epc **ptr, *epc;

	ptr = devres_alloc(devm_pci_epc_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	epc = __pci_epc_create(dev, ops, owner);
	if (!IS_ERR(epc)) {
		*ptr = epc;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return epc;
}
EXPORT_SYMBOL_GPL(__devm_pci_epc_create);
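
/*
 * Example (illustrative sketch only): a platform EPC driver typically creates
 * its EPC through the devm_pci_epc_create() wrapper from <linux/pci-epc.h>.
 * my_epc_ops and its callbacks are placeholders for the driver's real ops.
 *
 *	static const struct pci_epc_ops my_epc_ops = {
 *		.write_header	= my_write_header,
 *		.set_bar	= my_set_bar,
 *		.raise_irq	= my_raise_irq,
 *	};
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct pci_epc *epc;
 *
 *		epc = devm_pci_epc_create(&pdev->dev, &my_epc_ops);
 *		if (IS_ERR(epc))
 *			return PTR_ERR(epc);
 *
 *		epc->max_functions = 1;
 *		return 0;
 *	}
 */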

static int __init pci_epc_init(void)
{
	return class_register(&pci_epc_class);
}
module_init(pci_epc_init);

static void __exit pci_epc_exit(void)
{
	class_unregister(&pci_epc_class);
}
module_exit(pci_epc_exit);

MODULE_DESCRIPTION("PCI EPC Library");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");