// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

#include "rvu_reg.h"
#include "mbox.h"
#include "rvu_trace.h"

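/* Both the TX and RX areas of a mailbox region start with a struct mbox_hdr;
 * the messages themselves follow at the next MBOX_MSG_ALIGN boundary.
 * msgs_offset is that fixed payload offset.
 */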
static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);

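/* Lockless reset of one device's TX and RX mailbox headers. Callers that use
 * this variant directly must provide their own serialization;
 * otx2_mbox_reset() below is the variant that takes mdev->mbox_lock.
 */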
void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *tx_hdr, *rx_hdr;
	void *hw_mbase = mdev->hwbase;

	tx_hdr = hw_mbase + mbox->tx_start;
	rx_hdr = hw_mbase + mbox->rx_start;

	mdev->msg_size = 0;
	mdev->rsp_size = 0;
	tx_hdr->num_msgs = 0;
	tx_hdr->msg_size = 0;
	rx_hdr->num_msgs = 0;
	rx_hdr->msg_size = 0;
}
EXPORT_SYMBOL(__otx2_mbox_reset);

void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];

	spin_lock(&mdev->mbox_lock);
	__otx2_mbox_reset(mbox, devid);
	spin_unlock(&mdev->mbox_lock);
}
EXPORT_SYMBOL(otx2_mbox_reset);

void otx2_mbox_destroy(struct otx2_mbox *mbox)
{
	mbox->reg_base = NULL;
	mbox->hwbase = NULL;

	kfree(mbox->dev);
	mbox->dev = NULL;
}
EXPORT_SYMBOL(otx2_mbox_destroy);

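/* The TX/RX window layout is mirrored between the two ends of a channel:
 * the AF's "down" TX window is the PF's "down" RX window and vice versa,
 * and likewise for the PF<->VF and "up" directions. That is why the
 * PFAF/VFPF cases below swap the *_TX_* and *_RX_* offsets and sizes.
 */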
static int otx2_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev,
			   void *reg_base, int direction, int ndevs)
{
	switch (direction) {
	case MBOX_DIR_AFPF:
	case MBOX_DIR_PFVF:
		mbox->tx_start = MBOX_DOWN_TX_START;
		mbox->rx_start = MBOX_DOWN_RX_START;
		mbox->tx_size  = MBOX_DOWN_TX_SIZE;
		mbox->rx_size  = MBOX_DOWN_RX_SIZE;
		break;
	case MBOX_DIR_PFAF:
	case MBOX_DIR_VFPF:
		mbox->tx_start = MBOX_DOWN_RX_START;
		mbox->rx_start = MBOX_DOWN_TX_START;
		mbox->tx_size  = MBOX_DOWN_RX_SIZE;
		mbox->rx_size  = MBOX_DOWN_TX_SIZE;
		break;
	case MBOX_DIR_AFPF_UP:
	case MBOX_DIR_PFVF_UP:
		mbox->tx_start = MBOX_UP_TX_START;
		mbox->rx_start = MBOX_UP_RX_START;
		mbox->tx_size  = MBOX_UP_TX_SIZE;
		mbox->rx_size  = MBOX_UP_RX_SIZE;
		break;
	case MBOX_DIR_PFAF_UP:
	case MBOX_DIR_VFPF_UP:
		mbox->tx_start = MBOX_UP_RX_START;
		mbox->rx_start = MBOX_UP_TX_START;
		mbox->tx_size  = MBOX_UP_RX_SIZE;
		mbox->rx_size  = MBOX_UP_TX_SIZE;
		break;
	default:
		return -ENODEV;
	}

	switch (direction) {
	case MBOX_DIR_AFPF:
	case MBOX_DIR_AFPF_UP:
		mbox->trigger = RVU_AF_AFPF_MBOX0;
		mbox->tr_shift = 4;
		break;
	case MBOX_DIR_PFAF:
	case MBOX_DIR_PFAF_UP:
		mbox->trigger = RVU_PF_PFAF_MBOX1;
		mbox->tr_shift = 0;
		break;
	case MBOX_DIR_PFVF:
	case MBOX_DIR_PFVF_UP:
		mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
		mbox->tr_shift = 12;
		break;
	case MBOX_DIR_VFPF:
	case MBOX_DIR_VFPF_UP:
		mbox->trigger = RVU_VF_VFPF_MBOX1;
		mbox->tr_shift = 0;
		break;
	default:
		return -ENODEV;
	}

	mbox->reg_base = reg_base;
	mbox->pdev = pdev;

	mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
	if (!mbox->dev) {
		otx2_mbox_destroy(mbox);
		return -ENOMEM;
	}
	mbox->ndevs = ndevs;

	return 0;
}

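/* Illustrative caller-side sketch (names are hypothetical, not taken from
 * this file): a driver with one contiguous mailbox region covering all of
 * its peers would map it and hand it to otx2_mbox_init():
 *
 *	void __iomem *hwbase = ioremap_wc(mbox_pa, MBOX_SIZE * num_pfs);
 *
 *	err = otx2_mbox_init(&afpf_mbox, (void *)hwbase, pdev, reg_base,
 *			     MBOX_DIR_AFPF, num_pfs);
 *	if (err)
 *		goto unmap;
 */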
int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
		   void *reg_base, int direction, int ndevs)
{
	struct otx2_mbox_dev *mdev;
	int devid, err;

	err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs);
	if (err)
		return err;

	mbox->hwbase = hwbase;

	for (devid = 0; devid < ndevs; devid++) {
		mdev = &mbox->dev[devid];
		mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE);
		mdev->hwbase = mdev->mbase;
		spin_lock_init(&mdev->mbox_lock);
		/* Init header to reset value */
		otx2_mbox_reset(mbox, devid);
	}

	return 0;
}
EXPORT_SYMBOL(otx2_mbox_init);

/* Initialize mailbox with the set of mailbox region addresses
 * in the array hwbase.
 */
int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase,
			   struct pci_dev *pdev, void *reg_base,
			   int direction, int ndevs, unsigned long *pf_bmap)
{
	struct otx2_mbox_dev *mdev;
	int devid, err;

	err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs);
	if (err)
		return err;

	mbox->hwbase = hwbase[0];

	for (devid = 0; devid < ndevs; devid++) {
		if (!test_bit(devid, pf_bmap))
			continue;

		mdev = &mbox->dev[devid];
		mdev->mbase = hwbase[devid];
		mdev->hwbase = hwbase[devid];
		spin_lock_init(&mdev->mbox_lock);
		/* Init header to reset value */
		otx2_mbox_reset(mbox, devid);
	}

	return 0;
}
EXPORT_SYMBOL(otx2_mbox_regions_init);

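/* Two ways to wait for the peer to acknowledge all queued messages:
 * otx2_mbox_wait_for_rsp() sleeps between polls (process context only,
 * since it uses usleep_range()), while otx2_mbox_busy_poll_for_rsp()
 * spins with cpu_relax() for contexts where sleeping is not allowed.
 */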
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct device *sender = &mbox->pdev->dev;

	while (!time_after(jiffies, timeout)) {
		if (mdev->num_msgs == mdev->msgs_acked)
			return 0;
		usleep_range(800, 1000);
	}
	dev_dbg(sender, "timed out while waiting for rsp\n");
	return -EIO;
}
EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);

int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	unsigned long timeout = jiffies + 1 * HZ;

	while (!time_after(jiffies, timeout)) {
		if (mdev->num_msgs == mdev->msgs_acked)
			return 0;
		cpu_relax();
	}
	return -EIO;
}
EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);

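/* Send path: if a bounce buffer is in use, copy the queued messages into
 * the shared hardware mailbox, publish the total size in the TX header,
 * and only write num_msgs (the "go" flag for the peer) after a write
 * barrier. Finally the trigger register is written, OR-ing in the
 * down/up notification bit passed via 'data'.
 */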
static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *tx_hdr, *rx_hdr;
	void *hw_mbase = mdev->hwbase;
	u64 intr_val;

	tx_hdr = hw_mbase + mbox->tx_start;
	rx_hdr = hw_mbase + mbox->rx_start;

	/* If bounce buffer is implemented copy mbox messages from
	 * bounce buffer to hw mbox memory.
	 */
	if (mdev->mbase != hw_mbase)
		memcpy(hw_mbase + mbox->tx_start + msgs_offset,
		       mdev->mbase + mbox->tx_start + msgs_offset,
		       mdev->msg_size);

	spin_lock(&mdev->mbox_lock);

	tx_hdr->msg_size = mdev->msg_size;

	/* Reset header for next messages */
	mdev->msg_size = 0;
	mdev->rsp_size = 0;
	mdev->msgs_acked = 0;

	/* Sync mbox data into memory */
	smp_wmb();

	/* num_msgs != 0 signals to the peer that the buffer has a number of
	 * messages.  So this should be written after writing all the messages
	 * to the shared memory.
	 */
	tx_hdr->num_msgs = mdev->num_msgs;
	rx_hdr->num_msgs = 0;

	trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size);

	spin_unlock(&mdev->mbox_lock);

	/* Check if interrupt pending */
	intr_val = readq((void __iomem *)mbox->reg_base +
		     (mbox->trigger | (devid << mbox->tr_shift)));

	intr_val |= data;
	/* The interrupt should be fired after num_msgs is written
	 * to the shared memory
	 */
	writeq(intr_val, (void __iomem *)mbox->reg_base +
	       (mbox->trigger | (devid << mbox->tr_shift)));
}

void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
{
	otx2_mbox_msg_send_data(mbox, devid, MBOX_DOWN_MSG);
}
EXPORT_SYMBOL(otx2_mbox_msg_send);

void otx2_mbox_msg_send_up(struct otx2_mbox *mbox, int devid)
{
	otx2_mbox_msg_send_data(mbox, devid, MBOX_UP_MSG);
}
EXPORT_SYMBOL(otx2_mbox_msg_send_up);

bool otx2_mbox_wait_for_zero(struct otx2_mbox *mbox, int devid)
{
	u64 data;

	data = readq((void __iomem *)mbox->reg_base +
		     (mbox->trigger | (devid << mbox->tr_shift)));

	/* If data is non-zero wait for ~1ms and return to caller
	 * whether data has changed to zero or not after the wait.
	 */
	if (!data)
		return true;

	usleep_range(950, 1000);

	data = readq((void __iomem *)mbox->reg_base +
		     (mbox->trigger | (devid << mbox->tr_shift)));

	return data == 0;
}
EXPORT_SYMBOL(otx2_mbox_wait_for_zero);

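/* Typical request/response sequence on the sending side (a sketch only;
 * the request and response structures and their fields depend on the
 * message ID and are not defined in this file):
 *
 *	msg = otx2_mbox_alloc_msg_rsp(mbox, devid, sizeof(*req), sizeof(*rsp));
 *	if (!msg)
 *		return -ENOMEM;
 *	... fill the request, set msg->id and msg->sig ...
 *	otx2_mbox_msg_send(mbox, devid);
 *	err = otx2_mbox_wait_for_rsp(mbox, devid);
 *	if (!err)
 *		rsp = otx2_mbox_get_rsp(mbox, devid, msg);
 */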
struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
					    int size, int size_rsp)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_msghdr *msghdr = NULL;

	spin_lock(&mdev->mbox_lock);
	size = ALIGN(size, MBOX_MSG_ALIGN);
	size_rsp = ALIGN(size_rsp, MBOX_MSG_ALIGN);
	/* Check if there is space in mailbox */
	if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset)
		goto exit;
	if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset)
		goto exit;

	if (mdev->msg_size == 0)
		mdev->num_msgs = 0;
	mdev->num_msgs++;

	msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size;

	/* Clear the whole msg region */
	memset(msghdr, 0, size);
	/* Init message header with reset values */
	msghdr->ver = OTX2_MBOX_VERSION;
	mdev->msg_size += size;
	mdev->rsp_size += size_rsp;
	msghdr->next_msgoff = mdev->msg_size + msgs_offset;
exit:
	spin_unlock(&mdev->mbox_lock);

	return msghdr;
}
EXPORT_SYMBOL(otx2_mbox_alloc_msg_rsp);

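/* Responses are laid out in the RX region in the same order as the requests
 * in the TX region, so the lookup below walks both chains in lockstep via
 * next_msgoff until it finds the request header that was passed in, and
 * returns the response at the matching position.
 */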
struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
				      struct mbox_msghdr *msg)
{
	unsigned long imsg = mbox->tx_start + msgs_offset;
	unsigned long irsp = mbox->rx_start + msgs_offset;
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	u16 msgs;

	spin_lock(&mdev->mbox_lock);

	if (mdev->num_msgs != mdev->msgs_acked)
		goto error;

	for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
		struct mbox_msghdr *pmsg = mdev->mbase + imsg;
		struct mbox_msghdr *prsp = mdev->mbase + irsp;

		if (msg == pmsg) {
			if (pmsg->id != prsp->id)
				goto error;
			spin_unlock(&mdev->mbox_lock);
			return prsp;
		}

		imsg = mbox->tx_start + pmsg->next_msgoff;
		irsp = mbox->rx_start + prsp->next_msgoff;
	}

error:
	spin_unlock(&mdev->mbox_lock);
	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL(otx2_mbox_get_rsp);

int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid)
{
	unsigned long ireq = mbox->tx_start + msgs_offset;
	unsigned long irsp = mbox->rx_start + msgs_offset;
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	int rc = -ENODEV;
	u16 msgs;

	spin_lock(&mdev->mbox_lock);

	if (mdev->num_msgs != mdev->msgs_acked)
		goto exit;

	for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
		struct mbox_msghdr *preq = mdev->mbase + ireq;
		struct mbox_msghdr *prsp = mdev->mbase + irsp;

		if (preq->id != prsp->id) {
			trace_otx2_msg_check(mbox->pdev, preq->id,
					     prsp->id, prsp->rc);
			goto exit;
		}
		if (prsp->rc) {
			rc = prsp->rc;
			trace_otx2_msg_check(mbox->pdev, preq->id,
					     prsp->id, prsp->rc);
			goto exit;
		}

		ireq = mbox->tx_start + preq->next_msgoff;
		irsp = mbox->rx_start + prsp->next_msgoff;
	}
	rc = 0;
exit:
	spin_unlock(&mdev->mbox_lock);
	return rc;
}
EXPORT_SYMBOL(otx2_mbox_check_rsp_msgs);

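/* Queue a bare msg_rsp carrying MBOX_MSG_INVALID for a request that could
 * not be handled; the caller is still responsible for sending the mailbox.
 */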
int
otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id)
{
	struct msg_rsp *rsp;

	rsp = (struct msg_rsp *)
	       otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
	if (!rsp)
		return -ENOMEM;
	rsp->hdr.id = id;
	rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
	rsp->hdr.rc = MBOX_MSG_INVALID;
	rsp->hdr.pcifunc = pcifunc;
	return 0;
}
EXPORT_SYMBOL(otx2_reply_invalid_msg);

bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[devid];
	bool ret;

	spin_lock(&mdev->mbox_lock);
	ret = mdev->num_msgs != 0;
	spin_unlock(&mdev->mbox_lock);

	return ret;
}
EXPORT_SYMBOL(otx2_mbox_nonempty);

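/* Map a message ID to its name by expanding the MBOX_MESSAGES X-macro into
 * one switch case per message ID.
 */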
const char *otx2_mbox_id2name(u16 id)
{
	switch (id) {
#define M(_name, _id, _1, _2, _3) case _id: return # _name;
	MBOX_MESSAGES
#undef M
	default:
		return "INVALID ID";
	}
}
EXPORT_SYMBOL(otx2_mbox_id2name);

MODULE_AUTHOR("Marvell.");
MODULE_DESCRIPTION("Marvell RVU NIC Mbox helpers");
MODULE_LICENSE("GPL v2");