1 /*
2 * Linux driver for VMware's vmxnet3 ethernet NIC.
3 *
4 * Copyright (C) 2008-2024, VMware, Inc. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
14 * details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * The full GNU General Public License is included in this distribution in
21 * the file called "COPYING".
22 *
23 * Maintained by: pv-drivers@vmware.com
24 *
25 */
26
27 #include <linux/module.h>
28 #include <net/ip6_checksum.h>
29
30 #include "vmxnet3_int.h"
31 #include "vmxnet3_xdp.h"
32
33 char vmxnet3_driver_name[] = "vmxnet3";
34 #define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
35
36 /*
37 * PCI Device ID Table
38 * Last entry must be all 0s
39 */
40 static const struct pci_device_id vmxnet3_pciid_table[] = {
41 {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
42 {0}
43 };
44
45 MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
46
47 static int enable_mq = 1;
48
49 static void
50 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac);
51
52 /*
53 * Enable/Disable the given intr
54 */
55 static void
56 vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
57 {
58 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
59 }
60
61
62 static void
63 vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
64 {
65 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
66 }
67
68
69 /*
70 * Enable/Disable all intrs used by the device
71 */
72 static void
73 vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
74 {
75 int i;
76
77 for (i = 0; i < adapter->intr.num_intrs; i++)
78 vmxnet3_enable_intr(adapter, i);
79 if (!VMXNET3_VERSION_GE_6(adapter) ||
80 !adapter->queuesExtEnabled) {
81 adapter->shared->devRead.intrConf.intrCtrl &=
82 cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
83 } else {
84 adapter->shared->devReadExt.intrConfExt.intrCtrl &=
85 cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
86 }
87 }
88
89
90 static void
91 vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
92 {
93 int i;
94
95 if (!VMXNET3_VERSION_GE_6(adapter) ||
96 !adapter->queuesExtEnabled) {
97 adapter->shared->devRead.intrConf.intrCtrl |=
98 cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
99 } else {
100 adapter->shared->devReadExt.intrConfExt.intrCtrl |=
101 cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
102 }
103 for (i = 0; i < adapter->intr.num_intrs; i++)
104 vmxnet3_disable_intr(adapter, i);
105 }
106
107
108 static void
109 vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
110 {
111 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
112 }
113
114
115 static bool
116 vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
117 {
118 return tq->stopped;
119 }
120
121
122 static void
123 vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
124 {
125 tq->stopped = false;
126 netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
127 }
128
129
130 static void
131 vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
132 {
133 tq->stopped = false;
134 netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
135 }
136
137
138 static void
139 vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
140 {
141 tq->stopped = true;
142 tq->num_stop++;
143 netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
144 }
145
146 static u64
147 vmxnet3_get_cycles(int pmc)
148 {
149 #ifdef CONFIG_X86
150 return native_read_pmc(pmc);
151 #else
152 return 0;
153 #endif
154 }
155
156 static bool
157 vmxnet3_apply_timestamp(struct vmxnet3_tx_queue *tq, u16 rate)
158 {
159 #ifdef CONFIG_X86
160 if (rate > 0) {
161 if (tq->tsPktCount == 1) {
162 if (rate != 1)
163 tq->tsPktCount = rate;
164 return true;
165 }
166 tq->tsPktCount--;
167 }
168 #endif
169 return false;
170 }
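/*
 * Illustrative note (not part of the upstream source): vmxnet3_apply_timestamp()
 * implements 1-in-N sampling of packets for timestamping. Assuming
 * tq->tsPktCount starts at 1, a rate of 4 yields the return sequence
 *
 *	true, false, false, false, true, false, false, false, ...
 *
 * i.e. roughly one packet in four is timestamped; rate == 1 timestamps every
 * packet and rate == 0 (or a non-x86 build) timestamps none.
 */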
171
172 /* Check whether the capability is supported by the UPT device or
173 * whether UPT is even requested
174 */
175 bool
176 vmxnet3_check_ptcapability(u32 cap_supported, u32 cap)
177 {
178 if (cap_supported & (1UL << VMXNET3_DCR_ERROR) ||
179 cap_supported & (1UL << cap)) {
180 return true;
181 }
182
183 return false;
184 }
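/*
 * Usage sketch (illustrative only, not code from this file): callers pass the
 * capability word reported by the device together with the bit they care
 * about, e.g.
 *
 *	if (vmxnet3_check_ptcapability(adapter->dev_caps[0], SOME_PT_CAP))
 *		enable_the_feature();
 *
 * SOME_PT_CAP and enable_the_feature() are placeholders; the real capability
 * constants come from the vmxnet3 device headers. Note that the check also
 * returns true whenever the VMXNET3_DCR_ERROR bit is set in cap_supported.
 */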
185
186
187 /*
188 * Check the link state. This may start or stop the tx queue.
189 */
190 static void
191 vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
192 {
193 u32 ret;
194 int i;
195 unsigned long flags;
196
197 spin_lock_irqsave(&adapter->cmd_lock, flags);
198 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
199 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
200 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
201
202 adapter->link_speed = ret >> 16;
203 if (ret & 1) { /* Link is up. */
204 netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
205 adapter->link_speed);
206 netif_carrier_on(adapter->netdev);
207
208 if (affectTxQueue) {
209 for (i = 0; i < adapter->num_tx_queues; i++)
210 vmxnet3_tq_start(&adapter->tx_queue[i],
211 adapter);
212 }
213 } else {
214 netdev_info(adapter->netdev, "NIC Link is Down\n");
215 netif_carrier_off(adapter->netdev);
216
217 if (affectTxQueue) {
218 for (i = 0; i < adapter->num_tx_queues; i++)
219 vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
220 }
221 }
222 }
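/*
 * Illustrative decoding of the GET_LINK return value (not driver code):
 * bit 0 is the link-up flag and the upper 16 bits carry the speed in Mbps,
 * so for example
 *
 *	ret = 0x27100001;		// 0x2710 == 10000
 *	link_speed = ret >> 16;		// 10000 Mbps
 *	link_up    = ret & 1;		// 1, link is up
 */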
223
224 static void
225 vmxnet3_process_events(struct vmxnet3_adapter *adapter)
226 {
227 int i;
228 unsigned long flags;
229 u32 events = le32_to_cpu(adapter->shared->ecr);
230 if (!events)
231 return;
232
233 vmxnet3_ack_events(adapter, events);
234
235 /* Check if link state has changed */
236 if (events & VMXNET3_ECR_LINK)
237 vmxnet3_check_link(adapter, true);
238
239 /* Check if there is an error on xmit/recv queues */
240 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
241 spin_lock_irqsave(&adapter->cmd_lock, flags);
242 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
243 VMXNET3_CMD_GET_QUEUE_STATUS);
244 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
245
246 for (i = 0; i < adapter->num_tx_queues; i++)
247 if (adapter->tqd_start[i].status.stopped)
248 dev_err(&adapter->netdev->dev,
249 "%s: tq[%d] error 0x%x\n",
250 adapter->netdev->name, i, le32_to_cpu(
251 adapter->tqd_start[i].status.error));
252 for (i = 0; i < adapter->num_rx_queues; i++)
253 if (adapter->rqd_start[i].status.stopped)
254 dev_err(&adapter->netdev->dev,
255 "%s: rq[%d] error 0x%x\n",
256 adapter->netdev->name, i,
257 adapter->rqd_start[i].status.error);
258
259 schedule_work(&adapter->work);
260 }
261 }
262
263 #ifdef __BIG_ENDIAN_BITFIELD
264 /*
265 * The device expects the bitfields in shared structures to be written in
266 * little endian. When the CPU is big endian, the following routines are
267 * used to read and write the ABI correctly.
268 * The general technique used here is: double word bitfields are defined in
269 * the opposite order for big endian architectures. Before the driver reads
270 * them, the complete double word is translated using le32_to_cpu. Similarly,
271 * after the driver writes into bitfields, cpu_to_le32 is used to translate
272 * the double words into the required format.
273 * To avoid touching bits in the shared structure more than once, temporary
274 * descriptors are used. These are passed as srcDesc to the following functions.
275 */
276 static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
277 struct Vmxnet3_RxDesc *dstDesc)
278 {
279 u32 *src = (u32 *)srcDesc + 2;
280 u32 *dst = (u32 *)dstDesc + 2;
281 dstDesc->addr = le64_to_cpu(srcDesc->addr);
282 *dst = le32_to_cpu(*src);
283 dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
284 }
285
286 static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
287 struct Vmxnet3_TxDesc *dstDesc)
288 {
289 int i;
290 u32 *src = (u32 *)(srcDesc + 1);
291 u32 *dst = (u32 *)(dstDesc + 1);
292
293 /* Working backwards so that the gen bit is set at the end. */
294 for (i = 2; i > 0; i--) {
295 src--;
296 dst--;
297 *dst = cpu_to_le32(*src);
298 }
299 }
300
301
302 static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
303 struct Vmxnet3_RxCompDesc *dstDesc)
304 {
305 int i = 0;
306 u32 *src = (u32 *)srcDesc;
307 u32 *dst = (u32 *)dstDesc;
308 for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
309 *dst = le32_to_cpu(*src);
310 src++;
311 dst++;
312 }
313 }
314
315
316 /* Used to read bitfield values from double words. */
317 static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
318 {
319 u32 temp = le32_to_cpu(*bitfield);
320 u32 mask = ((1 << size) - 1) << pos;
321 temp &= mask;
322 temp >>= pos;
323 return temp;
324 }
325
326
327
328 #endif /* __BIG_ENDIAN_BITFIELD */
329
330 #ifdef __BIG_ENDIAN_BITFIELD
331
332 # define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
333 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
334 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
335 # define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
336 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
337 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
338 # define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
339 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
340 VMXNET3_TCD_GEN_SIZE)
341 # define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
342 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
343 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
344 (dstrcd) = (tmp); \
345 vmxnet3_RxCompToCPU((rcd), (tmp)); \
346 } while (0)
347 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
348 (dstrxd) = (tmp); \
349 vmxnet3_RxDescToCPU((rxd), (tmp)); \
350 } while (0)
351
352 #else
353
354 # define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
355 # define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
356 # define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
357 # define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
358 # define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
359 # define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
360
361 #endif /* __BIG_ENDIAN_BITFIELD */
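/*
 * Illustrative sketch (not part of the driver): the macros above hide the
 * endianness handling from the data path, so reading the gen bit of a Tx
 * descriptor looks the same on both architectures (txd and tq are
 * placeholders here):
 *
 *	if (VMXNET3_TXDESC_GET_GEN(&txd) == tq->tx_ring.gen)
 *		...
 *
 * On little endian this is a plain bitfield access (txd.gen); on big endian
 * it expands to get_bitfield32() on the dword selected by
 * VMXNET3_TXD_GEN_DWORD_SHIFT, which le32_to_cpu()s that dword before masking
 * out VMXNET3_TXD_GEN_SIZE bits at VMXNET3_TXD_GEN_SHIFT.
 */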
362
363
364 static void
365 vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
366 struct pci_dev *pdev)
367 {
368 u32 map_type = tbi->map_type;
369
370 if (map_type & VMXNET3_MAP_SINGLE)
371 dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
372 DMA_TO_DEVICE);
373 else if (map_type & VMXNET3_MAP_PAGE)
374 dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
375 DMA_TO_DEVICE);
376 else
377 BUG_ON(map_type & ~VMXNET3_MAP_XDP);
378
379 tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
380 }
381
382
383 static int
384 vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
385 struct pci_dev *pdev, struct vmxnet3_adapter *adapter,
386 struct xdp_frame_bulk *bq)
387 {
388 struct vmxnet3_tx_buf_info *tbi;
389 int entries = 0;
390 u32 map_type;
391
392 /* no out of order completion */
393 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
394 BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
395
396 tbi = &tq->buf_info[eop_idx];
397 BUG_ON(!tbi->skb);
398 map_type = tbi->map_type;
399 VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
400
401 while (tq->tx_ring.next2comp != eop_idx) {
402 vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
403 pdev);
404
405 /* update next2comp w/o tx_lock. Since we are marking more,
406 * instead of less, tx ring entries avail, the worst case is
407 * that the tx routine incorrectly re-queues a pkt due to
408 * insufficient tx ring entries.
409 */
410 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
411 entries++;
412 }
413
414 if (map_type & VMXNET3_MAP_XDP)
415 xdp_return_frame_bulk(tbi->xdpf, bq);
416 else
417 dev_kfree_skb_any(tbi->skb);
418
419 /* xdpf and skb are in an anonymous union. */
420 tbi->skb = NULL;
421
422 return entries;
423 }
424
425
426 static int
427 vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
428 struct vmxnet3_adapter *adapter)
429 {
430 union Vmxnet3_GenericDesc *gdesc;
431 struct xdp_frame_bulk bq;
432 int completed = 0;
433
434 xdp_frame_bulk_init(&bq);
435 rcu_read_lock();
436
437 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
438 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
439 /* Prevent any &gdesc->tcd field from being (speculatively)
440 * read before (&gdesc->tcd)->gen is read.
441 */
442 dma_rmb();
443
444 completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
445 &gdesc->tcd), tq, adapter->pdev,
446 adapter, &bq);
447
448 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
449 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
450 }
451 xdp_flush_frame_bulk(&bq);
452 rcu_read_unlock();
453
454 if (completed) {
455 spin_lock(&tq->tx_lock);
456 if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
457 vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
458 VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
459 netif_carrier_ok(adapter->netdev))) {
460 vmxnet3_tq_wake(tq, adapter);
461 }
462 spin_unlock(&tq->tx_lock);
463 }
464 return completed;
465 }
466
467
468 static void
469 vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
470 struct vmxnet3_adapter *adapter)
471 {
472 struct xdp_frame_bulk bq;
473 u32 map_type;
474 int i;
475
476 xdp_frame_bulk_init(&bq);
477 rcu_read_lock();
478
479 while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
480 struct vmxnet3_tx_buf_info *tbi;
481
482 tbi = tq->buf_info + tq->tx_ring.next2comp;
483 map_type = tbi->map_type;
484
485 vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
486 if (tbi->skb) {
487 if (map_type & VMXNET3_MAP_XDP)
488 xdp_return_frame_bulk(tbi->xdpf, &bq);
489 else
490 dev_kfree_skb_any(tbi->skb);
491 tbi->skb = NULL;
492 }
493 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
494 }
495
496 xdp_flush_frame_bulk(&bq);
497 rcu_read_unlock();
498
499 /* sanity check, verify all buffers are indeed unmapped */
500 for (i = 0; i < tq->tx_ring.size; i++)
501 BUG_ON(tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
502
503 tq->tx_ring.gen = VMXNET3_INIT_GEN;
504 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
505
506 tq->comp_ring.gen = VMXNET3_INIT_GEN;
507 tq->comp_ring.next2proc = 0;
508 }
509
510
511 static void
512 vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
513 struct vmxnet3_adapter *adapter)
514 {
515 if (tq->tx_ring.base) {
516 dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
517 sizeof(struct Vmxnet3_TxDesc),
518 tq->tx_ring.base, tq->tx_ring.basePA);
519 tq->tx_ring.base = NULL;
520 }
521 if (tq->data_ring.base) {
522 dma_free_coherent(&adapter->pdev->dev,
523 tq->data_ring.size * tq->txdata_desc_size,
524 tq->data_ring.base, tq->data_ring.basePA);
525 tq->data_ring.base = NULL;
526 }
527 if (tq->ts_ring.base) {
528 dma_free_coherent(&adapter->pdev->dev,
529 tq->tx_ring.size * tq->tx_ts_desc_size,
530 tq->ts_ring.base, tq->ts_ring.basePA);
531 tq->ts_ring.base = NULL;
532 }
533 if (tq->comp_ring.base) {
534 dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
535 sizeof(struct Vmxnet3_TxCompDesc),
536 tq->comp_ring.base, tq->comp_ring.basePA);
537 tq->comp_ring.base = NULL;
538 }
539 kfree(tq->buf_info);
540 tq->buf_info = NULL;
541 }
542
543
544 /* Destroy all tx queues */
545 void
546 vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
547 {
548 int i;
549
550 for (i = 0; i < adapter->num_tx_queues; i++)
551 vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
552 }
553
554
555 static void
556 vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
557 struct vmxnet3_adapter *adapter)
558 {
559 int i;
560
561 /* reset the tx ring contents to 0 and reset the tx ring states */
562 memset(tq->tx_ring.base, 0, tq->tx_ring.size *
563 sizeof(struct Vmxnet3_TxDesc));
564 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
565 tq->tx_ring.gen = VMXNET3_INIT_GEN;
566
567 memset(tq->data_ring.base, 0,
568 tq->data_ring.size * tq->txdata_desc_size);
569
570 if (tq->ts_ring.base)
571 memset(tq->ts_ring.base, 0,
572 tq->tx_ring.size * tq->tx_ts_desc_size);
573
574 /* reset the tx comp ring contents to 0 and reset comp ring states */
575 memset(tq->comp_ring.base, 0, tq->comp_ring.size *
576 sizeof(struct Vmxnet3_TxCompDesc));
577 tq->comp_ring.next2proc = 0;
578 tq->comp_ring.gen = VMXNET3_INIT_GEN;
579
580 /* reset the bookkeeping data */
581 memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
582 for (i = 0; i < tq->tx_ring.size; i++)
583 tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
584
585 /* stats are not reset */
586 }
587
588
589 static int
590 vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
591 struct vmxnet3_adapter *adapter)
592 {
593 BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
594 tq->comp_ring.base || tq->buf_info);
595
596 tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
597 tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
598 &tq->tx_ring.basePA, GFP_KERNEL);
599 if (!tq->tx_ring.base) {
600 netdev_err(adapter->netdev, "failed to allocate tx ring\n");
601 goto err;
602 }
603
604 tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
605 tq->data_ring.size * tq->txdata_desc_size,
606 &tq->data_ring.basePA, GFP_KERNEL);
607 if (!tq->data_ring.base) {
608 netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
609 goto err;
610 }
611
612 if (tq->tx_ts_desc_size != 0) {
613 tq->ts_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
614 tq->tx_ring.size * tq->tx_ts_desc_size,
615 &tq->ts_ring.basePA, GFP_KERNEL);
616 if (!tq->ts_ring.base) {
617 netdev_err(adapter->netdev, "failed to allocate tx ts ring\n");
618 tq->tx_ts_desc_size = 0;
619 }
620 } else {
621 tq->ts_ring.base = NULL;
622 }
623
624 tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
625 tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
626 &tq->comp_ring.basePA, GFP_KERNEL);
627 if (!tq->comp_ring.base) {
628 netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
629 goto err;
630 }
631
632 tq->buf_info = kcalloc_node(tq->tx_ring.size, sizeof(tq->buf_info[0]),
633 GFP_KERNEL,
634 dev_to_node(&adapter->pdev->dev));
635 if (!tq->buf_info)
636 goto err;
637
638 return 0;
639
640 err:
641 vmxnet3_tq_destroy(tq, adapter);
642 return -ENOMEM;
643 }
644
645 static void
646 vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
647 {
648 int i;
649
650 for (i = 0; i < adapter->num_tx_queues; i++)
651 vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
652 }
653
654 /*
655 * starting from ring->next2fill, allocate rx buffers for the given ring
656 * of the rx queue and update the rx desc. stop after @num_to_alloc buffers
657 * are allocated or allocation fails
658 */
659
660 static int
661 vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
662 int num_to_alloc, struct vmxnet3_adapter *adapter)
663 {
664 int num_allocated = 0;
665 struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
666 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
667 u32 val;
668
669 while (num_allocated <= num_to_alloc) {
670 struct vmxnet3_rx_buf_info *rbi;
671 union Vmxnet3_GenericDesc *gd;
672
673 rbi = rbi_base + ring->next2fill;
674 gd = ring->base + ring->next2fill;
675 rbi->comp_state = VMXNET3_RXD_COMP_PENDING;
676
677 if (rbi->buf_type == VMXNET3_RX_BUF_XDP) {
678 void *data = vmxnet3_pp_get_buff(rq->page_pool,
679 &rbi->dma_addr,
680 GFP_KERNEL);
681 if (!data) {
682 rq->stats.rx_buf_alloc_failure++;
683 break;
684 }
685 rbi->page = virt_to_page(data);
686 val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
687 } else if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
688 if (rbi->skb == NULL) {
689 rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
690 rbi->len,
691 GFP_KERNEL);
692 if (unlikely(rbi->skb == NULL)) {
693 rq->stats.rx_buf_alloc_failure++;
694 break;
695 }
696
697 rbi->dma_addr = dma_map_single(
698 &adapter->pdev->dev,
699 rbi->skb->data, rbi->len,
700 DMA_FROM_DEVICE);
701 if (dma_mapping_error(&adapter->pdev->dev,
702 rbi->dma_addr)) {
703 dev_kfree_skb_any(rbi->skb);
704 rbi->skb = NULL;
705 rq->stats.rx_buf_alloc_failure++;
706 break;
707 }
708 } else {
709 /* rx buffer skipped by the device */
710 }
711 val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
712 } else {
713 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
714 rbi->len != PAGE_SIZE);
715
716 if (rbi->page == NULL) {
717 rbi->page = alloc_page(GFP_ATOMIC);
718 if (unlikely(rbi->page == NULL)) {
719 rq->stats.rx_buf_alloc_failure++;
720 break;
721 }
722 rbi->dma_addr = dma_map_page(
723 &adapter->pdev->dev,
724 rbi->page, 0, PAGE_SIZE,
725 DMA_FROM_DEVICE);
726 if (dma_mapping_error(&adapter->pdev->dev,
727 rbi->dma_addr)) {
728 put_page(rbi->page);
729 rbi->page = NULL;
730 rq->stats.rx_buf_alloc_failure++;
731 break;
732 }
733 } else {
734 /* rx buffers skipped by the device */
735 }
736 val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
737 }
738
739 gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
740 gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
741 | val | rbi->len);
742
743 /* Fill the last buffer but don't mark it ready, or else the
744 * device will think that the queue is full */
745 if (num_allocated == num_to_alloc) {
746 rbi->comp_state = VMXNET3_RXD_COMP_DONE;
747 break;
748 }
749
750 gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
751 num_allocated++;
752 vmxnet3_cmd_ring_adv_next2fill(ring);
753 }
754
755 netdev_dbg(adapter->netdev,
756 "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
757 num_allocated, ring->next2fill, ring->next2comp);
758
759 /* so that the device can distinguish a full ring and an empty ring */
760 BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
761
762 return num_allocated;
763 }
764
765
766 static void
767 vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
768 struct vmxnet3_rx_buf_info *rbi)
769 {
770 skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
771
772 BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
773
774 skb_frag_fill_page_desc(frag, rbi->page, 0, rcd->len);
775 skb->data_len += rcd->len;
776 skb->truesize += PAGE_SIZE;
777 skb_shinfo(skb)->nr_frags++;
778 }
779
780
781 static int
782 vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
783 struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
784 struct vmxnet3_adapter *adapter)
785 {
786 u32 dw2, len;
787 unsigned long buf_offset;
788 int i;
789 union Vmxnet3_GenericDesc *gdesc;
790 struct vmxnet3_tx_buf_info *tbi = NULL;
791
792 BUG_ON(ctx->copy_size > skb_headlen(skb));
793
794 /* use the previous gen bit for the SOP desc */
795 dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
796
797 ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
798 gdesc = ctx->sop_txd; /* both loops below can be skipped */
799
800 /* no need to map the buffer if headers are copied */
801 if (ctx->copy_size) {
802 ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
803 tq->tx_ring.next2fill *
804 tq->txdata_desc_size);
805 ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
806 ctx->sop_txd->dword[3] = 0;
807
808 tbi = tq->buf_info + tq->tx_ring.next2fill;
809 tbi->map_type = VMXNET3_MAP_NONE;
810
811 netdev_dbg(adapter->netdev,
812 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
813 tq->tx_ring.next2fill,
814 le64_to_cpu(ctx->sop_txd->txd.addr),
815 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
816 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
817
818 /* use the right gen for non-SOP desc */
819 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
820 }
821
822 /* linear part can use multiple tx desc if it's big */
823 len = skb_headlen(skb) - ctx->copy_size;
824 buf_offset = ctx->copy_size;
825 while (len) {
826 u32 buf_size;
827
828 if (len < VMXNET3_MAX_TX_BUF_SIZE) {
829 buf_size = len;
830 dw2 |= len;
831 } else {
832 buf_size = VMXNET3_MAX_TX_BUF_SIZE;
833 /* spec says that for TxDesc.len, 0 == 2^14 */
834 }
835
836 tbi = tq->buf_info + tq->tx_ring.next2fill;
837 tbi->map_type = VMXNET3_MAP_SINGLE;
838 tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
839 skb->data + buf_offset, buf_size,
840 DMA_TO_DEVICE);
841 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
842 return -EFAULT;
843
844 tbi->len = buf_size;
845
846 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
847 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
848
849 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
850 gdesc->dword[2] = cpu_to_le32(dw2);
851 gdesc->dword[3] = 0;
852
853 netdev_dbg(adapter->netdev,
854 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
855 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
856 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
857 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
858 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
859
860 len -= buf_size;
861 buf_offset += buf_size;
862 }
863
864 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
865 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
866 u32 buf_size;
867
868 buf_offset = 0;
869 len = skb_frag_size(frag);
870 while (len) {
871 tbi = tq->buf_info + tq->tx_ring.next2fill;
872 if (len < VMXNET3_MAX_TX_BUF_SIZE) {
873 buf_size = len;
874 dw2 |= len;
875 } else {
876 buf_size = VMXNET3_MAX_TX_BUF_SIZE;
877 /* spec says that for TxDesc.len, 0 == 2^14 */
878 }
879 tbi->map_type = VMXNET3_MAP_PAGE;
880 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
881 buf_offset, buf_size,
882 DMA_TO_DEVICE);
883 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
884 return -EFAULT;
885
886 tbi->len = buf_size;
887
888 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
889 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
890
891 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
892 gdesc->dword[2] = cpu_to_le32(dw2);
893 gdesc->dword[3] = 0;
894
895 netdev_dbg(adapter->netdev,
896 "txd[%u]: 0x%llx %u %u\n",
897 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
898 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
899 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
900 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
901
902 len -= buf_size;
903 buf_offset += buf_size;
904 }
905 }
906
907 ctx->eop_txd = gdesc;
908
909 /* set the last buf_info for the pkt */
910 tbi->skb = skb;
911 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
912 if (tq->tx_ts_desc_size != 0) {
913 ctx->ts_txd = (struct Vmxnet3_TxTSDesc *)((u8 *)tq->ts_ring.base +
914 tbi->sop_idx * tq->tx_ts_desc_size);
915 ctx->ts_txd->ts.tsi = 0;
916 }
917
918 return 0;
919 }
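/*
 * Illustrative note (not upstream code): vmxnet3_map_pkt() writes the SOP
 * descriptor with the *previous* generation bit (tq->tx_ring.gen ^ 0x1), so
 * the device ignores the packet while its remaining descriptors are still
 * being filled. The caller later publishes the whole packet in one step:
 *
 *	dma_wmb();				// order all descriptor writes
 *	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
 *				      VMXNET3_TXD_GEN);	// flip the SOP gen bit
 *
 * as done in vmxnet3_tq_xmit() further below.
 */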
920
921
922 /* Init all tx queues */
923 static void
924 vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
925 {
926 int i;
927
928 for (i = 0; i < adapter->num_tx_queues; i++)
929 vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
930 }
931
932
933 /*
934 * parse relevant protocol headers:
935 * For a tso pkt, relevant headers are L2/3/4 including options
936 * For a pkt requesting csum offloading, they are L2/3 and may include L4
937 * if it's a TCP/UDP pkt
938 *
939 * Returns:
940 * -1: error happens during parsing
941 * 0: protocol headers parsed, but too big to be copied
942 * 1: protocol headers parsed and copied
943 *
944 * Other effects:
945 * 1. related *ctx fields are updated.
946 * 2. ctx->copy_size is # of bytes copied
947 * 3. the portion to be copied is guaranteed to be in the linear part
948 *
949 */
950 static int
951 vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
952 struct vmxnet3_tx_ctx *ctx,
953 struct vmxnet3_adapter *adapter)
954 {
955 u8 protocol = 0;
956
957 if (ctx->mss) { /* TSO */
958 if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
959 ctx->l4_offset = skb_inner_transport_offset(skb);
960 ctx->l4_hdr_size = inner_tcp_hdrlen(skb);
961 ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
962 } else {
963 ctx->l4_offset = skb_transport_offset(skb);
964 ctx->l4_hdr_size = tcp_hdrlen(skb);
965 ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
966 }
967 } else {
968 if (skb->ip_summed == CHECKSUM_PARTIAL) {
969 /* For encap packets, skb_checksum_start_offset refers
970 * to inner L4 offset. Thus, below works for encap as
971 * well as non-encap case
972 */
973 ctx->l4_offset = skb_checksum_start_offset(skb);
974
975 if (VMXNET3_VERSION_GE_4(adapter) &&
976 skb->encapsulation) {
977 struct iphdr *iph = inner_ip_hdr(skb);
978
979 if (iph->version == 4) {
980 protocol = iph->protocol;
981 } else {
982 const struct ipv6hdr *ipv6h;
983
984 ipv6h = inner_ipv6_hdr(skb);
985 protocol = ipv6h->nexthdr;
986 }
987 } else {
988 if (ctx->ipv4) {
989 const struct iphdr *iph = ip_hdr(skb);
990
991 protocol = iph->protocol;
992 } else if (ctx->ipv6) {
993 const struct ipv6hdr *ipv6h;
994
995 ipv6h = ipv6_hdr(skb);
996 protocol = ipv6h->nexthdr;
997 }
998 }
999
1000 switch (protocol) {
1001 case IPPROTO_TCP:
1002 ctx->l4_hdr_size = skb->encapsulation ? inner_tcp_hdrlen(skb) :
1003 tcp_hdrlen(skb);
1004 break;
1005 case IPPROTO_UDP:
1006 ctx->l4_hdr_size = sizeof(struct udphdr);
1007 break;
1008 default:
1009 ctx->l4_hdr_size = 0;
1010 break;
1011 }
1012
1013 ctx->copy_size = min(ctx->l4_offset +
1014 ctx->l4_hdr_size, skb->len);
1015 } else {
1016 ctx->l4_offset = 0;
1017 ctx->l4_hdr_size = 0;
1018 /* copy as much as allowed */
1019 ctx->copy_size = min_t(unsigned int,
1020 tq->txdata_desc_size,
1021 skb_headlen(skb));
1022 }
1023
1024 if (skb->len <= tq->txdata_desc_size)
1025 ctx->copy_size = skb->len;
1026
1027 /* make sure headers are accessible directly */
1028 if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
1029 goto err;
1030 }
1031
1032 if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
1033 tq->stats.oversized_hdr++;
1034 ctx->copy_size = 0;
1035 return 0;
1036 }
1037
1038 return 1;
1039 err:
1040 return -1;
1041 }
1042
1043 /*
1044 * copy relevant protocol headers to the transmit ring:
1045 * For a tso pkt, relevant headers are L2/3/4 including options
1046 * For a pkt requesting csum offloading, they are L2/3 and may include L4
1047 * if it's a TCP/UDP pkt
1048 *
1049 *
1050 * Note that this requires vmxnet3_parse_hdr to be called first to set the
1051 * appropriate bits in ctx
1052 */
1053 static void
1054 vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1055 struct vmxnet3_tx_ctx *ctx,
1056 struct vmxnet3_adapter *adapter)
1057 {
1058 struct Vmxnet3_TxDataDesc *tdd;
1059
1060 tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
1061 tq->tx_ring.next2fill *
1062 tq->txdata_desc_size);
1063
1064 memcpy(tdd->data, skb->data, ctx->copy_size);
1065 netdev_dbg(adapter->netdev,
1066 "copy %u bytes to dataRing[%u]\n",
1067 ctx->copy_size, tq->tx_ring.next2fill);
1068 }
1069
1070
1071 static void
1072 vmxnet3_prepare_inner_tso(struct sk_buff *skb,
1073 struct vmxnet3_tx_ctx *ctx)
1074 {
1075 struct tcphdr *tcph = inner_tcp_hdr(skb);
1076 struct iphdr *iph = inner_ip_hdr(skb);
1077
1078 if (iph->version == 4) {
1079 iph->check = 0;
1080 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
1081 IPPROTO_TCP, 0);
1082 } else {
1083 struct ipv6hdr *iph = inner_ipv6_hdr(skb);
1084
1085 tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
1086 IPPROTO_TCP, 0);
1087 }
1088 }
1089
1090 static void
1091 vmxnet3_prepare_tso(struct sk_buff *skb,
1092 struct vmxnet3_tx_ctx *ctx)
1093 {
1094 struct tcphdr *tcph = tcp_hdr(skb);
1095
1096 if (ctx->ipv4) {
1097 struct iphdr *iph = ip_hdr(skb);
1098
1099 iph->check = 0;
1100 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
1101 IPPROTO_TCP, 0);
1102 } else if (ctx->ipv6) {
1103 tcp_v6_gso_csum_prep(skb);
1104 }
1105 }
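/*
 * Illustrative note (not upstream documentation): before TSO hand-off the TCP
 * checksum field is seeded with the pseudo-header checksum computed over the
 * addresses and protocol with a zero length, e.g. for IPv4 above:
 *
 *	tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
 *					 IPPROTO_TCP, 0);
 *
 * so the device only has to fold in the payload and length of each segment it
 * emits.
 */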
1106
1107 static int txd_estimate(const struct sk_buff *skb)
1108 {
1109 int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1110 int i;
1111
1112 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1113 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1114
1115 count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
1116 }
1117 return count;
1118 }
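/*
 * Worked example (illustrative, assuming VMXNET3_TXD_NEEDED() rounds a length
 * up to VMXNET3_MAX_TX_BUF_SIZE (16 KB) descriptors): an skb with a 200 byte
 * linear area and two 32 KB frags would be estimated as
 *
 *	count = VMXNET3_TXD_NEEDED(200) + 1	// 1 + 1
 *	      + VMXNET3_TXD_NEEDED(32768)	// 2
 *	      + VMXNET3_TXD_NEEDED(32768);	// 2, total 6
 *
 * where the "+ 1" leaves room for the extra SOP descriptor used when headers
 * are copied into the tx data ring.
 */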
1119
1120 /*
1121 * Transmits a pkt thru a given tq
1122 * Returns:
1123 * NETDEV_TX_OK: descriptors are set up successfully
1124 * NETDEV_TX_OK: error occurred, the pkt is dropped
1125 * NETDEV_TX_BUSY: tx ring is full, queue is stopped
1126 *
1127 * Side-effects:
1128 * 1. tx ring may be changed
1129 * 2. tq stats may be updated accordingly
1130 * 3. shared->txNumDeferred may be updated
1131 */
1132
1133 static int
1134 vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1135 struct vmxnet3_adapter *adapter, struct net_device *netdev)
1136 {
1137 int ret;
1138 u32 count;
1139 int num_pkts;
1140 int tx_num_deferred;
1141 unsigned long flags;
1142 struct vmxnet3_tx_ctx ctx;
1143 union Vmxnet3_GenericDesc *gdesc;
1144 #ifdef __BIG_ENDIAN_BITFIELD
1145 /* Use temporary descriptor to avoid touching bits multiple times */
1146 union Vmxnet3_GenericDesc tempTxDesc;
1147 #endif
1148
1149 count = txd_estimate(skb);
1150
1151 ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
1152 ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));
1153
1154 ctx.mss = skb_shinfo(skb)->gso_size;
1155 if (ctx.mss) {
1156 if (skb_header_cloned(skb)) {
1157 if (unlikely(pskb_expand_head(skb, 0, 0,
1158 GFP_ATOMIC) != 0)) {
1159 tq->stats.drop_tso++;
1160 goto drop_pkt;
1161 }
1162 tq->stats.copy_skb_header++;
1163 }
1164 if (unlikely(count > VMXNET3_MAX_TSO_TXD_PER_PKT)) {
1165 /* tso pkts must not use more than
1166 * VMXNET3_MAX_TSO_TXD_PER_PKT entries
1167 */
1168 if (skb_linearize(skb) != 0) {
1169 tq->stats.drop_too_many_frags++;
1170 goto drop_pkt;
1171 }
1172 tq->stats.linearized++;
1173
1174 /* recalculate the # of descriptors to use */
1175 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1176 if (unlikely(count > VMXNET3_MAX_TSO_TXD_PER_PKT)) {
1177 tq->stats.drop_too_many_frags++;
1178 goto drop_pkt;
1179 }
1180 }
1181 if (skb->encapsulation) {
1182 vmxnet3_prepare_inner_tso(skb, &ctx);
1183 } else {
1184 vmxnet3_prepare_tso(skb, &ctx);
1185 }
1186 } else {
1187 if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
1188
1189 /* non-tso pkts must not use more than
1190 * VMXNET3_MAX_TXD_PER_PKT entries
1191 */
1192 if (skb_linearize(skb) != 0) {
1193 tq->stats.drop_too_many_frags++;
1194 goto drop_pkt;
1195 }
1196 tq->stats.linearized++;
1197
1198 /* recalculate the # of descriptors to use */
1199 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
1200 }
1201 }
1202
1203 ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
1204 if (ret >= 0) {
1205 BUG_ON(ret <= 0 && ctx.copy_size != 0);
1206 /* hdrs parsed, check against other limits */
1207 if (ctx.mss) {
1208 if (unlikely(ctx.l4_offset + ctx.l4_hdr_size >
1209 VMXNET3_MAX_TX_BUF_SIZE)) {
1210 tq->stats.drop_oversized_hdr++;
1211 goto drop_pkt;
1212 }
1213 } else {
1214 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1215 if (unlikely(ctx.l4_offset +
1216 skb->csum_offset >
1217 VMXNET3_MAX_CSUM_OFFSET)) {
1218 tq->stats.drop_oversized_hdr++;
1219 goto drop_pkt;
1220 }
1221 }
1222 }
1223 } else {
1224 tq->stats.drop_hdr_inspect_err++;
1225 goto drop_pkt;
1226 }
1227
1228 spin_lock_irqsave(&tq->tx_lock, flags);
1229
1230 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
1231 tq->stats.tx_ring_full++;
1232 netdev_dbg(adapter->netdev,
1233 "tx queue stopped on %s, next2comp %u"
1234 " next2fill %u\n", adapter->netdev->name,
1235 tq->tx_ring.next2comp, tq->tx_ring.next2fill);
1236
1237 vmxnet3_tq_stop(tq, adapter);
1238 spin_unlock_irqrestore(&tq->tx_lock, flags);
1239 return NETDEV_TX_BUSY;
1240 }
1241
1242
1243 vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
1244
1245 /* fill tx descs related to addr & len */
1246 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
1247 goto unlock_drop_pkt;
1248
1249 /* setup the EOP desc */
1250 ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
1251
1252 /* setup the SOP desc */
1253 #ifdef __BIG_ENDIAN_BITFIELD
1254 gdesc = &tempTxDesc;
1255 gdesc->dword[2] = ctx.sop_txd->dword[2];
1256 gdesc->dword[3] = ctx.sop_txd->dword[3];
1257 #else
1258 gdesc = ctx.sop_txd;
1259 #endif
1260 tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
1261 if (ctx.mss) {
1262 if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
1263 gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
1264 if (VMXNET3_VERSION_GE_7(adapter)) {
1265 gdesc->txd.om = VMXNET3_OM_TSO;
1266 gdesc->txd.ext1 = 1;
1267 } else {
1268 gdesc->txd.om = VMXNET3_OM_ENCAP;
1269 }
1270 gdesc->txd.msscof = ctx.mss;
1271
1272 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
1273 gdesc->txd.oco = 1;
1274 } else {
1275 gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
1276 gdesc->txd.om = VMXNET3_OM_TSO;
1277 gdesc->txd.msscof = ctx.mss;
1278 }
1279 num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
1280 } else {
1281 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1282 if (VMXNET3_VERSION_GE_4(adapter) &&
1283 skb->encapsulation) {
1284 gdesc->txd.hlen = ctx.l4_offset +
1285 ctx.l4_hdr_size;
1286 if (VMXNET3_VERSION_GE_7(adapter)) {
1287 gdesc->txd.om = VMXNET3_OM_CSUM;
1288 gdesc->txd.msscof = ctx.l4_offset +
1289 skb->csum_offset;
1290 gdesc->txd.ext1 = 1;
1291 } else {
1292 gdesc->txd.om = VMXNET3_OM_ENCAP;
1293 gdesc->txd.msscof = 0; /* Reserved */
1294 }
1295 } else {
1296 gdesc->txd.hlen = ctx.l4_offset;
1297 gdesc->txd.om = VMXNET3_OM_CSUM;
1298 gdesc->txd.msscof = ctx.l4_offset +
1299 skb->csum_offset;
1300 }
1301 } else {
1302 gdesc->txd.om = 0;
1303 gdesc->txd.msscof = 0;
1304 }
1305 num_pkts = 1;
1306 }
1307 le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);
1308 tx_num_deferred += num_pkts;
1309
1310 if (skb_vlan_tag_present(skb)) {
1311 gdesc->txd.ti = 1;
1312 gdesc->txd.tci = skb_vlan_tag_get(skb);
1313 }
1314
1315 if (tq->tx_ts_desc_size != 0 &&
1316 adapter->latencyConf->sampleRate != 0) {
1317 if (vmxnet3_apply_timestamp(tq, adapter->latencyConf->sampleRate)) {
1318 ctx.ts_txd->ts.tsData = vmxnet3_get_cycles(VMXNET3_PMC_PSEUDO_TSC);
1319 ctx.ts_txd->ts.tsi = 1;
1320 }
1321 }
1322
1323 /* Ensure that the write to (&gdesc->txd)->gen will be observed after
1324 * all other writes to &gdesc->txd.
1325 */
1326 dma_wmb();
1327
1328 /* finally flips the GEN bit of the SOP desc. */
1329 gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
1330 VMXNET3_TXD_GEN);
1331 #ifdef __BIG_ENDIAN_BITFIELD
1332 /* Finished updating in bitfields of Tx Desc, so write them in original
1333 * place.
1334 */
1335 vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
1336 (struct Vmxnet3_TxDesc *)ctx.sop_txd);
1337 gdesc = ctx.sop_txd;
1338 #endif
1339 netdev_dbg(adapter->netdev,
1340 "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
1341 (u32)(ctx.sop_txd -
1342 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
1343 le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
1344
1345 spin_unlock_irqrestore(&tq->tx_lock, flags);
1346
1347 if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
1348 tq->shared->txNumDeferred = 0;
1349 VMXNET3_WRITE_BAR0_REG(adapter,
1350 adapter->tx_prod_offset + tq->qid * 8,
1351 tq->tx_ring.next2fill);
1352 }
1353
1354 return NETDEV_TX_OK;
1355
1356 unlock_drop_pkt:
1357 spin_unlock_irqrestore(&tq->tx_lock, flags);
1358 drop_pkt:
1359 tq->stats.drop_total++;
1360 dev_kfree_skb_any(skb);
1361 return NETDEV_TX_OK;
1362 }
1363
1364 static int
1365 vmxnet3_create_pp(struct vmxnet3_adapter *adapter,
1366 struct vmxnet3_rx_queue *rq, int size)
1367 {
1368 bool xdp_prog = vmxnet3_xdp_enabled(adapter);
1369 const struct page_pool_params pp_params = {
1370 .order = 0,
1371 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
1372 .pool_size = size,
1373 .nid = NUMA_NO_NODE,
1374 .dev = &adapter->pdev->dev,
1375 .offset = VMXNET3_XDP_RX_OFFSET,
1376 .max_len = VMXNET3_XDP_MAX_FRSIZE,
1377 .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
1378 };
1379 struct page_pool *pp;
1380 int err;
1381
1382 pp = page_pool_create(&pp_params);
1383 if (IS_ERR(pp))
1384 return PTR_ERR(pp);
1385
1386 err = xdp_rxq_info_reg(&rq->xdp_rxq, adapter->netdev, rq->qid,
1387 rq->napi.napi_id);
1388 if (err < 0)
1389 goto err_free_pp;
1390
1391 err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_POOL, pp);
1392 if (err)
1393 goto err_unregister_rxq;
1394
1395 rq->page_pool = pp;
1396
1397 return 0;
1398
1399 err_unregister_rxq:
1400 xdp_rxq_info_unreg(&rq->xdp_rxq);
1401 err_free_pp:
1402 page_pool_destroy(pp);
1403
1404 return err;
1405 }
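/*
 * Illustrative teardown sketch (not code from this section): the registration
 * above is undone when the rx queue is torn down, roughly
 *
 *	xdp_rxq_info_unreg(&rq->xdp_rxq);
 *	page_pool_destroy(rq->page_pool);
 *	rq->page_pool = NULL;
 *
 * mirroring the error path at the end of vmxnet3_create_pp().
 */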
1406
1407 void *
1408 vmxnet3_pp_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
1409 gfp_t gfp_mask)
1410 {
1411 struct page *page;
1412
1413 page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
1414 if (unlikely(!page))
1415 return NULL;
1416
1417 *dma_addr = page_pool_get_dma_addr(page) + pp->p.offset;
1418
1419 return page_address(page);
1420 }
1421
1422 static netdev_tx_t
1423 vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1424 {
1425 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1426
1427 BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
1428 return vmxnet3_tq_xmit(skb,
1429 &adapter->tx_queue[skb->queue_mapping],
1430 adapter, netdev);
1431 }
1432
1433
1434 static void
1435 vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1436 struct sk_buff *skb,
1437 union Vmxnet3_GenericDesc *gdesc)
1438 {
1439 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
1440 if (gdesc->rcd.v4 &&
1441 (le32_to_cpu(gdesc->dword[3]) &
1442 VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
1443 skb->ip_summed = CHECKSUM_UNNECESSARY;
1444 if ((le32_to_cpu(gdesc->dword[0]) &
1445 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) {
1446 skb->csum_level = 1;
1447 }
1448 WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
1449 !(le32_to_cpu(gdesc->dword[0]) &
1450 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1451 WARN_ON_ONCE(gdesc->rcd.frg &&
1452 !(le32_to_cpu(gdesc->dword[0]) &
1453 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1454 } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
1455 (1 << VMXNET3_RCD_TUC_SHIFT))) {
1456 skb->ip_summed = CHECKSUM_UNNECESSARY;
1457 if ((le32_to_cpu(gdesc->dword[0]) &
1458 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) {
1459 skb->csum_level = 1;
1460 }
1461 WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
1462 !(le32_to_cpu(gdesc->dword[0]) &
1463 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1464 WARN_ON_ONCE(gdesc->rcd.frg &&
1465 !(le32_to_cpu(gdesc->dword[0]) &
1466 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1467 } else {
1468 if (gdesc->rcd.csum) {
1469 skb->csum = htons(gdesc->rcd.csum);
1470 skb->ip_summed = CHECKSUM_PARTIAL;
1471 } else {
1472 skb_checksum_none_assert(skb);
1473 }
1474 }
1475 } else {
1476 skb_checksum_none_assert(skb);
1477 }
1478 }
1479
1480
1481 static void
1482 vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
1483 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
1484 {
1485 rq->stats.drop_err++;
1486 if (!rcd->fcs)
1487 rq->stats.drop_fcs++;
1488
1489 rq->stats.drop_total++;
1490
1491 /*
1492 * We do not unmap and chain the rx buffer to the skb.
1493 * We basically pretend this buffer is not used and will be recycled
1494 * by vmxnet3_rq_alloc_rx_buf()
1495 */
1496
1497 /*
1498 * ctx->skb may be NULL if this is the first and the only one
1499 * desc for the pkt
1500 */
1501 if (ctx->skb)
1502 dev_kfree_skb_irq(ctx->skb);
1503
1504 ctx->skb = NULL;
1505 }
1506
1507
1508 static u32
1509 vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
1510 union Vmxnet3_GenericDesc *gdesc)
1511 {
1512 u32 hlen, maplen;
1513 union {
1514 void *ptr;
1515 struct ethhdr *eth;
1516 struct vlan_ethhdr *veth;
1517 struct iphdr *ipv4;
1518 struct ipv6hdr *ipv6;
1519 struct tcphdr *tcp;
1520 } hdr;
1521 BUG_ON(gdesc->rcd.tcp == 0);
1522
1523 maplen = skb_headlen(skb);
1524 if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
1525 return 0;
1526
1527 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
1528 skb->protocol == cpu_to_be16(ETH_P_8021AD))
1529 hlen = sizeof(struct vlan_ethhdr);
1530 else
1531 hlen = sizeof(struct ethhdr);
1532
1533 hdr.eth = eth_hdr(skb);
1534 if (gdesc->rcd.v4) {
1535 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) &&
1536 hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP));
1537 hdr.ptr += hlen;
1538 BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
1539 hlen = hdr.ipv4->ihl << 2;
1540 hdr.ptr += hdr.ipv4->ihl << 2;
1541 } else if (gdesc->rcd.v6) {
1542 BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) &&
1543 hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6));
1544 hdr.ptr += hlen;
1545 /* Use an estimated value, since we also need to handle
1546 * the TSO case.
1547 */
1548 if (hdr.ipv6->nexthdr != IPPROTO_TCP)
1549 return sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1550 hlen = sizeof(struct ipv6hdr);
1551 hdr.ptr += sizeof(struct ipv6hdr);
1552 } else {
1553 /* Non-IP pkt, don't estimate header length */
1554 return 0;
1555 }
1556
1557 if (hlen + sizeof(struct tcphdr) > maplen)
1558 return 0;
1559
1560 return (hlen + (hdr.tcp->doff << 2));
1561 }
1562
1563 static int
1564 vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1565 struct vmxnet3_adapter *adapter, int quota)
1566 {
1567 u32 rxprod_reg[2] = {
1568 adapter->rx_prod_offset, adapter->rx_prod2_offset
1569 };
1570 u32 num_pkts = 0;
1571 bool skip_page_frags = false;
1572 bool encap_lro = false;
1573 struct Vmxnet3_RxCompDesc *rcd;
1574 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
1575 u16 segCnt = 0, mss = 0;
1576 int comp_offset, fill_offset;
1577 #ifdef __BIG_ENDIAN_BITFIELD
1578 struct Vmxnet3_RxDesc rxCmdDesc;
1579 struct Vmxnet3_RxCompDesc rxComp;
1580 #endif
1581 bool need_flush = false;
1582
1583 vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
1584 &rxComp);
1585 while (rcd->gen == rq->comp_ring.gen) {
1586 struct vmxnet3_rx_buf_info *rbi;
1587 struct sk_buff *skb, *new_skb = NULL;
1588 struct page *new_page = NULL;
1589 dma_addr_t new_dma_addr;
1590 int num_to_alloc;
1591 struct Vmxnet3_RxDesc *rxd;
1592 u32 idx, ring_idx;
1593 struct vmxnet3_cmd_ring *ring = NULL;
1594 if (num_pkts >= quota) {
1595 /* we may stop even before we see the EOP desc of
1596 * the current pkt
1597 */
1598 break;
1599 }
1600
1601 /* Prevent any rcd field from being (speculatively) read before
1602 * rcd->gen is read.
1603 */
1604 dma_rmb();
1605
1606 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
1607 rcd->rqID != rq->dataRingQid);
1608 idx = rcd->rxdIdx;
1609 ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID);
1610 ring = rq->rx_ring + ring_idx;
1611 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
1612 &rxCmdDesc);
1613 rbi = rq->buf_info[ring_idx] + idx;
1614
1615 BUG_ON(rxd->addr != rbi->dma_addr ||
1616 rxd->len != rbi->len);
1617
1618 if (unlikely(rcd->eop && rcd->err)) {
1619 vmxnet3_rx_error(rq, rcd, ctx, adapter);
1620 goto rcd_done;
1621 }
1622
1623 if (rcd->sop && rcd->eop && vmxnet3_xdp_enabled(adapter)) {
1624 struct sk_buff *skb_xdp_pass;
1625 int act;
1626
1627 if (VMXNET3_RX_DATA_RING(adapter, rcd->rqID)) {
1628 ctx->skb = NULL;
1629 goto skip_xdp; /* Handle it later. */
1630 }
1631
1632 if (rbi->buf_type != VMXNET3_RX_BUF_XDP)
1633 goto rcd_done;
1634
1635 act = vmxnet3_process_xdp(adapter, rq, rcd, rbi, rxd,
1636 &skb_xdp_pass);
1637 if (act == XDP_PASS) {
1638 ctx->skb = skb_xdp_pass;
1639 goto sop_done;
1640 }
1641 ctx->skb = NULL;
1642 need_flush |= act == XDP_REDIRECT;
1643
1644 goto rcd_done;
1645 }
1646 skip_xdp:
1647
1648 if (rcd->sop) { /* first buf of the pkt */
1649 bool rxDataRingUsed;
1650 u16 len;
1651
1652 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
1653 (rcd->rqID != rq->qid &&
1654 rcd->rqID != rq->dataRingQid));
1655
1656 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB &&
1657 rbi->buf_type != VMXNET3_RX_BUF_XDP);
1658 BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
1659
1660 if (unlikely(rcd->len == 0)) {
1661 /* Pretend the rx buffer is skipped. */
1662 BUG_ON(!(rcd->sop && rcd->eop));
1663 netdev_dbg(adapter->netdev,
1664 "rxRing[%u][%u] 0 length\n",
1665 ring_idx, idx);
1666 goto rcd_done;
1667 }
1668
1669 skip_page_frags = false;
1670 ctx->skb = rbi->skb;
1671
1672 if (rq->rx_ts_desc_size != 0 && rcd->ext2) {
1673 struct Vmxnet3_RxTSDesc *ts_rxd;
1674
1675 ts_rxd = (struct Vmxnet3_RxTSDesc *)((u8 *)rq->ts_ring.base +
1676 idx * rq->rx_ts_desc_size);
1677 ts_rxd->ts.tsData = vmxnet3_get_cycles(VMXNET3_PMC_PSEUDO_TSC);
1678 ts_rxd->ts.tsi = 1;
1679 }
1680
1681 rxDataRingUsed =
1682 VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
1683 len = rxDataRingUsed ? rcd->len : rbi->len;
1684
1685 if (rxDataRingUsed && vmxnet3_xdp_enabled(adapter)) {
1686 struct sk_buff *skb_xdp_pass;
1687 size_t sz;
1688 int act;
1689
1690 sz = rcd->rxdIdx * rq->data_ring.desc_size;
1691 act = vmxnet3_process_xdp_small(adapter, rq,
1692 &rq->data_ring.base[sz],
1693 rcd->len,
1694 &skb_xdp_pass);
1695 if (act == XDP_PASS) {
1696 ctx->skb = skb_xdp_pass;
1697 goto sop_done;
1698 }
1699 need_flush |= act == XDP_REDIRECT;
1700
1701 goto rcd_done;
1702 }
1703 new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
1704 len);
1705 if (new_skb == NULL) {
1706 /* Skb allocation failed, do not hand over this
1707 * skb to the stack. Reuse it. Drop the existing pkt
1708 */
1709 rq->stats.rx_buf_alloc_failure++;
1710 ctx->skb = NULL;
1711 rq->stats.drop_total++;
1712 skip_page_frags = true;
1713 goto rcd_done;
1714 }
1715
1716 if (rxDataRingUsed && adapter->rxdataring_enabled) {
1717 size_t sz;
1718
1719 BUG_ON(rcd->len > rq->data_ring.desc_size);
1720
1721 ctx->skb = new_skb;
1722 sz = rcd->rxdIdx * rq->data_ring.desc_size;
1723 memcpy(new_skb->data,
1724 &rq->data_ring.base[sz], rcd->len);
1725 } else {
1726 ctx->skb = rbi->skb;
1727
1728 new_dma_addr =
1729 dma_map_single(&adapter->pdev->dev,
1730 new_skb->data, rbi->len,
1731 DMA_FROM_DEVICE);
1732 if (dma_mapping_error(&adapter->pdev->dev,
1733 new_dma_addr)) {
1734 dev_kfree_skb(new_skb);
1735 /* Skb allocation failed, do not
1736 * hand over this skb to the stack. Reuse
1737 * it. Drop the existing pkt.
1738 */
1739 rq->stats.rx_buf_alloc_failure++;
1740 ctx->skb = NULL;
1741 rq->stats.drop_total++;
1742 skip_page_frags = true;
1743 goto rcd_done;
1744 }
1745
1746 dma_unmap_single(&adapter->pdev->dev,
1747 rbi->dma_addr,
1748 rbi->len,
1749 DMA_FROM_DEVICE);
1750
1751 /* Immediate refill */
1752 rbi->skb = new_skb;
1753 rbi->dma_addr = new_dma_addr;
1754 rxd->addr = cpu_to_le64(rbi->dma_addr);
1755 rxd->len = rbi->len;
1756 }
1757
1758 skb_record_rx_queue(ctx->skb, rq->qid);
1759 skb_put(ctx->skb, rcd->len);
1760
1761 if (VMXNET3_VERSION_GE_2(adapter) &&
1762 rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
1763 struct Vmxnet3_RxCompDescExt *rcdlro;
1764 union Vmxnet3_GenericDesc *gdesc;
1765
1766 rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
1767 gdesc = (union Vmxnet3_GenericDesc *)rcd;
1768
1769 segCnt = rcdlro->segCnt;
1770 WARN_ON_ONCE(segCnt == 0);
1771 mss = rcdlro->mss;
1772 if (unlikely(segCnt <= 1))
1773 segCnt = 0;
1774 encap_lro = (le32_to_cpu(gdesc->dword[0]) &
1775 (1UL << VMXNET3_RCD_HDR_INNER_SHIFT));
1776 } else {
1777 segCnt = 0;
1778 }
1779 } else {
1780 BUG_ON(ctx->skb == NULL && !skip_page_frags);
1781
1782 /* non SOP buffer must be type 1 in most cases */
1783 BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
1784 BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
1785
1786 /* If an sop buffer was dropped, skip all
1787 * following non-sop fragments. They will be reused.
1788 */
1789 if (skip_page_frags)
1790 goto rcd_done;
1791
1792 if (rcd->len) {
1793 new_page = alloc_page(GFP_ATOMIC);
1794 /* Replacement page frag could not be allocated.
1795 * Reuse this page. Drop the pkt and free the
1796 * skb which contained this page as a frag. Skip
1797 * processing all the following non-sop frags.
1798 */
1799 if (unlikely(!new_page)) {
1800 rq->stats.rx_buf_alloc_failure++;
1801 dev_kfree_skb(ctx->skb);
1802 ctx->skb = NULL;
1803 skip_page_frags = true;
1804 goto rcd_done;
1805 }
1806 new_dma_addr = dma_map_page(&adapter->pdev->dev,
1807 new_page,
1808 0, PAGE_SIZE,
1809 DMA_FROM_DEVICE);
1810 if (dma_mapping_error(&adapter->pdev->dev,
1811 new_dma_addr)) {
1812 put_page(new_page);
1813 rq->stats.rx_buf_alloc_failure++;
1814 dev_kfree_skb(ctx->skb);
1815 ctx->skb = NULL;
1816 skip_page_frags = true;
1817 goto rcd_done;
1818 }
1819
1820 dma_unmap_page(&adapter->pdev->dev,
1821 rbi->dma_addr, rbi->len,
1822 DMA_FROM_DEVICE);
1823
1824 vmxnet3_append_frag(ctx->skb, rcd, rbi);
1825
1826 /* Immediate refill */
1827 rbi->page = new_page;
1828 rbi->dma_addr = new_dma_addr;
1829 rxd->addr = cpu_to_le64(rbi->dma_addr);
1830 rxd->len = rbi->len;
1831 }
1832 }
1833
1834
1835 sop_done:
1836 skb = ctx->skb;
1837 if (rcd->eop) {
1838 u32 mtu = adapter->netdev->mtu;
1839 skb->len += skb->data_len;
1840
1841 #ifdef VMXNET3_RSS
1842 if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
1843 (adapter->netdev->features & NETIF_F_RXHASH)) {
1844 enum pkt_hash_types hash_type;
1845
1846 switch (rcd->rssType) {
1847 case VMXNET3_RCD_RSS_TYPE_IPV4:
1848 case VMXNET3_RCD_RSS_TYPE_IPV6:
1849 hash_type = PKT_HASH_TYPE_L3;
1850 break;
1851 case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
1852 case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
1853 case VMXNET3_RCD_RSS_TYPE_UDPIPV4:
1854 case VMXNET3_RCD_RSS_TYPE_UDPIPV6:
1855 hash_type = PKT_HASH_TYPE_L4;
1856 break;
1857 default:
1858 hash_type = PKT_HASH_TYPE_L3;
1859 break;
1860 }
1861 skb_set_hash(skb,
1862 le32_to_cpu(rcd->rssHash),
1863 hash_type);
1864 }
1865 #endif
1866 vmxnet3_rx_csum(adapter, skb,
1867 (union Vmxnet3_GenericDesc *)rcd);
1868 skb->protocol = eth_type_trans(skb, adapter->netdev);
1869 if ((!rcd->tcp && !encap_lro) ||
1870 !(adapter->netdev->features & NETIF_F_LRO))
1871 goto not_lro;
1872
1873 if (segCnt != 0 && mss != 0) {
1874 skb_shinfo(skb)->gso_type = rcd->v4 ?
1875 SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
1876 skb_shinfo(skb)->gso_size = mss;
1877 skb_shinfo(skb)->gso_segs = segCnt;
1878 } else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) {
1879 u32 hlen;
1880
1881 hlen = vmxnet3_get_hdr_len(adapter, skb,
1882 (union Vmxnet3_GenericDesc *)rcd);
1883 if (hlen == 0)
1884 goto not_lro;
1885
1886 skb_shinfo(skb)->gso_type =
1887 rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
1888 if (segCnt != 0) {
1889 skb_shinfo(skb)->gso_segs = segCnt;
1890 skb_shinfo(skb)->gso_size =
1891 DIV_ROUND_UP(skb->len -
1892 hlen, segCnt);
1893 } else {
1894 skb_shinfo(skb)->gso_size = mtu - hlen;
1895 }
1896 }
1897 not_lro:
1898 if (unlikely(rcd->ts))
1899 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
1900
1901 /* Use GRO callback if UPT is enabled */
1902 if ((adapter->netdev->features & NETIF_F_LRO) &&
1903 !rq->shared->updateRxProd)
1904 netif_receive_skb(skb);
1905 else
1906 napi_gro_receive(&rq->napi, skb);
1907
1908 ctx->skb = NULL;
1909 encap_lro = false;
1910 num_pkts++;
1911 }
1912
1913 rcd_done:
1914 /* device may have skipped some rx descs */
1915 ring = rq->rx_ring + ring_idx;
1916 rbi->comp_state = VMXNET3_RXD_COMP_DONE;
1917
1918 comp_offset = vmxnet3_cmd_ring_desc_avail(ring);
1919 fill_offset = (idx > ring->next2fill ? 0 : ring->size) +
1920 idx - ring->next2fill - 1;
1921 if (!ring->isOutOfOrder || fill_offset >= comp_offset)
1922 ring->next2comp = idx;
1923 num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
1924
1925 /* Ensure that the writes to rxd->gen bits will be observed
1926 * after all other writes to rxd objects.
1927 */
1928 dma_wmb();
1929
1930 while (num_to_alloc) {
1931 rbi = rq->buf_info[ring_idx] + ring->next2fill;
1932 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_OOORX_COMP)))
1933 goto refill_buf;
1934 if (ring_idx == 0) {
1935 /* ring0 Type1 buffers can get skipped; re-fill them */
1936 if (rbi->buf_type != VMXNET3_RX_BUF_SKB)
1937 goto refill_buf;
1938 }
1939 if (rbi->comp_state == VMXNET3_RXD_COMP_DONE) {
1940 refill_buf:
1941 vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
1942 &rxCmdDesc);
1943 WARN_ON(!rxd->addr);
1944
1945 /* Recv desc is ready to be used by the device */
1946 rxd->gen = ring->gen;
1947 vmxnet3_cmd_ring_adv_next2fill(ring);
1948 rbi->comp_state = VMXNET3_RXD_COMP_PENDING;
1949 num_to_alloc--;
1950 } else {
1951 /* rx completion hasn't occurred */
1952 ring->isOutOfOrder = 1;
1953 break;
1954 }
1955 }
1956
1957 if (num_to_alloc == 0) {
1958 ring->isOutOfOrder = 0;
1959 }
1960
1961 /* if needed, update the register */
1962 if (unlikely(rq->shared->updateRxProd) && (ring->next2fill & 0xf) == 0) {
1963 VMXNET3_WRITE_BAR0_REG(adapter,
1964 rxprod_reg[ring_idx] + rq->qid * 8,
1965 ring->next2fill);
1966 }
1967
1968 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
1969 vmxnet3_getRxComp(rcd,
1970 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
1971 }
1972 if (need_flush)
1973 xdp_do_flush();
1974
1975 return num_pkts;
1976 }
1977
1978
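/*
 * Unmap and free every rx buffer currently held by both rings of this
 * queue and reset the ring state. XDP pages are recycled to the page
 * pool, head (skb) buffers are unmapped and freed, and body (page)
 * buffers are unmapped and released.
 */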
1979 static void
1980 vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1981 struct vmxnet3_adapter *adapter)
1982 {
1983 u32 i, ring_idx;
1984 struct Vmxnet3_RxDesc *rxd;
1985
1986 /* ring has already been cleaned up */
1987 if (!rq->rx_ring[0].base)
1988 return;
1989
1990 for (ring_idx = 0; ring_idx < 2; ring_idx++) {
1991 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
1992 struct vmxnet3_rx_buf_info *rbi;
1993 #ifdef __BIG_ENDIAN_BITFIELD
1994 struct Vmxnet3_RxDesc rxDesc;
1995 #endif
1996
1997 rbi = &rq->buf_info[ring_idx][i];
1998 vmxnet3_getRxDesc(rxd,
1999 &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
2000
2001 if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
2002 rbi->page && rbi->buf_type == VMXNET3_RX_BUF_XDP) {
2003 page_pool_recycle_direct(rq->page_pool,
2004 rbi->page);
2005 rbi->page = NULL;
2006 } else if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
2007 rbi->skb) {
2008 dma_unmap_single(&adapter->pdev->dev, rxd->addr,
2009 rxd->len, DMA_FROM_DEVICE);
2010 dev_kfree_skb(rbi->skb);
2011 rbi->skb = NULL;
2012 } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
2013 rbi->page) {
2014 dma_unmap_page(&adapter->pdev->dev, rxd->addr,
2015 rxd->len, DMA_FROM_DEVICE);
2016 put_page(rbi->page);
2017 rbi->page = NULL;
2018 }
2019 }
2020
2021 rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
2022 rq->rx_ring[ring_idx].next2fill =
2023 rq->rx_ring[ring_idx].next2comp = 0;
2024 }
2025
2026 rq->comp_ring.gen = VMXNET3_INIT_GEN;
2027 rq->comp_ring.next2proc = 0;
2028 }
2029
2030
2031 static void
2032 vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
2033 {
2034 int i;
2035
2036 for (i = 0; i < adapter->num_rx_queues; i++)
2037 vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
2038 rcu_assign_pointer(adapter->xdp_bpf_prog, NULL);
2039 }
2040
2041
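/*
 * Free everything allocated by vmxnet3_rq_create(): both rx descriptor
 * rings, the optional rx data and ts rings, the completion ring, the
 * XDP rxq info and page pool, and the buf_info array. All rx buffers
 * must already have been freed by vmxnet3_rq_cleanup().
 */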
2042 static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
2043 struct vmxnet3_adapter *adapter)
2044 {
2045 int i;
2046 int j;
2047
2048 /* all rx buffers must have already been freed */
2049 for (i = 0; i < 2; i++) {
2050 if (rq->buf_info[i]) {
2051 for (j = 0; j < rq->rx_ring[i].size; j++)
2052 BUG_ON(rq->buf_info[i][j].page != NULL);
2053 }
2054 }
2055
2056
2057 for (i = 0; i < 2; i++) {
2058 if (rq->rx_ring[i].base) {
2059 dma_free_coherent(&adapter->pdev->dev,
2060 rq->rx_ring[i].size
2061 * sizeof(struct Vmxnet3_RxDesc),
2062 rq->rx_ring[i].base,
2063 rq->rx_ring[i].basePA);
2064 rq->rx_ring[i].base = NULL;
2065 }
2066 }
2067
2068 if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
2069 xdp_rxq_info_unreg(&rq->xdp_rxq);
2070 page_pool_destroy(rq->page_pool);
2071 rq->page_pool = NULL;
2072
2073 if (rq->data_ring.base) {
2074 dma_free_coherent(&adapter->pdev->dev,
2075 rq->rx_ring[0].size * rq->data_ring.desc_size,
2076 rq->data_ring.base, rq->data_ring.basePA);
2077 rq->data_ring.base = NULL;
2078 }
2079
2080 if (rq->ts_ring.base) {
2081 dma_free_coherent(&adapter->pdev->dev,
2082 rq->rx_ring[0].size * rq->rx_ts_desc_size,
2083 rq->ts_ring.base, rq->ts_ring.basePA);
2084 rq->ts_ring.base = NULL;
2085 }
2086
2087 if (rq->comp_ring.base) {
2088 dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
2089 * sizeof(struct Vmxnet3_RxCompDesc),
2090 rq->comp_ring.base, rq->comp_ring.basePA);
2091 rq->comp_ring.base = NULL;
2092 }
2093
2094 kfree(rq->buf_info[0]);
2095 rq->buf_info[0] = NULL;
2096 rq->buf_info[1] = NULL;
2097 }
2098
2099 static void
2100 vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
2101 {
2102 int i;
2103
2104 for (i = 0; i < adapter->num_rx_queues; i++) {
2105 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2106
2107 if (rq->data_ring.base) {
2108 dma_free_coherent(&adapter->pdev->dev,
2109 (rq->rx_ring[0].size *
2110 rq->data_ring.desc_size),
2111 rq->data_ring.base,
2112 rq->data_ring.basePA);
2113 rq->data_ring.base = NULL;
2114 }
2115 rq->data_ring.desc_size = 0;
2116 }
2117 }
2118
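/*
 * (Re)initialize an rx queue for use: assign buffer types and sizes in
 * buf_info, reset both descriptor rings, create the page pool, refill
 * the rings with rx buffers, and reset the data, ts and completion
 * rings. Statistics are intentionally left untouched.
 */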
2119 static int
2120 vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
2121 struct vmxnet3_adapter *adapter)
2122 {
2123 int i, err;
2124
2125 /* initialize buf_info */
2126 for (i = 0; i < rq->rx_ring[0].size; i++) {
2127
2128 /* 1st buf for a pkt is skbuff or xdp page */
2129 if (i % adapter->rx_buf_per_pkt == 0) {
2130 rq->buf_info[0][i].buf_type = vmxnet3_xdp_enabled(adapter) ?
2131 VMXNET3_RX_BUF_XDP :
2132 VMXNET3_RX_BUF_SKB;
2133 rq->buf_info[0][i].len = adapter->skb_buf_size;
2134 		} else { /* subsequent bufs for a pkt are frags */
2135 rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
2136 rq->buf_info[0][i].len = PAGE_SIZE;
2137 }
2138 }
2139 for (i = 0; i < rq->rx_ring[1].size; i++) {
2140 rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
2141 rq->buf_info[1][i].len = PAGE_SIZE;
2142 }
2143
2144 /* reset internal state and allocate buffers for both rings */
2145 for (i = 0; i < 2; i++) {
2146 rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
2147
2148 memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
2149 sizeof(struct Vmxnet3_RxDesc));
2150 rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
2151 rq->rx_ring[i].isOutOfOrder = 0;
2152 }
2153
2154 err = vmxnet3_create_pp(adapter, rq,
2155 rq->rx_ring[0].size + rq->rx_ring[1].size);
2156 if (err)
2157 return err;
2158
2159 if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
2160 adapter) == 0) {
2161 xdp_rxq_info_unreg(&rq->xdp_rxq);
2162 page_pool_destroy(rq->page_pool);
2163 rq->page_pool = NULL;
2164
2165 		/* need at least 1 rx buffer for the 1st ring */
2166 return -ENOMEM;
2167 }
2168 vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
2169
2170 if (rq->ts_ring.base)
2171 memset(rq->ts_ring.base, 0,
2172 rq->rx_ring[0].size * rq->rx_ts_desc_size);
2173
2174 /* reset the comp ring */
2175 rq->comp_ring.next2proc = 0;
2176 memset(rq->comp_ring.base, 0, rq->comp_ring.size *
2177 sizeof(struct Vmxnet3_RxCompDesc));
2178 rq->comp_ring.gen = VMXNET3_INIT_GEN;
2179
2180 /* reset rxctx */
2181 rq->rx_ctx.skb = NULL;
2182
2183 /* stats are not reset */
2184 return 0;
2185 }
2186
2187
2188 static int
2189 vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
2190 {
2191 int i, err = 0;
2192
2193 for (i = 0; i < adapter->num_rx_queues; i++) {
2194 err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
2195 if (unlikely(err)) {
2196 dev_err(&adapter->netdev->dev, "%s: failed to "
2197 "initialize rx queue%i\n",
2198 adapter->netdev->name, i);
2199 break;
2200 }
2201 }
2202 return err;
2203
2204 }
2205
2206
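/*
 * Allocate the DMA-coherent memory needed by one rx queue: the two rx
 * descriptor rings, the optional rx data and ts rings, the completion
 * ring, and the buf_info bookkeeping array. Failure to allocate an
 * optional ring only disables that feature; any other failure tears
 * the queue down again via vmxnet3_rq_destroy().
 */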
2207 static int
2208 vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
2209 {
2210 int i;
2211 size_t sz;
2212 struct vmxnet3_rx_buf_info *bi;
2213
2214 for (i = 0; i < 2; i++) {
2215
2216 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
2217 rq->rx_ring[i].base = dma_alloc_coherent(
2218 &adapter->pdev->dev, sz,
2219 &rq->rx_ring[i].basePA,
2220 GFP_KERNEL);
2221 if (!rq->rx_ring[i].base) {
2222 netdev_err(adapter->netdev,
2223 "failed to allocate rx ring %d\n", i);
2224 goto err;
2225 }
2226 }
2227
2228 if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
2229 sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
2230 rq->data_ring.base =
2231 dma_alloc_coherent(&adapter->pdev->dev, sz,
2232 &rq->data_ring.basePA,
2233 GFP_KERNEL);
2234 if (!rq->data_ring.base) {
2235 netdev_err(adapter->netdev,
2236 "rx data ring will be disabled\n");
2237 adapter->rxdataring_enabled = false;
2238 }
2239 } else {
2240 rq->data_ring.base = NULL;
2241 rq->data_ring.desc_size = 0;
2242 }
2243
2244 if (rq->rx_ts_desc_size != 0) {
2245 sz = rq->rx_ring[0].size * rq->rx_ts_desc_size;
2246 rq->ts_ring.base =
2247 dma_alloc_coherent(&adapter->pdev->dev, sz,
2248 &rq->ts_ring.basePA,
2249 GFP_KERNEL);
2250 if (!rq->ts_ring.base) {
2251 netdev_err(adapter->netdev,
2252 "rx ts ring will be disabled\n");
2253 rq->rx_ts_desc_size = 0;
2254 }
2255 } else {
2256 rq->ts_ring.base = NULL;
2257 }
2258
2259 sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
2260 rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
2261 &rq->comp_ring.basePA,
2262 GFP_KERNEL);
2263 if (!rq->comp_ring.base) {
2264 netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
2265 goto err;
2266 }
2267
2268 bi = kcalloc_node(rq->rx_ring[0].size + rq->rx_ring[1].size,
2269 sizeof(rq->buf_info[0][0]), GFP_KERNEL,
2270 dev_to_node(&adapter->pdev->dev));
2271 if (!bi)
2272 goto err;
2273
2274 rq->buf_info[0] = bi;
2275 rq->buf_info[1] = bi + rq->rx_ring[0].size;
2276
2277 return 0;
2278
2279 err:
2280 vmxnet3_rq_destroy(rq, adapter);
2281 return -ENOMEM;
2282 }
2283
2284
2285 int
2286 vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
2287 {
2288 int i, err = 0;
2289
2290 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
2291
2292 for (i = 0; i < adapter->num_rx_queues; i++) {
2293 err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
2294 if (unlikely(err)) {
2295 dev_err(&adapter->netdev->dev,
2296 "%s: failed to create rx queue%i\n",
2297 adapter->netdev->name, i);
2298 goto err_out;
2299 }
2300 }
2301
2302 if (!adapter->rxdataring_enabled)
2303 vmxnet3_rq_destroy_all_rxdataring(adapter);
2304
2305 return err;
2306 err_out:
2307 vmxnet3_rq_destroy_all(adapter);
2308 return err;
2309
2310 }
2311
2312 /* Multiple queue aware polling function for tx and rx */
2313
2314 static int
2315 vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
2316 {
2317 int rcd_done = 0, i;
2318 if (unlikely(adapter->shared->ecr))
2319 vmxnet3_process_events(adapter);
2320 for (i = 0; i < adapter->num_tx_queues; i++)
2321 vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
2322
2323 for (i = 0; i < adapter->num_rx_queues; i++)
2324 rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
2325 adapter, budget);
2326 return rcd_done;
2327 }
2328
2329
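/*
 * NAPI poll handler for the single-interrupt (INTx/MSI) case: handles
 * pending events, completes tx on all queues and receives on all rx
 * queues, then re-enables all interrupts when less than the budget was
 * consumed.
 */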
2330 static int
2331 vmxnet3_poll(struct napi_struct *napi, int budget)
2332 {
2333 struct vmxnet3_rx_queue *rx_queue = container_of(napi,
2334 struct vmxnet3_rx_queue, napi);
2335 int rxd_done;
2336
2337 rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
2338
2339 if (rxd_done < budget) {
2340 napi_complete_done(napi, rxd_done);
2341 vmxnet3_enable_all_intrs(rx_queue->adapter);
2342 }
2343 return rxd_done;
2344 }
2345
2346 /*
2347 * NAPI polling function for MSI-X mode with multiple Rx queues
2348 * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
2349 */
2350
2351 static int
2352 vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
2353 {
2354 struct vmxnet3_rx_queue *rq = container_of(napi,
2355 struct vmxnet3_rx_queue, napi);
2356 struct vmxnet3_adapter *adapter = rq->adapter;
2357 int rxd_done;
2358
2359 /* When sharing interrupt with corresponding tx queue, process
2360 * tx completions in that queue as well
2361 */
2362 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
2363 struct vmxnet3_tx_queue *tq =
2364 &adapter->tx_queue[rq - adapter->rx_queue];
2365 vmxnet3_tq_tx_complete(tq, adapter);
2366 }
2367
2368 rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
2369
2370 if (rxd_done < budget) {
2371 napi_complete_done(napi, rxd_done);
2372 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
2373 }
2374 return rxd_done;
2375 }
2376
2377
2378 #ifdef CONFIG_PCI_MSI
2379
2380 /*
2381 * Handle completion interrupts on tx queues
2382 * Returns whether or not the intr is handled
2383 */
2384
2385 static irqreturn_t
2386 vmxnet3_msix_tx(int irq, void *data)
2387 {
2388 struct vmxnet3_tx_queue *tq = data;
2389 struct vmxnet3_adapter *adapter = tq->adapter;
2390
2391 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2392 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
2393
2394 	/* Handle the case where only one irq is allocated for all tx queues */
2395 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
2396 int i;
2397 for (i = 0; i < adapter->num_tx_queues; i++) {
2398 struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
2399 vmxnet3_tq_tx_complete(txq, adapter);
2400 }
2401 } else {
2402 vmxnet3_tq_tx_complete(tq, adapter);
2403 }
2404 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
2405
2406 return IRQ_HANDLED;
2407 }
2408
2409
2410 /*
2411 * Handle completion interrupts on rx queues. Returns whether or not the
2412 * intr is handled
2413 */
2414
2415 static irqreturn_t
2416 vmxnet3_msix_rx(int irq, void *data)
2417 {
2418 struct vmxnet3_rx_queue *rq = data;
2419 struct vmxnet3_adapter *adapter = rq->adapter;
2420
2421 /* disable intr if needed */
2422 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2423 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
2424 napi_schedule(&rq->napi);
2425
2426 return IRQ_HANDLED;
2427 }
2428
2429 /*
2430 *----------------------------------------------------------------------------
2431 *
2432 * vmxnet3_msix_event --
2433 *
2434 * vmxnet3 msix event intr handler
2435 *
2436 * Result:
2437 * whether or not the intr is handled
2438 *
2439 *----------------------------------------------------------------------------
2440 */
2441
2442 static irqreturn_t
2443 vmxnet3_msix_event(int irq, void *data)
2444 {
2445 struct net_device *dev = data;
2446 struct vmxnet3_adapter *adapter = netdev_priv(dev);
2447
2448 /* disable intr if needed */
2449 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2450 vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
2451
2452 if (adapter->shared->ecr)
2453 vmxnet3_process_events(adapter);
2454
2455 vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
2456
2457 return IRQ_HANDLED;
2458 }
2459
2460 #endif /* CONFIG_PCI_MSI */
2461
2462
2463 /* Interrupt handler for vmxnet3 */
2464 static irqreturn_t
2465 vmxnet3_intr(int irq, void *dev_id)
2466 {
2467 struct net_device *dev = dev_id;
2468 struct vmxnet3_adapter *adapter = netdev_priv(dev);
2469
2470 if (adapter->intr.type == VMXNET3_IT_INTX) {
2471 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
2472 if (unlikely(icr == 0))
2473 /* not ours */
2474 return IRQ_NONE;
2475 }
2476
2477
2478 /* disable intr if needed */
2479 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2480 vmxnet3_disable_all_intrs(adapter);
2481
2482 napi_schedule(&adapter->rx_queue[0].napi);
2483
2484 return IRQ_HANDLED;
2485 }
2486
2487 #ifdef CONFIG_NET_POLL_CONTROLLER
2488
2489 /* netpoll callback. */
2490 static void
2491 vmxnet3_netpoll(struct net_device *netdev)
2492 {
2493 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2494
2495 switch (adapter->intr.type) {
2496 #ifdef CONFIG_PCI_MSI
2497 case VMXNET3_IT_MSIX: {
2498 int i;
2499 for (i = 0; i < adapter->num_rx_queues; i++)
2500 vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
2501 break;
2502 }
2503 #endif
2504 case VMXNET3_IT_MSI:
2505 default:
2506 vmxnet3_intr(0, adapter->netdev);
2507 break;
2508 }
2509
2510 }
2511 #endif /* CONFIG_NET_POLL_CONTROLLER */
2512
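/*
 * Request the interrupt vectors for the configured intr type. With
 * MSI-X each tx and rx queue normally gets its own vector (tx and rx
 * queues share one in BUDDYSHARE mode, all tx queues share one in
 * TXSHARE mode) and one additional vector handles events. MSI and
 * INTx fall back to a single vector and a single rx queue.
 */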
2513 static int
2514 vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
2515 {
2516 struct vmxnet3_intr *intr = &adapter->intr;
2517 int err = 0, i;
2518 int vector = 0;
2519
2520 #ifdef CONFIG_PCI_MSI
2521 if (adapter->intr.type == VMXNET3_IT_MSIX) {
2522 for (i = 0; i < adapter->num_tx_queues; i++) {
2523 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2524 sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
2525 adapter->netdev->name, vector);
2526 err = request_irq(
2527 intr->msix_entries[vector].vector,
2528 vmxnet3_msix_tx, 0,
2529 adapter->tx_queue[i].name,
2530 &adapter->tx_queue[i]);
2531 } else {
2532 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
2533 adapter->netdev->name, vector);
2534 }
2535 if (err) {
2536 dev_err(&adapter->netdev->dev,
2537 "Failed to request irq for MSIX, %s, "
2538 "error %d\n",
2539 adapter->tx_queue[i].name, err);
2540 return err;
2541 }
2542
2543 /* Handle the case where only 1 MSIx was allocated for
2544 * all tx queues */
2545 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
2546 for (; i < adapter->num_tx_queues; i++)
2547 adapter->tx_queue[i].comp_ring.intr_idx
2548 = vector;
2549 vector++;
2550 break;
2551 } else {
2552 adapter->tx_queue[i].comp_ring.intr_idx
2553 = vector++;
2554 }
2555 }
2556 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
2557 vector = 0;
2558
2559 for (i = 0; i < adapter->num_rx_queues; i++) {
2560 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
2561 sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
2562 adapter->netdev->name, vector);
2563 else
2564 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
2565 adapter->netdev->name, vector);
2566 err = request_irq(intr->msix_entries[vector].vector,
2567 vmxnet3_msix_rx, 0,
2568 adapter->rx_queue[i].name,
2569 &(adapter->rx_queue[i]));
2570 if (err) {
2571 netdev_err(adapter->netdev,
2572 "Failed to request irq for MSIX, "
2573 "%s, error %d\n",
2574 adapter->rx_queue[i].name, err);
2575 return err;
2576 }
2577
2578 adapter->rx_queue[i].comp_ring.intr_idx = vector++;
2579 }
2580
2581 sprintf(intr->event_msi_vector_name, "%s-event-%d",
2582 adapter->netdev->name, vector);
2583 err = request_irq(intr->msix_entries[vector].vector,
2584 vmxnet3_msix_event, 0,
2585 intr->event_msi_vector_name, adapter->netdev);
2586 intr->event_intr_idx = vector;
2587
2588 } else if (intr->type == VMXNET3_IT_MSI) {
2589 adapter->num_rx_queues = 1;
2590 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
2591 adapter->netdev->name, adapter->netdev);
2592 } else {
2593 #endif
2594 adapter->num_rx_queues = 1;
2595 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
2596 IRQF_SHARED, adapter->netdev->name,
2597 adapter->netdev);
2598 #ifdef CONFIG_PCI_MSI
2599 }
2600 #endif
2601 intr->num_intrs = vector + 1;
2602 if (err) {
2603 netdev_err(adapter->netdev,
2604 "Failed to request irq (intr type:%d), error %d\n",
2605 intr->type, err);
2606 } else {
2607 /* Number of rx queues will not change after this */
2608 for (i = 0; i < adapter->num_rx_queues; i++) {
2609 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2610 rq->qid = i;
2611 rq->qid2 = i + adapter->num_rx_queues;
2612 rq->dataRingQid = i + 2 * adapter->num_rx_queues;
2613 }
2614
2615 /* init our intr settings */
2616 for (i = 0; i < intr->num_intrs; i++)
2617 intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
2618 if (adapter->intr.type != VMXNET3_IT_MSIX) {
2619 adapter->intr.event_intr_idx = 0;
2620 for (i = 0; i < adapter->num_tx_queues; i++)
2621 adapter->tx_queue[i].comp_ring.intr_idx = 0;
2622 adapter->rx_queue[0].comp_ring.intr_idx = 0;
2623 }
2624
2625 netdev_info(adapter->netdev,
2626 "intr type %u, mode %u, %u vectors allocated\n",
2627 intr->type, intr->mask_mode, intr->num_intrs);
2628 }
2629
2630 return err;
2631 }
2632
2633
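/*
 * Release the irqs requested by vmxnet3_request_irqs(), walking the
 * MSI-X vectors in the order they were assigned (tx queues, rx queues,
 * then the event vector) or freeing the single MSI/INTx irq.
 */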
2634 static void
2635 vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
2636 {
2637 struct vmxnet3_intr *intr = &adapter->intr;
2638 BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
2639
2640 switch (intr->type) {
2641 #ifdef CONFIG_PCI_MSI
2642 case VMXNET3_IT_MSIX:
2643 {
2644 int i, vector = 0;
2645
2646 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2647 for (i = 0; i < adapter->num_tx_queues; i++) {
2648 free_irq(intr->msix_entries[vector++].vector,
2649 &(adapter->tx_queue[i]));
2650 if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
2651 break;
2652 }
2653 }
2654
2655 for (i = 0; i < adapter->num_rx_queues; i++) {
2656 free_irq(intr->msix_entries[vector++].vector,
2657 &(adapter->rx_queue[i]));
2658 }
2659
2660 free_irq(intr->msix_entries[vector].vector,
2661 adapter->netdev);
2662 BUG_ON(vector >= intr->num_intrs);
2663 break;
2664 }
2665 #endif
2666 case VMXNET3_IT_MSI:
2667 free_irq(adapter->pdev->irq, adapter->netdev);
2668 break;
2669 case VMXNET3_IT_INTX:
2670 free_irq(adapter->pdev->irq, adapter->netdev);
2671 break;
2672 default:
2673 BUG();
2674 }
2675 }
2676
2677
2678 static void
2679 vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
2680 {
2681 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2682 u16 vid;
2683
2684 /* allow untagged pkts */
2685 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
2686
2687 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2688 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2689 }
2690
2691
2692 static int
2693 vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
2694 {
2695 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2696
2697 if (!(netdev->flags & IFF_PROMISC)) {
2698 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2699 unsigned long flags;
2700
2701 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2702 spin_lock_irqsave(&adapter->cmd_lock, flags);
2703 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2704 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2705 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2706 }
2707
2708 set_bit(vid, adapter->active_vlans);
2709
2710 return 0;
2711 }
2712
2713
2714 static int
2715 vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
2716 {
2717 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2718
2719 if (!(netdev->flags & IFF_PROMISC)) {
2720 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2721 unsigned long flags;
2722
2723 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
2724 spin_lock_irqsave(&adapter->cmd_lock, flags);
2725 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2726 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2727 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2728 }
2729
2730 clear_bit(vid, adapter->active_vlans);
2731
2732 return 0;
2733 }
2734
2735
2736 static u8 *
2737 vmxnet3_copy_mc(struct net_device *netdev)
2738 {
2739 u8 *buf = NULL;
2740 u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
2741
2742 /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
2743 if (sz <= 0xffff) {
2744 /* We may be called with BH disabled */
2745 buf = kmalloc(sz, GFP_ATOMIC);
2746 if (buf) {
2747 struct netdev_hw_addr *ha;
2748 int i = 0;
2749
2750 netdev_for_each_mc_addr(ha, netdev)
2751 memcpy(buf + i++ * ETH_ALEN, ha->addr,
2752 ETH_ALEN);
2753 }
2754 }
2755 return buf;
2756 }
2757
2758
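/*
 * Build the rx filter configuration from the netdev flags and the
 * multicast list and push it to the device: unicast is always enabled,
 * broadcast/all-multi/promiscuous follow the flags, and an explicit
 * multicast table is DMA-mapped for the duration of the update when it
 * can be allocated. Falls back to ALL_MULTI if the table cannot be
 * copied or mapped.
 */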
2759 static void
2760 vmxnet3_set_mc(struct net_device *netdev)
2761 {
2762 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2763 unsigned long flags;
2764 struct Vmxnet3_RxFilterConf *rxConf =
2765 &adapter->shared->devRead.rxFilterConf;
2766 u8 *new_table = NULL;
2767 dma_addr_t new_table_pa = 0;
2768 bool new_table_pa_valid = false;
2769 u32 new_mode = VMXNET3_RXM_UCAST;
2770
2771 if (netdev->flags & IFF_PROMISC) {
2772 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2773 memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
2774
2775 new_mode |= VMXNET3_RXM_PROMISC;
2776 } else {
2777 vmxnet3_restore_vlan(adapter);
2778 }
2779
2780 if (netdev->flags & IFF_BROADCAST)
2781 new_mode |= VMXNET3_RXM_BCAST;
2782
2783 if (netdev->flags & IFF_ALLMULTI)
2784 new_mode |= VMXNET3_RXM_ALL_MULTI;
2785 else
2786 if (!netdev_mc_empty(netdev)) {
2787 new_table = vmxnet3_copy_mc(netdev);
2788 if (new_table) {
2789 size_t sz = netdev_mc_count(netdev) * ETH_ALEN;
2790
2791 rxConf->mfTableLen = cpu_to_le16(sz);
2792 new_table_pa = dma_map_single(
2793 &adapter->pdev->dev,
2794 new_table,
2795 sz,
2796 DMA_TO_DEVICE);
2797 if (!dma_mapping_error(&adapter->pdev->dev,
2798 new_table_pa)) {
2799 new_mode |= VMXNET3_RXM_MCAST;
2800 new_table_pa_valid = true;
2801 rxConf->mfTablePA = cpu_to_le64(
2802 new_table_pa);
2803 }
2804 }
2805 if (!new_table_pa_valid) {
2806 netdev_info(netdev,
2807 "failed to copy mcast list, setting ALL_MULTI\n");
2808 new_mode |= VMXNET3_RXM_ALL_MULTI;
2809 }
2810 }
2811
2812 if (!(new_mode & VMXNET3_RXM_MCAST)) {
2813 rxConf->mfTableLen = 0;
2814 rxConf->mfTablePA = 0;
2815 }
2816
2817 spin_lock_irqsave(&adapter->cmd_lock, flags);
2818 if (new_mode != rxConf->rxMode) {
2819 rxConf->rxMode = cpu_to_le32(new_mode);
2820 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2821 VMXNET3_CMD_UPDATE_RX_MODE);
2822 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2823 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2824 }
2825
2826 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2827 VMXNET3_CMD_UPDATE_MAC_FILTERS);
2828 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2829
2830 if (new_table_pa_valid)
2831 dma_unmap_single(&adapter->pdev->dev, new_table_pa,
2832 rxConf->mfTableLen, DMA_TO_DEVICE);
2833 kfree(new_table);
2834 }
2835
2836 void
2837 vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
2838 {
2839 int i;
2840
2841 for (i = 0; i < adapter->num_rx_queues; i++)
2842 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
2843 }
2844
2845
2846 /*
2847 * Set up driver_shared based on settings in adapter.
2848 */
2849
2850 static void
2851 vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2852 {
2853 struct Vmxnet3_DriverShared *shared = adapter->shared;
2854 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
2855 struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt;
2856 struct Vmxnet3_TxQueueConf *tqc;
2857 struct Vmxnet3_RxQueueConf *rqc;
2858 struct Vmxnet3_TxQueueTSConf *tqtsc;
2859 struct Vmxnet3_RxQueueTSConf *rqtsc;
2860 int i;
2861
2862 memset(shared, 0, sizeof(*shared));
2863
2864 /* driver settings */
2865 shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
2866 devRead->misc.driverInfo.version = cpu_to_le32(
2867 VMXNET3_DRIVER_VERSION_NUM);
2868 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
2869 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
2870 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
2871 *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
2872 *((u32 *)&devRead->misc.driverInfo.gos));
2873 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2874 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
2875
2876 devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
2877 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
2878
2879 /* set up feature flags */
2880 if (adapter->netdev->features & NETIF_F_RXCSUM)
2881 devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
2882
2883 if (adapter->netdev->features & NETIF_F_LRO) {
2884 devRead->misc.uptFeatures |= UPT1_F_LRO;
2885 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2886 }
2887 if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2888 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2889
2890 if (adapter->netdev->features & (NETIF_F_GSO_UDP_TUNNEL |
2891 NETIF_F_GSO_UDP_TUNNEL_CSUM))
2892 devRead->misc.uptFeatures |= UPT1_F_RXINNEROFLD;
2893
2894 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2895 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2896 devRead->misc.queueDescLen = cpu_to_le32(
2897 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
2898 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
2899
2900 /* tx queue settings */
2901 devRead->misc.numTxQueues = adapter->num_tx_queues;
2902 for (i = 0; i < adapter->num_tx_queues; i++) {
2903 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2904 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
2905 tqc = &adapter->tqd_start[i].conf;
2906 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
2907 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
2908 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
2909 tqc->ddPA = cpu_to_le64(~0ULL);
2910 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
2911 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
2912 tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
2913 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
2914 tqc->ddLen = cpu_to_le32(0);
2915 tqc->intrIdx = tq->comp_ring.intr_idx;
2916 if (VMXNET3_VERSION_GE_9(adapter)) {
2917 tqtsc = &adapter->tqd_start[i].tsConf;
2918 tqtsc->txTSRingBasePA = cpu_to_le64(tq->ts_ring.basePA);
2919 tqtsc->txTSRingDescSize = cpu_to_le16(tq->tx_ts_desc_size);
2920 }
2921 }
2922
2923 /* rx queue settings */
2924 devRead->misc.numRxQueues = adapter->num_rx_queues;
2925 for (i = 0; i < adapter->num_rx_queues; i++) {
2926 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2927 rqc = &adapter->rqd_start[i].conf;
2928 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
2929 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
2930 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
2931 rqc->ddPA = cpu_to_le64(~0ULL);
2932 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
2933 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
2934 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
2935 rqc->ddLen = cpu_to_le32(0);
2936 rqc->intrIdx = rq->comp_ring.intr_idx;
2937 if (VMXNET3_VERSION_GE_3(adapter)) {
2938 rqc->rxDataRingBasePA =
2939 cpu_to_le64(rq->data_ring.basePA);
2940 rqc->rxDataRingDescSize =
2941 cpu_to_le16(rq->data_ring.desc_size);
2942 }
2943 if (VMXNET3_VERSION_GE_9(adapter)) {
2944 rqtsc = &adapter->rqd_start[i].tsConf;
2945 rqtsc->rxTSRingBasePA = cpu_to_le64(rq->ts_ring.basePA);
2946 rqtsc->rxTSRingDescSize = cpu_to_le16(rq->rx_ts_desc_size);
2947 }
2948 }
2949
2950 #ifdef VMXNET3_RSS
2951 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
2952
2953 if (adapter->rss) {
2954 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
2955
2956 devRead->misc.uptFeatures |= UPT1_F_RSS;
2957 devRead->misc.numRxQueues = adapter->num_rx_queues;
2958 rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
2959 UPT1_RSS_HASH_TYPE_IPV4 |
2960 UPT1_RSS_HASH_TYPE_TCP_IPV6 |
2961 UPT1_RSS_HASH_TYPE_IPV6;
2962 rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
2963 rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
2964 rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
2965 netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));
2966
2967 for (i = 0; i < rssConf->indTableSize; i++)
2968 rssConf->indTable[i] = ethtool_rxfh_indir_default(
2969 i, adapter->num_rx_queues);
2970
2971 devRead->rssConfDesc.confVer = 1;
2972 devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
2973 devRead->rssConfDesc.confPA =
2974 cpu_to_le64(adapter->rss_conf_pa);
2975 }
2976
2977 #endif /* VMXNET3_RSS */
2978
2979 /* intr settings */
2980 if (!VMXNET3_VERSION_GE_6(adapter) ||
2981 !adapter->queuesExtEnabled) {
2982 devRead->intrConf.autoMask = adapter->intr.mask_mode ==
2983 VMXNET3_IMM_AUTO;
2984 devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2985 for (i = 0; i < adapter->intr.num_intrs; i++)
2986 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
2987
2988 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
2989 devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2990 } else {
2991 devReadExt->intrConfExt.autoMask = adapter->intr.mask_mode ==
2992 VMXNET3_IMM_AUTO;
2993 devReadExt->intrConfExt.numIntrs = adapter->intr.num_intrs;
2994 for (i = 0; i < adapter->intr.num_intrs; i++)
2995 devReadExt->intrConfExt.modLevels[i] = adapter->intr.mod_levels[i];
2996
2997 devReadExt->intrConfExt.eventIntrIdx = adapter->intr.event_intr_idx;
2998 devReadExt->intrConfExt.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2999 }
3000
3001 /* rx filter settings */
3002 devRead->rxFilterConf.rxMode = 0;
3003 vmxnet3_restore_vlan(adapter);
3004 vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
3005
3006 /* the rest are already zeroed */
3007 }
3008
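/*
 * Tell a version 7+ device about the ring buffer sizes chosen in
 * vmxnet3_adjust_rx_ring_size(); a no-op on older devices.
 */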
3009 static void
3010 vmxnet3_init_bufsize(struct vmxnet3_adapter *adapter)
3011 {
3012 struct Vmxnet3_DriverShared *shared = adapter->shared;
3013 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
3014 unsigned long flags;
3015
3016 if (!VMXNET3_VERSION_GE_7(adapter))
3017 return;
3018
3019 cmdInfo->ringBufSize = adapter->ringBufSize;
3020 spin_lock_irqsave(&adapter->cmd_lock, flags);
3021 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3022 VMXNET3_CMD_SET_RING_BUFFER_SIZE);
3023 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3024 }
3025
3026 static void
3027 vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
3028 {
3029 struct Vmxnet3_DriverShared *shared = adapter->shared;
3030 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
3031 unsigned long flags;
3032
3033 if (!VMXNET3_VERSION_GE_3(adapter))
3034 return;
3035
3036 spin_lock_irqsave(&adapter->cmd_lock, flags);
3037 cmdInfo->varConf.confVer = 1;
3038 cmdInfo->varConf.confLen =
3039 cpu_to_le32(sizeof(*adapter->coal_conf));
3040 cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);
3041
3042 if (adapter->default_coal_mode) {
3043 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3044 VMXNET3_CMD_GET_COALESCE);
3045 } else {
3046 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3047 VMXNET3_CMD_SET_COALESCE);
3048 }
3049
3050 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3051 }
3052
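/*
 * For version 4+ devices either read back the default RSS hash fields
 * or program the requested ones. On version 7+ the UDP/ESP RSS
 * capability bits in DCR0 are negotiated first so that only supported
 * fields are requested; the fields actually applied by the device are
 * then read back and cached.
 */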
3053 static void
3054 vmxnet3_init_rssfields(struct vmxnet3_adapter *adapter)
3055 {
3056 struct Vmxnet3_DriverShared *shared = adapter->shared;
3057 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
3058 unsigned long flags;
3059
3060 if (!VMXNET3_VERSION_GE_4(adapter))
3061 return;
3062
3063 spin_lock_irqsave(&adapter->cmd_lock, flags);
3064
3065 if (adapter->default_rss_fields) {
3066 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3067 VMXNET3_CMD_GET_RSS_FIELDS);
3068 adapter->rss_fields =
3069 VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3070 } else {
3071 if (VMXNET3_VERSION_GE_7(adapter)) {
3072 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP4 ||
3073 adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) &&
3074 vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3075 VMXNET3_CAP_UDP_RSS)) {
3076 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_UDP_RSS;
3077 } else {
3078 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_UDP_RSS);
3079 }
3080
3081 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) &&
3082 vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3083 VMXNET3_CAP_ESP_RSS_IPV4)) {
3084 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV4;
3085 } else {
3086 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV4);
3087 }
3088
3089 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP6) &&
3090 vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3091 VMXNET3_CAP_ESP_RSS_IPV6)) {
3092 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV6;
3093 } else {
3094 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV6);
3095 }
3096
3097 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
3098 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
3099 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3100 }
3101 cmdInfo->setRssFields = adapter->rss_fields;
3102 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3103 VMXNET3_CMD_SET_RSS_FIELDS);
3104 /* Not all requested RSS may get applied, so get and
3105 * cache what was actually applied.
3106 */
3107 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3108 VMXNET3_CMD_GET_RSS_FIELDS);
3109 adapter->rss_fields =
3110 VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3111 }
3112
3113 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3114 }
3115
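/*
 * Bring the device into the active state: initialize the tx/rx queues,
 * request irqs, fill in the shared area and issue ACTIVATE_DEV, apply
 * buffer size, coalescing and RSS field settings, prime the rx
 * producer registers, restore the rx filter, and finally enable NAPI
 * and interrupts.
 */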
3116 int
3117 vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
3118 {
3119 int err, i;
3120 u32 ret;
3121 unsigned long flags;
3122
3123 netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
3124 " ring sizes %u %u %u\n", adapter->netdev->name,
3125 adapter->skb_buf_size, adapter->rx_buf_per_pkt,
3126 adapter->tx_queue[0].tx_ring.size,
3127 adapter->rx_queue[0].rx_ring[0].size,
3128 adapter->rx_queue[0].rx_ring[1].size);
3129
3130 vmxnet3_tq_init_all(adapter);
3131 err = vmxnet3_rq_init_all(adapter);
3132 if (err) {
3133 netdev_err(adapter->netdev,
3134 "Failed to init rx queue error %d\n", err);
3135 goto rq_err;
3136 }
3137
3138 err = vmxnet3_request_irqs(adapter);
3139 if (err) {
3140 netdev_err(adapter->netdev,
3141 			   "Failed to setup irqs: error %d\n", err);
3142 goto irq_err;
3143 }
3144
3145 vmxnet3_setup_driver_shared(adapter);
3146
3147 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
3148 adapter->shared_pa));
3149 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
3150 adapter->shared_pa));
3151 spin_lock_irqsave(&adapter->cmd_lock, flags);
3152 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3153 VMXNET3_CMD_ACTIVATE_DEV);
3154 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3155 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3156
3157 if (ret != 0) {
3158 netdev_err(adapter->netdev,
3159 "Failed to activate dev: error %u\n", ret);
3160 err = -EINVAL;
3161 goto activate_err;
3162 }
3163
3164 vmxnet3_init_bufsize(adapter);
3165 vmxnet3_init_coalesce(adapter);
3166 vmxnet3_init_rssfields(adapter);
3167
3168 for (i = 0; i < adapter->num_rx_queues; i++) {
3169 VMXNET3_WRITE_BAR0_REG(adapter,
3170 adapter->rx_prod_offset + i * VMXNET3_REG_ALIGN,
3171 adapter->rx_queue[i].rx_ring[0].next2fill);
3172 VMXNET3_WRITE_BAR0_REG(adapter, (adapter->rx_prod2_offset +
3173 (i * VMXNET3_REG_ALIGN)),
3174 adapter->rx_queue[i].rx_ring[1].next2fill);
3175 }
3176
3177 	/* Apply the rx filter settings last. */
3178 vmxnet3_set_mc(adapter->netdev);
3179
3180 /*
3181 * Check link state when first activating device. It will start the
3182 * tx queue if the link is up.
3183 */
3184 vmxnet3_check_link(adapter, true);
3185 netif_tx_wake_all_queues(adapter->netdev);
3186 for (i = 0; i < adapter->num_rx_queues; i++)
3187 napi_enable(&adapter->rx_queue[i].napi);
3188 vmxnet3_enable_all_intrs(adapter);
3189 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3190 return 0;
3191
3192 activate_err:
3193 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
3194 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
3195 vmxnet3_free_irqs(adapter);
3196 irq_err:
3197 rq_err:
3198 /* free up buffers we allocated */
3199 vmxnet3_rq_cleanup_all(adapter);
3200 return err;
3201 }
3202
3203
3204 void
3205 vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
3206 {
3207 unsigned long flags;
3208 spin_lock_irqsave(&adapter->cmd_lock, flags);
3209 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
3210 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3211 }
3212
3213
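/*
 * Counterpart of vmxnet3_activate_dev(): quiesce the device, disable
 * interrupts and NAPI, stop the tx queues, and release all queue
 * buffers and irqs. The QUIESCED state bit makes repeated calls a
 * no-op.
 */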
3214 int
3215 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
3216 {
3217 int i;
3218 unsigned long flags;
3219 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
3220 return 0;
3221
3222
3223 spin_lock_irqsave(&adapter->cmd_lock, flags);
3224 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3225 VMXNET3_CMD_QUIESCE_DEV);
3226 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3227 vmxnet3_disable_all_intrs(adapter);
3228
3229 for (i = 0; i < adapter->num_rx_queues; i++)
3230 napi_disable(&adapter->rx_queue[i].napi);
3231 netif_tx_disable(adapter->netdev);
3232 adapter->link_speed = 0;
3233 netif_carrier_off(adapter->netdev);
3234
3235 vmxnet3_tq_cleanup_all(adapter);
3236 vmxnet3_rq_cleanup_all(adapter);
3237 vmxnet3_free_irqs(adapter);
3238 return 0;
3239 }
3240
3241
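/*
 * Program the MAC address into the device: the low four bytes go to
 * the MACL register and the remaining two bytes to MACH.
 */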
3242 static void
3243 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac)
3244 {
3245 u32 tmp;
3246
3247 tmp = *(u32 *)mac;
3248 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
3249
3250 tmp = (mac[5] << 8) | mac[4];
3251 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
3252 }
3253
3254
3255 static int
3256 vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
3257 {
3258 struct sockaddr *addr = p;
3259 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3260
3261 dev_addr_set(netdev, addr->sa_data);
3262 vmxnet3_write_mac_addr(adapter, addr->sa_data);
3263
3264 return 0;
3265 }
3266
3267
3268 /* ==================== initialization and cleanup routines ============ */
3269
3270 static int
3271 vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
3272 {
3273 int err;
3274 unsigned long mmio_start, mmio_len;
3275 struct pci_dev *pdev = adapter->pdev;
3276
3277 err = pci_enable_device(pdev);
3278 if (err) {
3279 dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
3280 return err;
3281 }
3282
3283 err = pci_request_selected_regions(pdev, (1 << 2) - 1,
3284 vmxnet3_driver_name);
3285 if (err) {
3286 dev_err(&pdev->dev,
3287 "Failed to request region for adapter: error %d\n", err);
3288 goto err_enable_device;
3289 }
3290
3291 pci_set_master(pdev);
3292
3293 mmio_start = pci_resource_start(pdev, 0);
3294 mmio_len = pci_resource_len(pdev, 0);
3295 adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
3296 if (!adapter->hw_addr0) {
3297 dev_err(&pdev->dev, "Failed to map bar0\n");
3298 err = -EIO;
3299 goto err_ioremap;
3300 }
3301
3302 mmio_start = pci_resource_start(pdev, 1);
3303 mmio_len = pci_resource_len(pdev, 1);
3304 adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
3305 if (!adapter->hw_addr1) {
3306 dev_err(&pdev->dev, "Failed to map bar1\n");
3307 err = -EIO;
3308 goto err_bar1;
3309 }
3310 return 0;
3311
3312 err_bar1:
3313 iounmap(adapter->hw_addr0);
3314 err_ioremap:
3315 pci_release_selected_regions(pdev, (1 << 2) - 1);
3316 err_enable_device:
3317 pci_disable_device(pdev);
3318 return err;
3319 }
3320
3321
3322 static void
3323 vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
3324 {
3325 BUG_ON(!adapter->pdev);
3326
3327 iounmap(adapter->hw_addr0);
3328 iounmap(adapter->hw_addr1);
3329 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
3330 pci_disable_device(adapter->pdev);
3331 }
3332
3333
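/*
 * Pick skb_buf_size and rx_buf_per_pkt for the current MTU and derive
 * the rx ring sizes from them: ring0 and ring1 are rounded to a
 * multiple of rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN (and to a power
 * of two on version 7+), and the completion ring covers both.
 */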
3334 void
3335 vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
3336 {
3337 size_t sz, i, ring0_size, ring1_size, comp_size;
3338 /* With version7 ring1 will have only T0 buffers */
3339 if (!VMXNET3_VERSION_GE_7(adapter)) {
3340 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
3341 VMXNET3_MAX_ETH_HDR_SIZE) {
3342 adapter->skb_buf_size = adapter->netdev->mtu +
3343 VMXNET3_MAX_ETH_HDR_SIZE;
3344 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
3345 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
3346
3347 adapter->rx_buf_per_pkt = 1;
3348 } else {
3349 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
3350 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
3351 VMXNET3_MAX_ETH_HDR_SIZE;
3352 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
3353 }
3354 } else {
3355 adapter->skb_buf_size = min((int)adapter->netdev->mtu + VMXNET3_MAX_ETH_HDR_SIZE,
3356 VMXNET3_MAX_SKB_BUF_SIZE);
3357 adapter->rx_buf_per_pkt = 1;
3358 adapter->ringBufSize.ring1BufSizeType0 = cpu_to_le16(adapter->skb_buf_size);
3359 adapter->ringBufSize.ring1BufSizeType1 = 0;
3360 adapter->ringBufSize.ring2BufSizeType1 = cpu_to_le16(PAGE_SIZE);
3361 }
3362
3363 /*
3364 * for simplicity, force the ring0 size to be a multiple of
3365 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
3366 */
3367 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
3368 ring0_size = adapter->rx_queue[0].rx_ring[0].size;
3369 ring0_size = (ring0_size + sz - 1) / sz * sz;
3370 ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
3371 sz * sz);
3372 ring1_size = adapter->rx_queue[0].rx_ring[1].size;
3373 ring1_size = (ring1_size + sz - 1) / sz * sz;
3374 ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
3375 sz * sz);
3376 /* For v7 and later, keep ring size power of 2 for UPT */
3377 if (VMXNET3_VERSION_GE_7(adapter)) {
3378 ring0_size = rounddown_pow_of_two(ring0_size);
3379 ring1_size = rounddown_pow_of_two(ring1_size);
3380 }
3381 comp_size = ring0_size + ring1_size;
3382
3383 for (i = 0; i < adapter->num_rx_queues; i++) {
3384 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
3385
3386 rq->rx_ring[0].size = ring0_size;
3387 rq->rx_ring[1].size = ring1_size;
3388 rq->comp_ring.size = comp_size;
3389 }
3390 }
3391
3392
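/*
 * Allocate all tx and rx queues with the requested ring and descriptor
 * sizes. A tx queue allocation failure is fatal; rx queue allocation
 * may fall back to fewer rx queues as long as at least the first one
 * could be created.
 */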
3393 int
3394 vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
3395 u32 rx_ring_size, u32 rx_ring2_size,
3396 u16 txdata_desc_size, u16 rxdata_desc_size)
3397 {
3398 int err = 0, i;
3399
3400 for (i = 0; i < adapter->num_tx_queues; i++) {
3401 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
3402 tq->tx_ring.size = tx_ring_size;
3403 tq->data_ring.size = tx_ring_size;
3404 tq->comp_ring.size = tx_ring_size;
3405 tq->txdata_desc_size = txdata_desc_size;
3406 tq->shared = &adapter->tqd_start[i].ctrl;
3407 tq->stopped = true;
3408 tq->adapter = adapter;
3409 tq->qid = i;
3410 tq->tx_ts_desc_size = adapter->tx_ts_desc_size;
3411 tq->tsPktCount = 1;
3412 err = vmxnet3_tq_create(tq, adapter);
3413 /*
3414 		 * Too late to change num_tx_queues. We cannot make do with
3415 		 * fewer queues than we asked for.
3416 */
3417 if (err)
3418 goto queue_err;
3419 }
3420
3421 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
3422 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
3423 vmxnet3_adjust_rx_ring_size(adapter);
3424
3425 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
3426 for (i = 0; i < adapter->num_rx_queues; i++) {
3427 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
3428 /* qid and qid2 for rx queues will be assigned later when num
3429 * of rx queues is finalized after allocating intrs */
3430 rq->shared = &adapter->rqd_start[i].ctrl;
3431 rq->adapter = adapter;
3432 rq->data_ring.desc_size = rxdata_desc_size;
3433 rq->rx_ts_desc_size = adapter->rx_ts_desc_size;
3434 err = vmxnet3_rq_create(rq, adapter);
3435 if (err) {
3436 if (i == 0) {
3437 netdev_err(adapter->netdev,
3438 "Could not allocate any rx queues. "
3439 "Aborting.\n");
3440 goto queue_err;
3441 } else {
3442 netdev_info(adapter->netdev,
3443 "Number of rx queues changed "
3444 "to : %d.\n", i);
3445 adapter->num_rx_queues = i;
3446 err = 0;
3447 break;
3448 }
3449 }
3450 }
3451
3452 if (!adapter->rxdataring_enabled)
3453 vmxnet3_rq_destroy_all_rxdataring(adapter);
3454
3455 return err;
3456 queue_err:
3457 vmxnet3_tq_destroy_all(adapter);
3458 return err;
3459 }
3460
3461 static int
3462 vmxnet3_open(struct net_device *netdev)
3463 {
3464 struct vmxnet3_adapter *adapter;
3465 int err, i;
3466
3467 adapter = netdev_priv(netdev);
3468
3469 for (i = 0; i < adapter->num_tx_queues; i++)
3470 spin_lock_init(&adapter->tx_queue[i].tx_lock);
3471
3472 if (VMXNET3_VERSION_GE_3(adapter)) {
3473 unsigned long flags;
3474 u16 txdata_desc_size;
3475 u32 ret;
3476
3477 spin_lock_irqsave(&adapter->cmd_lock, flags);
3478 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3479 VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
3480 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3481 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3482
3483 txdata_desc_size = ret & 0xffff;
3484 if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
3485 (txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
3486 (txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
3487 adapter->txdata_desc_size =
3488 sizeof(struct Vmxnet3_TxDataDesc);
3489 } else {
3490 adapter->txdata_desc_size = txdata_desc_size;
3491 }
3492 if (VMXNET3_VERSION_GE_9(adapter))
3493 adapter->rxdata_desc_size = (ret >> 16) & 0xffff;
3494 } else {
3495 adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
3496 }
3497
3498 if (VMXNET3_VERSION_GE_9(adapter)) {
3499 unsigned long flags;
3500 u16 tx_ts_desc_size = 0;
3501 u16 rx_ts_desc_size = 0;
3502 u32 ret;
3503
3504 spin_lock_irqsave(&adapter->cmd_lock, flags);
3505 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3506 VMXNET3_CMD_GET_TSRING_DESC_SIZE);
3507 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3508 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3509 if (ret > 0) {
3510 tx_ts_desc_size = (ret & 0xff);
3511 rx_ts_desc_size = ((ret >> 16) & 0xff);
3512 }
3513 if (tx_ts_desc_size > VMXNET3_TXTS_DESC_MAX_SIZE ||
3514 tx_ts_desc_size & VMXNET3_TXTS_DESC_SIZE_MASK)
3515 tx_ts_desc_size = 0;
3516 if (rx_ts_desc_size > VMXNET3_RXTS_DESC_MAX_SIZE ||
3517 rx_ts_desc_size & VMXNET3_RXTS_DESC_SIZE_MASK)
3518 rx_ts_desc_size = 0;
3519 adapter->tx_ts_desc_size = tx_ts_desc_size;
3520 adapter->rx_ts_desc_size = rx_ts_desc_size;
3521 } else {
3522 adapter->tx_ts_desc_size = 0;
3523 adapter->rx_ts_desc_size = 0;
3524 }
3525
3526 err = vmxnet3_create_queues(adapter,
3527 adapter->tx_ring_size,
3528 adapter->rx_ring_size,
3529 adapter->rx_ring2_size,
3530 adapter->txdata_desc_size,
3531 adapter->rxdata_desc_size);
3532 if (err)
3533 goto queue_err;
3534
3535 err = vmxnet3_activate_dev(adapter);
3536 if (err)
3537 goto activate_err;
3538
3539 return 0;
3540
3541 activate_err:
3542 vmxnet3_rq_destroy_all(adapter);
3543 vmxnet3_tq_destroy_all(adapter);
3544 queue_err:
3545 return err;
3546 }
3547
3548
3549 static int
3550 vmxnet3_close(struct net_device *netdev)
3551 {
3552 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3553
3554 /*
3555 * Reset_work may be in the middle of resetting the device, wait for its
3556 * completion.
3557 */
3558 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3559 usleep_range(1000, 2000);
3560
3561 vmxnet3_quiesce_dev(adapter);
3562
3563 vmxnet3_rq_destroy_all(adapter);
3564 vmxnet3_tq_destroy_all(adapter);
3565
3566 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3567
3568
3569 return 0;
3570 }
3571
3572
3573 void
3574 vmxnet3_force_close(struct vmxnet3_adapter *adapter)
3575 {
3576 int i;
3577
3578 /*
3579 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
3580 * vmxnet3_close() will deadlock.
3581 */
3582 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
3583
3584 /* we need to enable NAPI, otherwise dev_close will deadlock */
3585 for (i = 0; i < adapter->num_rx_queues; i++)
3586 napi_enable(&adapter->rx_queue[i].napi);
3587 /*
3588 * Need to clear the quiesce bit to ensure that vmxnet3_close
3589 * can quiesce the device properly
3590 */
3591 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3592 dev_close(adapter->netdev);
3593 }
3594
3595
3596 static int
3597 vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
3598 {
3599 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3600 int err = 0;
3601
3602 WRITE_ONCE(netdev->mtu, new_mtu);
3603
3604 /*
3605 * Reset_work may be in the middle of resetting the device, wait for its
3606 * completion.
3607 */
3608 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3609 usleep_range(1000, 2000);
3610
3611 if (netif_running(netdev)) {
3612 vmxnet3_quiesce_dev(adapter);
3613 vmxnet3_reset_dev(adapter);
3614
3615 /* we need to re-create the rx queue based on the new mtu */
3616 vmxnet3_rq_destroy_all(adapter);
3617 vmxnet3_adjust_rx_ring_size(adapter);
3618 err = vmxnet3_rq_create_all(adapter);
3619 if (err) {
3620 netdev_err(netdev,
3621 "failed to re-create rx queues, "
3622 " error %d. Closing it.\n", err);
3623 goto out;
3624 }
3625
3626 err = vmxnet3_activate_dev(adapter);
3627 if (err) {
3628 netdev_err(netdev,
3629 "failed to re-activate, error %d. "
3630 "Closing it\n", err);
3631 goto out;
3632 }
3633 }
3634
3635 out:
3636 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3637 if (err)
3638 vmxnet3_force_close(adapter);
3639
3640 return err;
3641 }
3642
3643
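/*
 * Populate the netdev feature flags based on the device version and
 * the offloads it reports as disabled, negotiating the DCR0 capability
 * bits for encapsulation (Geneve/VXLAN) offloads on version 7+.
 */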
3644 static void
3645 vmxnet3_declare_features(struct vmxnet3_adapter *adapter)
3646 {
3647 struct net_device *netdev = adapter->netdev;
3648 unsigned long flags;
3649
3650 if (VMXNET3_VERSION_GE_9(adapter)) {
3651 spin_lock_irqsave(&adapter->cmd_lock, flags);
3652 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3653 VMXNET3_CMD_GET_DISABLED_OFFLOADS);
3654 adapter->disabledOffloads = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3655 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3656 }
3657
3658 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3659 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3660 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
3661 NETIF_F_LRO | NETIF_F_HIGHDMA;
3662
3663 if (VMXNET3_VERSION_GE_4(adapter)) {
3664 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
3665 NETIF_F_GSO_UDP_TUNNEL_CSUM;
3666
3667 netdev->hw_enc_features = NETIF_F_SG | NETIF_F_RXCSUM |
3668 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3669 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
3670 NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
3671 NETIF_F_GSO_UDP_TUNNEL_CSUM;
3672 }
3673
3674 if (adapter->disabledOffloads & VMXNET3_OFFLOAD_TSO) {
3675 netdev->hw_features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
3676 netdev->hw_enc_features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
3677 }
3678
3679 if (adapter->disabledOffloads & VMXNET3_OFFLOAD_LRO) {
3680 netdev->hw_features &= ~(NETIF_F_LRO);
3681 netdev->hw_enc_features &= ~(NETIF_F_LRO);
3682 }
3683
3684 if (VMXNET3_VERSION_GE_7(adapter)) {
3685 unsigned long flags;
3686
3687 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3688 VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) {
3689 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD;
3690 }
3691 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3692 VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) {
3693 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD;
3694 }
3695 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3696 VMXNET3_CAP_GENEVE_TSO)) {
3697 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_TSO;
3698 }
3699 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3700 VMXNET3_CAP_VXLAN_TSO)) {
3701 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_TSO;
3702 }
3703 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3704 VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) {
3705 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD;
3706 }
3707 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3708 VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD)) {
3709 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD;
3710 }
3711
3712 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
3713 spin_lock_irqsave(&adapter->cmd_lock, flags);
3714 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
3715 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3716 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3717
3718 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) &&
3719 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) &&
3720 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_TSO)) &&
3721 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_TSO))) {
3722 netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL;
3723 netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
3724 }
3725 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) &&
3726 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD))) {
3727 netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
3728 netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
3729 }
3730 }
3731
3732 netdev->vlan_features = netdev->hw_features &
3733 ~(NETIF_F_HW_VLAN_CTAG_TX |
3734 NETIF_F_HW_VLAN_CTAG_RX);
3735 netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3736 }
3737
3738
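/*
 * The permanent MAC address is split across two registers: MACL holds
 * bytes 0-3 and the low 16 bits of MACH hold bytes 4-5.
 */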
3739 static void
3740 vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
3741 {
3742 u32 tmp;
3743
3744 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
3745 *(u32 *)mac = tmp;
3746
3747 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
3748 mac[4] = tmp & 0xff;
3749 mac[5] = (tmp >> 8) & 0xff;
3750 }
3751
3752 #ifdef CONFIG_PCI_MSI
3753
3754 /*
3755 * Enable MSI-X vectors.
3756 * Returns:
3757 * VMXNET3_LINUX_MIN_MSIX_VECT if only the minimum number of required
3758 * vectors could be enabled;
3759 * otherwise, the number of vectors that were enabled (greater than
3760 * VMXNET3_LINUX_MIN_MSIX_VECT).
3761 */
3762
3763 static int
3764 vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
3765 {
3766 int ret = pci_enable_msix_range(adapter->pdev,
3767 adapter->intr.msix_entries, nvec, nvec);
3768
3769 if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
3770 dev_err(&adapter->netdev->dev,
3771 "Failed to enable %d MSI-X, trying %d\n",
3772 nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
3773
3774 ret = pci_enable_msix_range(adapter->pdev,
3775 adapter->intr.msix_entries,
3776 VMXNET3_LINUX_MIN_MSIX_VECT,
3777 VMXNET3_LINUX_MIN_MSIX_VECT);
3778 }
3779
3780 if (ret < 0) {
3781 dev_err(&adapter->netdev->dev,
3782 "Failed to enable MSI-X, error: %d\n", ret);
3783 }
3784
3785 return ret;
3786 }
3787
3788
3789 #endif /* CONFIG_PCI_MSI */
3790
3791 static void
3792 vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
3793 {
3794 u32 cfg;
3795 unsigned long flags;
3796
3797 /* intr settings */
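/*
 * The GET_CONF_INTR response packs the interrupt type in bits 1:0 and
 * the mask mode in bits 3:2.
 */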
3798 spin_lock_irqsave(&adapter->cmd_lock, flags);
3799 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3800 VMXNET3_CMD_GET_CONF_INTR);
3801 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3802 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3803 adapter->intr.type = cfg & 0x3;
3804 adapter->intr.mask_mode = (cfg >> 2) & 0x3;
3805
3806 if (adapter->intr.type == VMXNET3_IT_AUTO) {
3807 adapter->intr.type = VMXNET3_IT_MSIX;
3808 }
3809
3810 #ifdef CONFIG_PCI_MSI
3811 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3812 int i, nvec, nvec_allocated;
3813
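/*
 * Vector budget: one per tx queue (or a single shared one), one per
 * rx queue unless rx buddy-shares with tx, plus one for link events;
 * never request fewer than VMXNET3_LINUX_MIN_MSIX_VECT.
 */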
3814 nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
3815 1 : adapter->num_tx_queues;
3816 nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
3817 0 : adapter->num_rx_queues;
3818 nvec += 1; /* for link event */
3819 nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
3820 nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
3821
3822 for (i = 0; i < nvec; i++)
3823 adapter->intr.msix_entries[i].entry = i;
3824
3825 nvec_allocated = vmxnet3_acquire_msix_vectors(adapter, nvec);
3826 if (nvec_allocated < 0)
3827 goto msix_err;
3828
3829 /* If we cannot allocate one MSIx vector per queue
3830 * then limit the number of rx queues to 1
3831 */
3832 if (nvec_allocated == VMXNET3_LINUX_MIN_MSIX_VECT &&
3833 nvec != VMXNET3_LINUX_MIN_MSIX_VECT) {
3834 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
3835 || adapter->num_rx_queues != 1) {
3836 adapter->share_intr = VMXNET3_INTR_TXSHARE;
3837 netdev_err(adapter->netdev,
3838 "Number of rx queues : 1\n");
3839 adapter->num_rx_queues = 1;
3840 }
3841 }
3842
3843 adapter->intr.num_intrs = nvec_allocated;
3844 return;
3845
3846 msix_err:
3847 /* If we cannot allocate MSIx vectors use only one rx queue */
3848 dev_info(&adapter->pdev->dev,
3849 "Failed to enable MSI-X, error %d. "
3850 "Limiting #rx queues to 1, try MSI.\n", nvec_allocated);
3851
3852 adapter->intr.type = VMXNET3_IT_MSI;
3853 }
3854
3855 if (adapter->intr.type == VMXNET3_IT_MSI) {
3856 if (!pci_enable_msi(adapter->pdev)) {
3857 adapter->num_rx_queues = 1;
3858 adapter->intr.num_intrs = 1;
3859 return;
3860 }
3861 }
3862 #endif /* CONFIG_PCI_MSI */
3863
3864 adapter->num_rx_queues = 1;
3865 dev_info(&adapter->netdev->dev,
3866 "Using INTx interrupt, #Rx queues: 1.\n");
3867 adapter->intr.type = VMXNET3_IT_INTX;
3868
3869 /* INT-X related setting */
3870 adapter->intr.num_intrs = 1;
3871 }
3872
3873
3874 static void
3875 vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
3876 {
3877 if (adapter->intr.type == VMXNET3_IT_MSIX)
3878 pci_disable_msix(adapter->pdev);
3879 else if (adapter->intr.type == VMXNET3_IT_MSI)
3880 pci_disable_msi(adapter->pdev);
3881 else
3882 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
3883 }
3884
3885
3886 static void
3887 vmxnet3_tx_timeout(struct net_device *netdev, unsigned int txqueue)
3888 {
3889 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3890 adapter->tx_timeout_count++;
3891
3892 netdev_err(adapter->netdev, "tx hang\n");
3893 schedule_work(&adapter->work);
3894 }
3895
3896
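/*
 * Deferred reset handler (scheduled e.g. from the tx-timeout path): bail
 * out if another reset is already in flight or the interface is down;
 * otherwise quiesce, reset and re-activate the device under rtnl_lock.
 */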
3897 static void
3898 vmxnet3_reset_work(struct work_struct *data)
3899 {
3900 struct vmxnet3_adapter *adapter;
3901
3902 adapter = container_of(data, struct vmxnet3_adapter, work);
3903
3904 /* if another thread is resetting the device, no need to proceed */
3905 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3906 return;
3907
3908 /* if the device is closed, we must leave it alone */
3909 rtnl_lock();
3910 if (netif_running(adapter->netdev)) {
3911 netdev_notice(adapter->netdev, "resetting\n");
3912 vmxnet3_quiesce_dev(adapter);
3913 vmxnet3_reset_dev(adapter);
3914 vmxnet3_activate_dev(adapter);
3915 } else {
3916 netdev_info(adapter->netdev, "already closed\n");
3917 }
3918 rtnl_unlock();
3919
3920 netif_wake_queue(adapter->netdev);
3921 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3922 }
3923
3924
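/*
 * PCI probe: allocate the netdev and the shared/queue DMA areas,
 * negotiate device and UPT versions, size the queues, set up interrupts
 * and NAPI, and register the net device.
 */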
3925 static int
3926 vmxnet3_probe_device(struct pci_dev *pdev,
3927 const struct pci_device_id *id)
3928 {
3929 static const struct net_device_ops vmxnet3_netdev_ops = {
3930 .ndo_open = vmxnet3_open,
3931 .ndo_stop = vmxnet3_close,
3932 .ndo_start_xmit = vmxnet3_xmit_frame,
3933 .ndo_set_mac_address = vmxnet3_set_mac_addr,
3934 .ndo_change_mtu = vmxnet3_change_mtu,
3935 .ndo_fix_features = vmxnet3_fix_features,
3936 .ndo_set_features = vmxnet3_set_features,
3937 .ndo_features_check = vmxnet3_features_check,
3938 .ndo_get_stats64 = vmxnet3_get_stats64,
3939 .ndo_tx_timeout = vmxnet3_tx_timeout,
3940 .ndo_set_rx_mode = vmxnet3_set_mc,
3941 .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
3942 .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
3943 #ifdef CONFIG_NET_POLL_CONTROLLER
3944 .ndo_poll_controller = vmxnet3_netpoll,
3945 #endif
3946 .ndo_bpf = vmxnet3_xdp,
3947 .ndo_xdp_xmit = vmxnet3_xdp_xmit,
3948 };
3949 int err;
3950 u32 ver;
3951 struct net_device *netdev;
3952 struct vmxnet3_adapter *adapter;
3953 u8 mac[ETH_ALEN];
3954 int size, i;
3955 int num_tx_queues;
3956 int num_rx_queues;
3957 int queues;
3958 unsigned long flags;
3959
3960 if (!pci_msi_enabled())
3961 enable_mq = 0;
3962
3963 #ifdef VMXNET3_RSS
3964 if (enable_mq)
3965 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3966 (int)num_online_cpus());
3967 else
3968 #endif
3969 num_rx_queues = 1;
3970
3971 if (enable_mq)
3972 num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
3973 (int)num_online_cpus());
3974 else
3975 num_tx_queues = 1;
3976
3977 netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
3978 max(num_tx_queues, num_rx_queues));
3979 if (!netdev)
3980 return -ENOMEM;
3981
3982 pci_set_drvdata(pdev, netdev);
3983 adapter = netdev_priv(netdev);
3984 adapter->netdev = netdev;
3985 adapter->pdev = pdev;
3986
3987 adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
3988 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
3989 adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
3990
3991 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3992 if (err) {
3993 dev_err(&pdev->dev, "dma_set_mask failed\n");
3994 goto err_set_mask;
3995 }
3996
3997 spin_lock_init(&adapter->cmd_lock);
3998 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
3999 sizeof(struct vmxnet3_adapter),
4000 DMA_TO_DEVICE);
4001 if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
4002 dev_err(&pdev->dev, "Failed to map dma\n");
4003 err = -EFAULT;
4004 goto err_set_mask;
4005 }
4006 adapter->shared = dma_alloc_coherent(
4007 &adapter->pdev->dev,
4008 sizeof(struct Vmxnet3_DriverShared),
4009 &adapter->shared_pa, GFP_KERNEL);
4010 if (!adapter->shared) {
4011 dev_err(&pdev->dev, "Failed to allocate memory\n");
4012 err = -ENOMEM;
4013 goto err_alloc_shared;
4014 }
4015
4016 err = vmxnet3_alloc_pci_resources(adapter);
4017 if (err < 0)
4018 goto err_alloc_pci;
4019
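/*
 * Version negotiation: VRRS advertises the supported device revisions as
 * a bitmap; pick the highest revision this driver knows (rev 9 down to
 * rev 1) and acknowledge it by writing that bit back.
 */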
4020 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
4021 for (i = VMXNET3_REV_9; i >= VMXNET3_REV_1; i--) {
4022 if (ver & (1 << i)) {
4023 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1 << i);
4024 adapter->version = i + 1;
4025 break;
4026 }
4027 }
4028 if (i < VMXNET3_REV_1) {
4029 dev_err(&pdev->dev,
4030 "Incompatible h/w version (0x%x) for adapter\n", ver);
4031 err = -EBUSY;
4032 goto err_ver;
4033 }
4034 dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version);
4035
4036 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
4037 if (ver & 1) {
4038 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
4039 } else {
4040 dev_err(&pdev->dev,
4041 "Incompatible upt version (0x%x) for adapter\n", ver);
4042 err = -EBUSY;
4043 goto err_ver;
4044 }
4045
4046 if (VMXNET3_VERSION_GE_7(adapter)) {
4047 adapter->devcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DCR);
4048 adapter->ptcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_PTCR);
4049 if (adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
4050 adapter->dev_caps[0] = adapter->devcap_supported[0] &
4051 (1UL << VMXNET3_CAP_LARGE_BAR);
4052 }
4053 if (!(adapter->ptcap_supported[0] & (1UL << VMXNET3_DCR_ERROR)) &&
4054 adapter->ptcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP) &&
4055 adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP)) {
4056 adapter->dev_caps[0] |= adapter->devcap_supported[0] &
4057 (1UL << VMXNET3_CAP_OOORX_COMP);
4058 }
4059 if (adapter->dev_caps[0])
4060 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
4061
4062 spin_lock_irqsave(&adapter->cmd_lock, flags);
4063 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
4064 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
4065 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4066 }
4067
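/* When the large-BAR capability is granted, the producer index registers
 * are accessed at the LB_* offsets instead of the legacy ones.
 */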
4068 if (VMXNET3_VERSION_GE_7(adapter) &&
4069 adapter->dev_caps[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
4070 adapter->tx_prod_offset = VMXNET3_REG_LB_TXPROD;
4071 adapter->rx_prod_offset = VMXNET3_REG_LB_RXPROD;
4072 adapter->rx_prod2_offset = VMXNET3_REG_LB_RXPROD2;
4073 } else {
4074 adapter->tx_prod_offset = VMXNET3_REG_TXPROD;
4075 adapter->rx_prod_offset = VMXNET3_REG_RXPROD;
4076 adapter->rx_prod2_offset = VMXNET3_REG_RXPROD2;
4077 }
4078
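/*
 * Queue sizing: on version 6+ query the device's maximum queue
 * configuration (rx count in bits 15:8, tx count in bits 7:0 as parsed
 * here); older devices use the defaults with power-of-two queue counts.
 */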
4079 if (VMXNET3_VERSION_GE_6(adapter)) {
4080 spin_lock_irqsave(&adapter->cmd_lock, flags);
4081 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4082 VMXNET3_CMD_GET_MAX_QUEUES_CONF);
4083 queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
4084 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4085 if (queues > 0) {
4086 adapter->num_rx_queues = min(num_rx_queues, ((queues >> 8) & 0xff));
4087 adapter->num_tx_queues = min(num_tx_queues, (queues & 0xff));
4088 } else {
4089 adapter->num_rx_queues = min(num_rx_queues,
4090 VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
4091 adapter->num_tx_queues = min(num_tx_queues,
4092 VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
4093 }
4094 if (adapter->num_rx_queues > VMXNET3_MAX_RX_QUEUES ||
4095 adapter->num_tx_queues > VMXNET3_MAX_TX_QUEUES) {
4096 adapter->queuesExtEnabled = true;
4097 } else {
4098 adapter->queuesExtEnabled = false;
4099 }
4100 } else {
4101 adapter->queuesExtEnabled = false;
4102 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
4103 num_tx_queues = rounddown_pow_of_two(num_tx_queues);
4104 adapter->num_rx_queues = min(num_rx_queues,
4105 VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
4106 adapter->num_tx_queues = min(num_tx_queues,
4107 VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
4108 }
4109 dev_info(&pdev->dev,
4110 "# of Tx queues : %d, # of Rx queues : %d\n",
4111 adapter->num_tx_queues, adapter->num_rx_queues);
4112
4113 adapter->rx_buf_per_pkt = 1;
4114
4115 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
4116 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
4117 adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
4118 &adapter->queue_desc_pa,
4119 GFP_KERNEL);
4120
4121 if (!adapter->tqd_start) {
4122 dev_err(&pdev->dev, "Failed to allocate memory\n");
4123 err = -ENOMEM;
4124 goto err_ver;
4125 }
4126 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
4127 adapter->num_tx_queues);
4128 if (VMXNET3_VERSION_GE_9(adapter))
4129 adapter->latencyConf = &adapter->tqd_start->tsConf.latencyConf;
4130
4131 adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
4132 sizeof(struct Vmxnet3_PMConf),
4133 &adapter->pm_conf_pa,
4134 GFP_KERNEL);
4135 if (adapter->pm_conf == NULL) {
4136 err = -ENOMEM;
4137 goto err_alloc_pm;
4138 }
4139
4140 #ifdef VMXNET3_RSS
4141
4142 adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
4143 sizeof(struct UPT1_RSSConf),
4144 &adapter->rss_conf_pa,
4145 GFP_KERNEL);
4146 if (adapter->rss_conf == NULL) {
4147 err = -ENOMEM;
4148 goto err_alloc_rss;
4149 }
4150 #endif /* VMXNET3_RSS */
4151
4152 if (VMXNET3_VERSION_GE_3(adapter)) {
4153 adapter->coal_conf =
4154 dma_alloc_coherent(&adapter->pdev->dev,
4155 sizeof(struct Vmxnet3_CoalesceScheme),
4157 &adapter->coal_conf_pa,
4158 GFP_KERNEL);
4159 if (!adapter->coal_conf) {
4160 err = -ENOMEM;
4161 goto err_coal_conf;
4162 }
4163 adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
4164 adapter->default_coal_mode = true;
4165 }
4166
4167 if (VMXNET3_VERSION_GE_4(adapter)) {
4168 adapter->default_rss_fields = true;
4169 adapter->rss_fields = VMXNET3_RSS_FIELDS_DEFAULT;
4170 }
4171
4172 SET_NETDEV_DEV(netdev, &pdev->dev);
4173 vmxnet3_declare_features(adapter);
4174 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
4175 NETDEV_XDP_ACT_NDO_XMIT;
4176
4177 adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
4178 VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
4179
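/* Pair each tx queue with an rx queue on one interrupt vector when the
 * counts match; otherwise keep tx and rx interrupts separate.
 */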
4180 if (adapter->num_tx_queues == adapter->num_rx_queues)
4181 adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
4182 else
4183 adapter->share_intr = VMXNET3_INTR_DONTSHARE;
4184
4185 vmxnet3_alloc_intr_resources(adapter);
4186
4187 #ifdef VMXNET3_RSS
4188 if (adapter->num_rx_queues > 1 &&
4189 adapter->intr.type == VMXNET3_IT_MSIX) {
4190 adapter->rss = true;
4191 netdev->hw_features |= NETIF_F_RXHASH;
4192 netdev->features |= NETIF_F_RXHASH;
4193 dev_dbg(&pdev->dev, "RSS is enabled.\n");
4194 } else {
4195 adapter->rss = false;
4196 }
4197 #endif
4198
4199 vmxnet3_read_mac_addr(adapter, mac);
4200 dev_addr_set(netdev, mac);
4201
4202 netdev->netdev_ops = &vmxnet3_netdev_ops;
4203 vmxnet3_set_ethtool_ops(netdev);
4204 netdev->watchdog_timeo = 5 * HZ;
4205
4206 /* MTU range: 60 - 9190 */
4207 netdev->min_mtu = VMXNET3_MIN_MTU;
4208 if (VMXNET3_VERSION_GE_6(adapter))
4209 netdev->max_mtu = VMXNET3_V6_MAX_MTU;
4210 else
4211 netdev->max_mtu = VMXNET3_MAX_MTU;
4212
4213 INIT_WORK(&adapter->work, vmxnet3_reset_work);
4214 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
4215
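/* With MSI-X, each rx queue gets its own NAPI context that polls rx
 * only; with MSI/INTx a single NAPI context services the whole device.
 */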
4216 if (adapter->intr.type == VMXNET3_IT_MSIX) {
4217 int i;
4218 for (i = 0; i < adapter->num_rx_queues; i++) {
4219 netif_napi_add(adapter->netdev,
4220 &adapter->rx_queue[i].napi,
4221 vmxnet3_poll_rx_only);
4222 }
4223 } else {
4224 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
4225 vmxnet3_poll);
4226 }
4227
4228 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
4229 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
4230
4231 netif_carrier_off(netdev);
4232 err = register_netdev(netdev);
4233
4234 if (err) {
4235 dev_err(&pdev->dev, "Failed to register adapter\n");
4236 goto err_register;
4237 }
4238
4239 vmxnet3_check_link(adapter, false);
4240 return 0;
4241
4242 err_register:
4243 if (VMXNET3_VERSION_GE_3(adapter)) {
4244 dma_free_coherent(&adapter->pdev->dev,
4245 sizeof(struct Vmxnet3_CoalesceScheme),
4246 adapter->coal_conf, adapter->coal_conf_pa);
4247 }
4248 vmxnet3_free_intr_resources(adapter);
4249 err_coal_conf:
4250 #ifdef VMXNET3_RSS
4251 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
4252 adapter->rss_conf, adapter->rss_conf_pa);
4253 err_alloc_rss:
4254 #endif
4255 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
4256 adapter->pm_conf, adapter->pm_conf_pa);
4257 err_alloc_pm:
4258 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
4259 adapter->queue_desc_pa);
4260 err_ver:
4261 vmxnet3_free_pci_resources(adapter);
4262 err_alloc_pci:
4263 dma_free_coherent(&adapter->pdev->dev,
4264 sizeof(struct Vmxnet3_DriverShared),
4265 adapter->shared, adapter->shared_pa);
4266 err_alloc_shared:
4267 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
4268 sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
4269 err_set_mask:
4270 free_netdev(netdev);
4271 return err;
4272 }
4273
4274
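/*
 * PCI remove: cancel any pending reset work, unregister the netdev, and
 * release interrupts, PCI resources and the DMA allocations made at
 * probe time (re-deriving the rx queue count used for the queue
 * descriptor area).
 */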
4275 static void
4276 vmxnet3_remove_device(struct pci_dev *pdev)
4277 {
4278 struct net_device *netdev = pci_get_drvdata(pdev);
4279 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4280 int size = 0;
4281 int num_rx_queues, rx_queues;
4282 unsigned long flags;
4283
4284 #ifdef VMXNET3_RSS
4285 if (enable_mq)
4286 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
4287 (int)num_online_cpus());
4288 else
4289 #endif
4290 num_rx_queues = 1;
4291 if (!VMXNET3_VERSION_GE_6(adapter)) {
4292 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
4293 }
4294 if (VMXNET3_VERSION_GE_6(adapter)) {
4295 spin_lock_irqsave(&adapter->cmd_lock, flags);
4296 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4297 VMXNET3_CMD_GET_MAX_QUEUES_CONF);
4298 rx_queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
4299 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4300 if (rx_queues > 0)
4301 rx_queues = (rx_queues >> 8) & 0xff;
4302 else
4303 rx_queues = min(num_rx_queues, VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
4304 num_rx_queues = min(num_rx_queues, rx_queues);
4305 } else {
4306 num_rx_queues = min(num_rx_queues,
4307 VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
4308 }
4309
4310 cancel_work_sync(&adapter->work);
4311
4312 unregister_netdev(netdev);
4313
4314 vmxnet3_free_intr_resources(adapter);
4315 vmxnet3_free_pci_resources(adapter);
4316 if (VMXNET3_VERSION_GE_3(adapter)) {
4317 dma_free_coherent(&adapter->pdev->dev,
4318 sizeof(struct Vmxnet3_CoalesceScheme),
4319 adapter->coal_conf, adapter->coal_conf_pa);
4320 }
4321 #ifdef VMXNET3_RSS
4322 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
4323 adapter->rss_conf, adapter->rss_conf_pa);
4324 #endif
4325 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
4326 adapter->pm_conf, adapter->pm_conf_pa);
4327
4328 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
4329 size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
4330 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
4331 adapter->queue_desc_pa);
4332 dma_free_coherent(&adapter->pdev->dev,
4333 sizeof(struct Vmxnet3_DriverShared),
4334 adapter->shared, adapter->shared_pa);
4335 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
4336 sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
4337 free_netdev(netdev);
4338 }
4339
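/*
 * PCI shutdown: quiesce the device and disable its interrupts so it
 * stops DMA before the system shuts down or reboots.
 */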
4340 static void vmxnet3_shutdown_device(struct pci_dev *pdev)
4341 {
4342 struct net_device *netdev = pci_get_drvdata(pdev);
4343 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4344 unsigned long flags;
4345
4346 /* Reset_work may be in the middle of resetting the device; wait for
4347 * it to complete.
4348 */
4349 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
4350 usleep_range(1000, 2000);
4351
4352 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED,
4353 &adapter->state)) {
4354 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
4355 return;
4356 }
4357 spin_lock_irqsave(&adapter->cmd_lock, flags);
4358 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4359 VMXNET3_CMD_QUIESCE_DEV);
4360 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4361 vmxnet3_disable_all_intrs(adapter);
4362
4363 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
4364 }
4365
4366
4367 #ifdef CONFIG_PM
4368
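/*
 * Suspend: quiesce NAPI and interrupts, program the configured wake-up
 * filters (unicast, ARP and/or magic packet), then hand the device to
 * PCI power management.
 */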
4369 static int
4370 vmxnet3_suspend(struct device *device)
4371 {
4372 struct pci_dev *pdev = to_pci_dev(device);
4373 struct net_device *netdev = pci_get_drvdata(pdev);
4374 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4375 struct Vmxnet3_PMConf *pmConf;
4376 struct ethhdr *ehdr;
4377 struct arphdr *ahdr;
4378 u8 *arpreq;
4379 struct in_device *in_dev;
4380 struct in_ifaddr *ifa;
4381 unsigned long flags;
4382 int i = 0;
4383
4384 if (!netif_running(netdev))
4385 return 0;
4386
4387 for (i = 0; i < adapter->num_rx_queues; i++)
4388 napi_disable(&adapter->rx_queue[i].napi);
4389
4390 vmxnet3_disable_all_intrs(adapter);
4391 vmxnet3_free_irqs(adapter);
4392 vmxnet3_free_intr_resources(adapter);
4393
4394 netif_device_detach(netdev);
4395
4396 /* Create wake-up filters. */
4397 pmConf = adapter->pm_conf;
4398 memset(pmConf, 0, sizeof(*pmConf));
4399
4400 if (adapter->wol & WAKE_UCAST) {
4401 pmConf->filters[i].patternSize = ETH_ALEN;
4402 pmConf->filters[i].maskSize = 1;
4403 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
4404 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
4405
4406 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
4407 i++;
4408 }
4409
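/*
 * Build a pattern/mask wake-up filter that matches ARP requests for the
 * interface's primary IPv4 address; each mask bit covers one byte of
 * the pattern.
 */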
4410 if (adapter->wol & WAKE_ARP) {
4411 rcu_read_lock();
4412
4413 in_dev = __in_dev_get_rcu(netdev);
4414 if (!in_dev) {
4415 rcu_read_unlock();
4416 goto skip_arp;
4417 }
4418
4419 ifa = rcu_dereference(in_dev->ifa_list);
4420 if (!ifa) {
4421 rcu_read_unlock();
4422 goto skip_arp;
4423 }
4424
4425 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
4426 sizeof(struct arphdr) + /* ARP header */
4427 2 * ETH_ALEN + /* 2 Ethernet addresses*/
4428 2 * sizeof(u32); /*2 IPv4 addresses */
4429 pmConf->filters[i].maskSize =
4430 (pmConf->filters[i].patternSize - 1) / 8 + 1;
4431
4432 /* ETH_P_ARP in Ethernet header. */
4433 ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
4434 ehdr->h_proto = htons(ETH_P_ARP);
4435
4436 /* ARPOP_REQUEST in ARP header. */
4437 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
4438 ahdr->ar_op = htons(ARPOP_REQUEST);
4439 arpreq = (u8 *)(ahdr + 1);
4440
4441 /* The Unicast IPv4 address in 'tip' field. */
4442 arpreq += 2 * ETH_ALEN + sizeof(u32);
4443 *(__be32 *)arpreq = ifa->ifa_address;
4444
4445 rcu_read_unlock();
4446
4447 /* The mask for the relevant bits. */
4448 pmConf->filters[i].mask[0] = 0x00;
4449 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
4450 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
4451 pmConf->filters[i].mask[3] = 0x00;
4452 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
4453 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
4454
4455 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
4456 i++;
4457 }
4458
4459 skip_arp:
4460 if (adapter->wol & WAKE_MAGIC)
4461 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
4462
4463 pmConf->numFilters = i;
4464
4465 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
4466 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
4467 *pmConf));
4468 adapter->shared->devRead.pmConfDesc.confPA =
4469 cpu_to_le64(adapter->pm_conf_pa);
4470
4471 spin_lock_irqsave(&adapter->cmd_lock, flags);
4472 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4473 VMXNET3_CMD_UPDATE_PMCFG);
4474 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4475
4476 pci_save_state(pdev);
4477 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
4478 adapter->wol);
4479 pci_disable_device(pdev);
4480 pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
4481
4482 return 0;
4483 }
4484
4485
4486 static int
4487 vmxnet3_resume(struct device *device)
4488 {
4489 int err;
4490 unsigned long flags;
4491 struct pci_dev *pdev = to_pci_dev(device);
4492 struct net_device *netdev = pci_get_drvdata(pdev);
4493 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4494
4495 if (!netif_running(netdev))
4496 return 0;
4497
4498 pci_set_power_state(pdev, PCI_D0);
4499 pci_restore_state(pdev);
4500 err = pci_enable_device_mem(pdev);
4501 if (err != 0)
4502 return err;
4503
4504 pci_enable_wake(pdev, PCI_D0, 0);
4505
4506 vmxnet3_alloc_intr_resources(adapter);
4507
4508 /* During hibernate and suspend the device has to be reinitialized, as
4509 * its state is not guaranteed to be preserved.
4510 */
4511
4512 /* Need not check adapter state as other reset tasks cannot run during
4513 * device resume.
4514 */
4515 spin_lock_irqsave(&adapter->cmd_lock, flags);
4516 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4517 VMXNET3_CMD_QUIESCE_DEV);
4518 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4519 vmxnet3_tq_cleanup_all(adapter);
4520 vmxnet3_rq_cleanup_all(adapter);
4521
4522 vmxnet3_reset_dev(adapter);
4523 err = vmxnet3_activate_dev(adapter);
4524 if (err != 0) {
4525 netdev_err(netdev,
4526 "failed to re-activate on resume, error: %d", err);
4527 vmxnet3_force_close(adapter);
4528 return err;
4529 }
4530 netif_device_attach(netdev);
4531
4532 return 0;
4533 }
4534
4535 static const struct dev_pm_ops vmxnet3_pm_ops = {
4536 .suspend = vmxnet3_suspend,
4537 .resume = vmxnet3_resume,
4538 .freeze = vmxnet3_suspend,
4539 .restore = vmxnet3_resume,
4540 };
4541 #endif
4542
4543 static struct pci_driver vmxnet3_driver = {
4544 .name = vmxnet3_driver_name,
4545 .id_table = vmxnet3_pciid_table,
4546 .probe = vmxnet3_probe_device,
4547 .remove = vmxnet3_remove_device,
4548 .shutdown = vmxnet3_shutdown_device,
4549 #ifdef CONFIG_PM
4550 .driver.pm = &vmxnet3_pm_ops,
4551 #endif
4552 };
4553
4554
4555 static int __init
4556 vmxnet3_init_module(void)
4557 {
4558 pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
4559 VMXNET3_DRIVER_VERSION_REPORT);
4560 return pci_register_driver(&vmxnet3_driver);
4561 }
4562
4563 module_init(vmxnet3_init_module);
4564
4565
4566 static void
4567 vmxnet3_exit_module(void)
4568 {
4569 pci_unregister_driver(&vmxnet3_driver);
4570 }
4571
4572 module_exit(vmxnet3_exit_module);
4573
4574 MODULE_AUTHOR("VMware, Inc.");
4575 MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
4576 MODULE_LICENSE("GPL v2");
4577 MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);
4578