Lines matching refs:gq (identifier cross-reference hits for gq, the struct rswitch_gwca_queue pointer threaded through the Renesas Ethernet Switch driver, drivers/net/ethernet/renesas/rswitch.c; the leading number on each hit is that line's position in the source file)
207 static unsigned int rswitch_next_queue_index(struct rswitch_gwca_queue *gq, in rswitch_next_queue_index() argument
210 unsigned int index = cur ? gq->cur : gq->dirty; in rswitch_next_queue_index()
212 if (index + num >= gq->ring_size) in rswitch_next_queue_index()
213 index = (index + num) % gq->ring_size; in rswitch_next_queue_index()
220 static unsigned int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq) in rswitch_get_num_cur_queues() argument
222 if (gq->cur >= gq->dirty) in rswitch_get_num_cur_queues()
223 return gq->cur - gq->dirty; in rswitch_get_num_cur_queues()
225 return gq->ring_size - gq->dirty + gq->cur; in rswitch_get_num_cur_queues()
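
The two helpers above carry the driver's ring bookkeeping: cur is the slot being produced or consumed and dirty trails it, so the occupied span is the distance from dirty to cur modulo ring_size. A minimal standalone sketch of that arithmetic, with a stripped-down stand-in for struct rswitch_gwca_queue (not the driver's definition):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stripped-down stand-in for struct rswitch_gwca_queue. */
    struct ring {
        unsigned int ring_size;
        unsigned int cur;    /* next slot to produce/consume */
        unsigned int dirty;  /* oldest slot not yet reclaimed/refilled */
    };

    /* Advance cur or dirty by num, wrapping at ring_size. The driver
     * only takes the modulo when index + num would overrun, which is
     * equivalent for num <= ring_size but skips the division in the
     * common case. */
    static unsigned int next_index(const struct ring *r, bool cur, unsigned int num)
    {
        unsigned int index = cur ? r->cur : r->dirty;

        return (index + num) % r->ring_size;
    }

    /* Number of slots between dirty and cur, i.e. in-flight entries. */
    static unsigned int num_in_flight(const struct ring *r)
    {
        if (r->cur >= r->dirty)
            return r->cur - r->dirty;
        return r->ring_size - r->dirty + r->cur;
    }

    int main(void)
    {
        struct ring r = { .ring_size = 8, .cur = 6, .dirty = 3 };

        r.cur = next_index(&r, true, 4);  /* 6 + 4 = 10 wraps to 2 */
        printf("cur=%u in_flight=%u\n", r.cur, num_in_flight(&r));  /* cur=2 in_flight=7 */
        return 0;
    }
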
228 static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq) in rswitch_is_queue_rxed() argument
230 struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty]; in rswitch_is_queue_rxed()
238 static int rswitch_gwca_queue_alloc_rx_buf(struct rswitch_gwca_queue *gq, in rswitch_gwca_queue_alloc_rx_buf() argument
245 index = (i + start_index) % gq->ring_size; in rswitch_gwca_queue_alloc_rx_buf()
246 if (gq->rx_bufs[index]) in rswitch_gwca_queue_alloc_rx_buf()
248 gq->rx_bufs[index] = netdev_alloc_frag(RSWITCH_BUF_SIZE); in rswitch_gwca_queue_alloc_rx_buf()
249 if (!gq->rx_bufs[index]) in rswitch_gwca_queue_alloc_rx_buf()
257 index = (i + start_index) % gq->ring_size; in rswitch_gwca_queue_alloc_rx_buf()
258 skb_free_frag(gq->rx_bufs[index]); in rswitch_gwca_queue_alloc_rx_buf()
259 gq->rx_bufs[index] = NULL; in rswitch_gwca_queue_alloc_rx_buf()
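
The allocation loop above fills only the slots that are currently NULL and, on failure, walks back over the slots it just visited and frees them, leaving no half-filled span. A hedged userspace sketch of the same allocate-or-rollback pattern, with malloc/free in place of netdev_alloc_frag()/skb_free_frag():

    #include <stdlib.h>

    #define BUF_SIZE 2048  /* stand-in for RSWITCH_BUF_SIZE */

    /* Fill the empty slots in [start_index, start_index + num) modulo
     * ring_size; on failure, rewind over everything walked so far. */
    static int alloc_rx_bufs(void **bufs, unsigned int ring_size,
                             unsigned int start_index, unsigned int num)
    {
        unsigned int i, index;

        for (i = 0; i < num; i++) {
            index = (i + start_index) % ring_size;
            if (bufs[index])
                continue;  /* slot already holds a buffer */
            bufs[index] = malloc(BUF_SIZE);  /* driver: netdev_alloc_frag() */
            if (!bufs[index])
                goto err;
        }
        return 0;

    err:
        while (i-- > 0) {
            index = (i + start_index) % ring_size;
            free(bufs[index]);  /* driver: skb_free_frag() */
            bufs[index] = NULL;
        }
        return -1;  /* driver returns -ENOMEM */
    }

    int main(void)
    {
        void *bufs[8] = { 0 };

        return alloc_rx_bufs(bufs, 8, 5, 8);  /* wraps from slot 7 to 0 */
    }
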
266 struct rswitch_gwca_queue *gq) in rswitch_gwca_queue_free() argument
270 if (!gq->dir_tx) { in rswitch_gwca_queue_free()
273 (gq->ring_size + 1), gq->rx_ring, gq->ring_dma); in rswitch_gwca_queue_free()
274 gq->rx_ring = NULL; in rswitch_gwca_queue_free()
276 for (i = 0; i < gq->ring_size; i++) in rswitch_gwca_queue_free()
277 skb_free_frag(gq->rx_bufs[i]); in rswitch_gwca_queue_free()
278 kfree(gq->rx_bufs); in rswitch_gwca_queue_free()
279 gq->rx_bufs = NULL; in rswitch_gwca_queue_free()
283 (gq->ring_size + 1), gq->tx_ring, gq->ring_dma); in rswitch_gwca_queue_free()
284 gq->tx_ring = NULL; in rswitch_gwca_queue_free()
285 kfree(gq->skbs); in rswitch_gwca_queue_free()
286 gq->skbs = NULL; in rswitch_gwca_queue_free()
287 kfree(gq->unmap_addrs); in rswitch_gwca_queue_free()
288 gq->unmap_addrs = NULL; in rswitch_gwca_queue_free()
294 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; in rswitch_gwca_ts_queue_free() local
297 sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1), in rswitch_gwca_ts_queue_free()
298 gq->ts_ring, gq->ring_dma); in rswitch_gwca_ts_queue_free()
299 gq->ts_ring = NULL; in rswitch_gwca_ts_queue_free()
304 struct rswitch_gwca_queue *gq, in rswitch_gwca_queue_alloc() argument
309 gq->dir_tx = dir_tx; in rswitch_gwca_queue_alloc()
310 gq->ring_size = ring_size; in rswitch_gwca_queue_alloc()
311 gq->ndev = ndev; in rswitch_gwca_queue_alloc()
314 gq->rx_bufs = kcalloc(gq->ring_size, sizeof(*gq->rx_bufs), GFP_KERNEL); in rswitch_gwca_queue_alloc()
315 if (!gq->rx_bufs) in rswitch_gwca_queue_alloc()
317 if (rswitch_gwca_queue_alloc_rx_buf(gq, 0, gq->ring_size) < 0) in rswitch_gwca_queue_alloc()
320 gq->rx_ring = dma_alloc_coherent(ndev->dev.parent, in rswitch_gwca_queue_alloc()
322 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL); in rswitch_gwca_queue_alloc()
324 gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL); in rswitch_gwca_queue_alloc()
325 if (!gq->skbs) in rswitch_gwca_queue_alloc()
327 gq->unmap_addrs = kcalloc(gq->ring_size, sizeof(*gq->unmap_addrs), GFP_KERNEL); in rswitch_gwca_queue_alloc()
328 if (!gq->unmap_addrs) in rswitch_gwca_queue_alloc()
330 gq->tx_ring = dma_alloc_coherent(ndev->dev.parent, in rswitch_gwca_queue_alloc()
332 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL); in rswitch_gwca_queue_alloc()
335 if (!gq->rx_ring && !gq->tx_ring) in rswitch_gwca_queue_alloc()
338 i = gq->index / 32; in rswitch_gwca_queue_alloc()
339 bit = BIT(gq->index % 32); in rswitch_gwca_queue_alloc()
348 rswitch_gwca_queue_free(ndev, gq); in rswitch_gwca_queue_alloc()
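
Every dma_alloc_coherent()/dma_free_coherent() in these paths sizes the ring as (gq->ring_size + 1) descriptors: the extra trailing entry is a link descriptor whose data pointer is set back to gq->ring_dma (compare lines 391, 492 and 549 below), so the hardware wraps from the last data descriptor back to the start. A schematic sketch with a made-up descriptor layout, not the driver's rswitch_ext_desc:

    #include <stdint.h>
    #include <stdlib.h>

    /* Made-up descriptor; the driver's are rswitch_desc, rswitch_ext_desc,
     * rswitch_ext_ts_desc and rswitch_ts_desc. */
    struct desc {
        uint8_t die_dt;  /* descriptor type bits */
        uint64_t dptr;   /* data pointer (bus address) */
    };

    struct ring {
        struct desc *descs;  /* ring_size data descs + 1 link desc */
        uint64_t ring_dma;   /* bus address of descs[0] */
        unsigned int ring_size;
    };

    static int ring_init(struct ring *r, unsigned int ring_size, uint64_t ring_dma)
    {
        /* One extra slot, matching the (gq->ring_size + 1) factor in the
         * coherent allocations above. */
        r->descs = calloc(ring_size + 1, sizeof(*r->descs));
        if (!r->descs)
            return -1;
        r->ring_size = ring_size;
        r->ring_dma = ring_dma;

        /* The trailing descriptor points back at the ring base, so the
         * hardware wraps to descs[0] after descs[ring_size - 1]. */
        r->descs[ring_size].dptr = r->ring_dma;
        return 0;
    }

    int main(void)
    {
        struct ring r;

        return ring_init(&r, 16, 0x1000);
    }
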
366 struct rswitch_gwca_queue *gq) in rswitch_gwca_queue_format() argument
368 unsigned int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size; in rswitch_gwca_queue_format()
374 memset(gq->tx_ring, 0, ring_size); in rswitch_gwca_queue_format()
375 for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) { in rswitch_gwca_queue_format()
376 if (!gq->dir_tx) { in rswitch_gwca_queue_format()
378 gq->rx_bufs[i] + RSWITCH_HEADROOM, in rswitch_gwca_queue_format()
391 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma); in rswitch_gwca_queue_format()
394 linkfix = &priv->gwca.linkfix_table[gq->index]; in rswitch_gwca_queue_format()
396 rswitch_desc_set_dptr(linkfix, gq->ring_dma); in rswitch_gwca_queue_format()
398 iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | GWDCC_EDE, in rswitch_gwca_queue_format()
399 priv->addr + GWDCC_OFFS(gq->index)); in rswitch_gwca_queue_format()
404 if (!gq->dir_tx) { in rswitch_gwca_queue_format()
405 for (desc = gq->tx_ring; i-- > 0; desc++) { in rswitch_gwca_queue_format()
419 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; in rswitch_gwca_ts_queue_fill() local
424 index = (i + start_index) % gq->ring_size; in rswitch_gwca_ts_queue_fill()
425 desc = &gq->ts_ring[index]; in rswitch_gwca_ts_queue_fill()
431 struct rswitch_gwca_queue *gq, in rswitch_gwca_queue_ext_ts_fill() argument
441 index = (i + start_index) % gq->ring_size; in rswitch_gwca_queue_ext_ts_fill()
442 desc = &gq->rx_ring[index]; in rswitch_gwca_queue_ext_ts_fill()
443 if (!gq->dir_tx) { in rswitch_gwca_queue_ext_ts_fill()
445 gq->rx_bufs[index] + RSWITCH_HEADROOM, in rswitch_gwca_queue_ext_ts_fill()
464 if (!gq->dir_tx) { in rswitch_gwca_queue_ext_ts_fill()
466 index = (i + start_index) % gq->ring_size; in rswitch_gwca_queue_ext_ts_fill()
467 desc = &gq->rx_ring[index]; in rswitch_gwca_queue_ext_ts_fill()
479 struct rswitch_gwca_queue *gq) in rswitch_gwca_queue_ext_ts_format() argument
481 unsigned int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size; in rswitch_gwca_queue_ext_ts_format()
486 memset(gq->rx_ring, 0, ring_size); in rswitch_gwca_queue_ext_ts_format()
487 err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size); in rswitch_gwca_queue_ext_ts_format()
491 desc = &gq->rx_ring[gq->ring_size]; /* Last */ in rswitch_gwca_queue_ext_ts_format()
492 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma); in rswitch_gwca_queue_ext_ts_format()
495 linkfix = &priv->gwca.linkfix_table[gq->index]; in rswitch_gwca_queue_ext_ts_format()
497 rswitch_desc_set_dptr(linkfix, gq->ring_dma); in rswitch_gwca_queue_ext_ts_format()
499 iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | in rswitch_gwca_queue_ext_ts_format()
501 priv->addr + GWDCC_OFFS(gq->index)); in rswitch_gwca_queue_ext_ts_format()
535 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; in rswitch_gwca_ts_queue_alloc() local
538 gq->ring_size = TS_RING_SIZE; in rswitch_gwca_ts_queue_alloc()
539 gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev, in rswitch_gwca_ts_queue_alloc()
541 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL); in rswitch_gwca_ts_queue_alloc()
543 if (!gq->ts_ring) in rswitch_gwca_ts_queue_alloc()
547 desc = &gq->ts_ring[gq->ring_size]; in rswitch_gwca_ts_queue_alloc()
549 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma); in rswitch_gwca_ts_queue_alloc()
557 struct rswitch_gwca_queue *gq; in rswitch_gwca_get() local
564 gq = &priv->gwca.queues[index]; in rswitch_gwca_get()
565 memset(gq, 0, sizeof(*gq)); in rswitch_gwca_get()
566 gq->index = index; in rswitch_gwca_get()
568 return gq; in rswitch_gwca_get()
572 struct rswitch_gwca_queue *gq) in rswitch_gwca_put() argument
574 clear_bit(gq->index, priv->gwca.used); in rswitch_gwca_put()
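
rswitch_gwca_get() and rswitch_gwca_put() hand out queues from a fixed pool through the priv->gwca.used bitmap: the get side finds and sets a clear bit and returns the matching queue, and releasing a queue is just clearing its bit (line 574). A small sketch of the same idiom without the kernel's bitmap helpers:

    #include <stdio.h>

    #define NUM_QUEUES 32

    /* One bit per queue, like priv->gwca.used. */
    static unsigned long used;

    /* Return the first free queue index and mark it used, or -1 if the
     * pool is exhausted (the driver returns NULL then). */
    static int queue_get(void)
    {
        for (int i = 0; i < NUM_QUEUES; i++) {
            if (!(used & (1UL << i))) {
                used |= 1UL << i;
                return i;
            }
        }
        return -1;
    }

    static void queue_put(int index)
    {
        used &= ~(1UL << index);
    }

    int main(void)
    {
        int a = queue_get(), b = queue_get();

        queue_put(a);
        printf("a=%d b=%d next=%d\n", a, b, queue_get());  /* a=0 b=1 next=0 */
        return 0;
    }
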
718 struct rswitch_gwca_queue *gq, in rswitch_rx_handle_desc() argument
739 if (gq->skb_fstart) { in rswitch_rx_handle_desc()
740 dev_kfree_skb_any(gq->skb_fstart); in rswitch_rx_handle_desc()
741 gq->skb_fstart = NULL; in rswitch_rx_handle_desc()
747 if (!gq->skb_fstart) { in rswitch_rx_handle_desc()
760 skb = build_skb(gq->rx_bufs[gq->cur], RSWITCH_BUF_SIZE); in rswitch_rx_handle_desc()
764 gq->pkt_len = pkt_len; in rswitch_rx_handle_desc()
766 gq->skb_fstart = skb; in rswitch_rx_handle_desc()
773 skb_add_rx_frag(gq->skb_fstart, skb_shinfo(gq->skb_fstart)->nr_frags, in rswitch_rx_handle_desc()
774 virt_to_page(gq->rx_bufs[gq->cur]), in rswitch_rx_handle_desc()
775 offset_in_page(gq->rx_bufs[gq->cur]) + RSWITCH_HEADROOM, in rswitch_rx_handle_desc()
778 skb = gq->skb_fstart; in rswitch_rx_handle_desc()
779 gq->skb_fstart = NULL; in rswitch_rx_handle_desc()
781 gq->pkt_len += pkt_len; in rswitch_rx_handle_desc()
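
rswitch_rx_handle_desc() reassembles packets that span several rx descriptors: the first fragment becomes an skb via build_skb() and is parked in gq->skb_fstart, later fragments are appended with skb_add_rx_frag(), the skb is handed back (and skb_fstart cleared) only on the final descriptor, and an error descriptor drops any packet in progress. A shape-only sketch of that state machine, with hypothetical first/last flags standing in for the driver's descriptor-type checks:

    #include <stdbool.h>
    #include <stddef.h>

    struct frag {
        bool first, last;  /* stand-ins for the descriptor-type checks */
        void *buf;
        size_t len;
    };

    struct reasm {
        void *pkt_start;  /* like gq->skb_fstart: packet in progress */
        size_t pkt_len;   /* like gq->pkt_len */
    };

    /* Drop the packet in progress, as the driver does on an error
     * descriptor while gq->skb_fstart is set. */
    static void reasm_abort(struct reasm *st)
    {
        st->pkt_start = NULL;
    }

    /* Return the completed packet on the last fragment, NULL otherwise. */
    static void *handle_frag(struct reasm *st, const struct frag *f)
    {
        if (f->first) {
            st->pkt_start = f->buf;  /* driver: build_skb() */
            st->pkt_len = f->len;
        } else {
            st->pkt_len += f->len;   /* driver: skb_add_rx_frag() */
        }

        if (!f->last)
            return NULL;

        void *pkt = st->pkt_start;
        st->pkt_start = NULL;
        return pkt;
    }

    int main(void)
    {
        struct reasm st = { 0 };
        struct frag f1 = { .first = true, .buf = &st, .len = 1500 };
        struct frag f2 = { .last = true, .buf = &st, .len = 200 };

        (void)reasm_abort;
        handle_frag(&st, &f1);              /* starts a packet */
        void *pkt = handle_frag(&st, &f2);  /* completes it, pkt_len 1700 */
        return pkt ? 0 : 1;
    }
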
794 struct rswitch_gwca_queue *gq = rdev->rx_queue; in rswitch_rx() local
804 boguscnt = min_t(int, gq->ring_size, *quota); in rswitch_rx()
807 desc = &gq->rx_ring[gq->cur]; in rswitch_rx()
810 skb = rswitch_rx_handle_desc(ndev, gq, desc); in rswitch_rx()
828 rdev->ndev->stats.rx_bytes += gq->pkt_len; in rswitch_rx()
831 gq->rx_bufs[gq->cur] = NULL; in rswitch_rx()
832 gq->cur = rswitch_next_queue_index(gq, true, 1); in rswitch_rx()
833 desc = &gq->rx_ring[gq->cur]; in rswitch_rx()
839 num = rswitch_get_num_cur_queues(gq); in rswitch_rx()
840 ret = rswitch_gwca_queue_alloc_rx_buf(gq, gq->dirty, num); in rswitch_rx()
843 ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num); in rswitch_rx()
846 gq->dirty = rswitch_next_queue_index(gq, false, num); in rswitch_rx()
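
rswitch_rx() is the poll loop around that handler: it consumes at most min(ring_size, quota) ready descriptors from gq->cur (the boguscnt bound at line 804), then refills every slot consumed since the last pass and advances gq->dirty. A self-contained sketch of that consume-then-refill shape; process_desc() and refill_slot() are hypothetical stand-ins for rswitch_rx_handle_desc() and the alloc_rx_buf/ext_ts_fill pair:

    #include <stdbool.h>
    #include <stdio.h>

    #define RING_SIZE 8

    struct rxq {
        unsigned int cur, dirty;
        bool ready[RING_SIZE];  /* stand-in for the descriptor ownership check */
    };

    /* Hypothetical stand-ins for the per-descriptor work. */
    static void process_desc(struct rxq *q, unsigned int i) { q->ready[i] = false; }
    static void refill_slot(struct rxq *q, unsigned int i) { (void)q; (void)i; }

    static int rx_poll(struct rxq *q, int quota)
    {
        int done = 0;

        /* Consume ready descriptors from cur, bounded by the quota. */
        while (done < quota && q->ready[q->cur]) {
            process_desc(q, q->cur);
            q->cur = (q->cur + 1) % RING_SIZE;
            done++;
        }

        /* Refill everything consumed since the last poll, then catch
         * dirty up to cur, as rswitch_rx() does after its main loop. */
        while (q->dirty != q->cur) {
            refill_slot(q, q->dirty);
            q->dirty = (q->dirty + 1) % RING_SIZE;
        }
        return done;
    }

    int main(void)
    {
        struct rxq q = { .ready = { true, true, true } };

        printf("processed %d\n", rx_poll(&q, 4));  /* processed 3 */
        return 0;
    }
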
861 struct rswitch_gwca_queue *gq = rdev->tx_queue; in rswitch_tx_free() local
865 for (; rswitch_get_num_cur_queues(gq) > 0; in rswitch_tx_free()
866 gq->dirty = rswitch_next_queue_index(gq, false, 1)) { in rswitch_tx_free()
867 desc = &gq->tx_ring[gq->dirty]; in rswitch_tx_free()
872 skb = gq->skbs[gq->dirty]; in rswitch_tx_free()
877 gq->unmap_addrs[gq->dirty], in rswitch_tx_free()
879 dev_kfree_skb_any(gq->skbs[gq->dirty]); in rswitch_tx_free()
880 gq->skbs[gq->dirty] = NULL; in rswitch_tx_free()
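
The tx reclaim walks gq->dirty toward gq->cur, stopping at the first descriptor the hardware has not completed; because a packet may occupy several descriptors, the skb and its unmap address are recorded on only one slot (the packet's first descriptor, see lines 1684-1685 below), and unmapping, freeing and stats happen when that slot is reached. A sketch of the same loop, with hw_done[] standing in for the descriptor-type check and a no-op free_skb() for dev_kfree_skb_any():

    #include <stdbool.h>
    #include <stddef.h>

    #define RING_SIZE 8

    struct txq {
        unsigned int cur, dirty;
        bool hw_done[RING_SIZE];  /* stand-in for the descriptor-type check */
        void *skbs[RING_SIZE];    /* non-NULL on a packet's first descriptor */
    };

    static void free_skb(void *skb) { (void)skb; }  /* stand-in for dev_kfree_skb_any() */

    /* Walk dirty toward cur, stopping at the first unfinished descriptor;
     * count a packet (and free its skb) only on the slot where the skb
     * was recorded. */
    static unsigned int tx_free(struct txq *q)
    {
        unsigned int pkts = 0;

        while (q->dirty != q->cur && q->hw_done[q->dirty]) {
            if (q->skbs[q->dirty]) {
                free_skb(q->skbs[q->dirty]);  /* driver also unmaps here */
                q->skbs[q->dirty] = NULL;
                pkts++;
            }
            q->dirty = (q->dirty + 1) % RING_SIZE;
        }
        return pkts;
    }

    int main(void)
    {
        static char skb;
        struct txq q = { .cur = 3, .hw_done = { true, true, true } };

        q.skbs[0] = &skb;  /* one packet spanning slots 0..2 */
        return tx_free(&q) == 1 ? 0 : 1;
    }
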
940 struct rswitch_gwca_queue *gq; in rswitch_data_irq() local
944 gq = &priv->gwca.queues[i]; in rswitch_data_irq()
945 index = gq->index / 32; in rswitch_data_irq()
946 bit = BIT(gq->index % 32); in rswitch_data_irq()
950 rswitch_ack_data_irq(priv, gq->index); in rswitch_data_irq()
951 rswitch_queue_interrupt(gq->ndev); in rswitch_data_irq()
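
Per-queue interrupt state is tracked as one bit per queue across an array of 32-bit words, so both the alloc path (lines 338-339) and the irq dispatcher (lines 945-946) derive a word index as gq->index / 32 and a mask as BIT(gq->index % 32). The same math in isolation:

    #include <stdint.h>
    #include <stdio.h>

    /* A queue index selects a 32-bit status word (index / 32) and a bit
     * inside it (index % 32). */
    static int queue_bit_set(const uint32_t *status, unsigned int index)
    {
        return !!(status[index / 32] & (1U << (index % 32)));
    }

    int main(void)
    {
        uint32_t status[2] = { 0, 1U << 2 };  /* only queue 34 pending */

        printf("%d %d\n", queue_bit_set(status, 34), queue_bit_set(status, 3));  /* 1 0 */
        return 0;
    }
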
1003 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue; in rswitch_ts() local
1011 desc = &gq->ts_ring[gq->cur]; in rswitch_ts()
1033 gq->cur = rswitch_next_queue_index(gq, true, 1); in rswitch_ts()
1034 desc = &gq->ts_ring[gq->cur]; in rswitch_ts()
1037 num = rswitch_get_num_cur_queues(gq); in rswitch_ts()
1038 rswitch_gwca_ts_queue_fill(priv, gq->dirty, num); in rswitch_ts()
1039 gq->dirty = rswitch_next_queue_index(gq, false, num); in rswitch_ts()
1663 struct rswitch_gwca_queue *gq = rdev->tx_queue; in rswitch_start_xmit() local
1672 if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - nr_desc) { in rswitch_start_xmit()
1684 gq->skbs[gq->cur] = skb; in rswitch_start_xmit()
1685 gq->unmap_addrs[gq->cur] = dma_addr_orig; in rswitch_start_xmit()
1689 desc = &gq->tx_ring[rswitch_next_queue_index(gq, true, i)]; in rswitch_start_xmit()
1699 gq->cur = rswitch_next_queue_index(gq, true, nr_desc); in rswitch_start_xmit()
1700 rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32)); in rswitch_start_xmit()
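
rswitch_start_xmit() is the producer side: it refuses the packet while fewer than nr_desc free slots remain (line 1672), records the skb and its unmap address at gq->cur, fills the descriptors (back to front in the driver, so the start descriptor becomes valid last), advances gq->cur, and kicks the hardware through the GWTRC register. A sketch of that shape with the descriptor writes elided; kick_hw() is a hypothetical stand-in for the doorbell write:

    #include <stdbool.h>

    #define RING_SIZE 16

    struct txq {
        unsigned int cur, dirty;
        void *skbs[RING_SIZE];
    };

    static unsigned int in_flight(const struct txq *q)
    {
        return q->cur >= q->dirty ? q->cur - q->dirty
                                  : RING_SIZE - q->dirty + q->cur;
    }

    static void kick_hw(void) { }  /* stand-in for the GWTRC doorbell write */

    /* Refuse the packet unless nr_desc slots fit, stash the skb at the
     * head slot, publish the descriptors, advance cur, ring the doorbell. */
    static bool xmit(struct txq *q, void *skb, unsigned int nr_desc)
    {
        if (in_flight(q) >= RING_SIZE - nr_desc)
            return false;  /* ring full: the driver stops the subqueue */

        q->skbs[q->cur] = skb;  /* driver also records the unmap address */
        /* ... descriptor writes elided; the driver fills them back to
         * front so the start descriptor becomes valid last ... */
        q->cur = (q->cur + nr_desc) % RING_SIZE;
        kick_hw();
        return true;
    }

    int main(void)
    {
        static char skb;
        struct txq q = { 0 };

        return xmit(&q, &skb, 2) ? 0 : 1;
    }
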