Lines Matching +full:always +full:- +full:turbo

7  *  Copyright (c) 2000-2008 LSI Corporation
8 * (mailto:DL-MPTFusionLinux@lsi.com)
11 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
25 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
44 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
47 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
53 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
68 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
73 (sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
121 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
145 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
152 static u32 tx_max_out_p = 127 - 16;
154 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
156 * lan_reply - Handle all data sent from the hardware.
167 struct net_device *dev = ioc->netdev; in lan_reply()
191 // "MessageContext turbo reply received\n")); in lan_reply()
198 // "calling mpt_lan_send_reply (turbo)\n")); in lan_reply()
207 // item back onto its adapter FreeQ - Oops!:-( in lan_reply()
209 // always returns 0, but..., just in case: in lan_reply()
218 // "rcv-Turbo = %08x\n", tmsg)); in lan_reply()
223 printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply " in lan_reply()
239 // reply->u.hdr.Function)); in lan_reply()
241 switch (reply->u.hdr.Function) { in lan_reply()
257 if (pRecvRep->NumberOfContexts) { in lan_reply()
259 if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)) in lan_reply()
283 printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo " in lan_reply()
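
The fragments above show lan_reply()'s two paths: a turbo reply is a single 32-bit token that already carries the buffer context, while a non-turbo reply is a full frame dispatched on reply->u.hdr.Function. A minimal standalone sketch of that split follows; the kind encoding, constants, and struct layout are illustrative assumptions, not the driver's actual definitions.

#include <stdint.h>
#include <stdio.h>

#define CTX_KIND_SHIFT  24            /* assumed: reply kind in the top byte */
#define CTX_KIND_SEND   0x01u         /* assumed encoding: send completed    */
#define CTX_KIND_RECV   0x02u         /* assumed encoding: receive completed */

struct full_reply { uint8_t function; /* ... rest of the reply frame ... */ };

static void handle_send_done(uint32_t ctx) { printf("tx ctx %u done\n", ctx); }
static void handle_recv_done(uint32_t ctx) { printf("rx ctx %u done\n", ctx); }

/* Turbo path: everything needed is packed into one 32-bit message. */
static void handle_turbo(uint32_t tmsg)
{
	uint32_t kind = tmsg >> CTX_KIND_SHIFT;
	uint32_t ctx  = tmsg & ((1u << CTX_KIND_SHIFT) - 1);

	if (kind == CTX_KIND_SEND)
		handle_send_done(ctx);
	else if (kind == CTX_KIND_RECV)
		handle_recv_done(ctx);
	else
		fprintf(stderr, "unexpected turbo reply %08x\n", tmsg);
}

/* Non-turbo path: a real reply frame, dispatched on its Function code. */
static void handle_full(const struct full_reply *r)
{
	switch (r->function) {
	/* the driver handles MPI_FUNCTION_LAN_SEND / LAN_RECEIVE here */
	default:
		fprintf(stderr, "unhandled function %u\n", r->function);
	}
}

int main(void)
{
	struct full_reply r = { .function = 0 };

	handle_turbo((CTX_KIND_SEND << CTX_KIND_SHIFT) | 7u);
	handle_full(&r);
	return 0;
}
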
295 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
299 struct net_device *dev = ioc->netdev; in mpt_lan_ioc_reset()
311 if (priv->mpt_rxfidx == NULL) in mpt_lan_ioc_reset()
322 dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name)); in mpt_lan_ioc_reset()
324 atomic_set(&priv->buckets_out, 0); in mpt_lan_ioc_reset()
326 /* Reset Rx Free Tail index and re-populate the queue. */ in mpt_lan_ioc_reset()
327 spin_lock_irqsave(&priv->rxfidx_lock, flags); in mpt_lan_ioc_reset()
328 priv->mpt_rxfidx_tail = -1; in mpt_lan_ioc_reset()
329 for (i = 0; i < priv->max_buckets_out; i++) in mpt_lan_ioc_reset()
330 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i; in mpt_lan_ioc_reset()
331 spin_unlock_irqrestore(&priv->rxfidx_lock, flags); in mpt_lan_ioc_reset()
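
The reset path above rebuilds the receive free-index queue by setting the tail to -1 and pushing every bucket index back on. That array-plus-tail pair is simply a LIFO stack of free contexts; a small standalone sketch of the pattern follows (names are illustrative, and the real driver holds rxfidx_lock around each operation).

#include <stdio.h>

#define MAX_CTX 8

static int free_idx[MAX_CTX];
static int free_tail = -1;

static void reset_free_stack(void)
{
	free_tail = -1;
	for (int i = 0; i < MAX_CTX; i++)
		free_idx[++free_tail] = i;      /* push every context */
}

static int get_ctx(void)
{
	if (free_tail < 0)
		return -1;                      /* nothing free */
	return free_idx[free_tail--];           /* pop */
}

static void put_ctx(int ctx)
{
	free_idx[++free_tail] = ctx;            /* push back */
}

int main(void)
{
	reset_free_stack();
	int a = get_ctx(), b = get_ctx();
	printf("got %d and %d\n", a, b);
	put_ctx(a);
	put_ctx(b);
	return 0;
}
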
340 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
346 switch (le32_to_cpu(pEvReply->Event)) { in mpt_lan_event_process()
366 * NOTE: pEvent->AckRequired handling now done in mptbase.c; in mpt_lan_event_process()
373 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
381 MPT_ADAPTER *mpt_dev = priv->mpt_dev; in mpt_lan_open()
385 if (mpt_dev->active) in mpt_lan_open()
394 priv->mpt_txfidx = kmalloc_array(priv->tx_max_out, sizeof(int), in mpt_lan_open()
396 if (priv->mpt_txfidx == NULL) in mpt_lan_open()
398 priv->mpt_txfidx_tail = -1; in mpt_lan_open()
400 priv->SendCtl = kcalloc(priv->tx_max_out, sizeof(struct BufferControl), in mpt_lan_open()
402 if (priv->SendCtl == NULL) in mpt_lan_open()
404 for (i = 0; i < priv->tx_max_out; i++) in mpt_lan_open()
405 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i; in mpt_lan_open()
409 priv->mpt_rxfidx = kmalloc_array(priv->max_buckets_out, sizeof(int), in mpt_lan_open()
411 if (priv->mpt_rxfidx == NULL) in mpt_lan_open()
413 priv->mpt_rxfidx_tail = -1; in mpt_lan_open()
415 priv->RcvCtl = kcalloc(priv->max_buckets_out, in mpt_lan_open()
418 if (priv->RcvCtl == NULL) in mpt_lan_open()
420 for (i = 0; i < priv->max_buckets_out; i++) in mpt_lan_open()
421 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i; in mpt_lan_open()
423 /**/ dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - ")); in mpt_lan_open()
424 /**/ for (i = 0; i < priv->tx_max_out; i++) in mpt_lan_open()
425 /**/ dlprintk((" %xh", priv->mpt_txfidx[i])); in mpt_lan_open()
446 kfree(priv->mpt_rxfidx); in mpt_lan_open()
447 priv->mpt_rxfidx = NULL; in mpt_lan_open()
449 kfree(priv->SendCtl); in mpt_lan_open()
450 priv->SendCtl = NULL; in mpt_lan_open()
452 kfree(priv->mpt_txfidx); in mpt_lan_open()
453 priv->mpt_txfidx = NULL; in mpt_lan_open()
454 out: return -ENOMEM; in mpt_lan_open()
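
mpt_lan_open() above allocates the tx/rx index arrays and their control tables, and unwinds the allocations in reverse order when one fails, ending at the -ENOMEM return. A compact userspace sketch of that goto-based cleanup follows, with calloc/free standing in for kcalloc/kfree and illustrative field names and sizes.

#include <stdlib.h>

struct lan_priv {
	int  *txfidx;
	int  *rxfidx;
	void *send_ctl;
	void *rcv_ctl;
};

static int lan_open_bufs(struct lan_priv *p, size_t tx_max, size_t rx_max)
{
	p->txfidx = calloc(tx_max, sizeof(*p->txfidx));
	if (!p->txfidx)
		goto out;
	p->send_ctl = calloc(tx_max, 64);       /* 64: placeholder element size */
	if (!p->send_ctl)
		goto out_txfidx;
	p->rxfidx = calloc(rx_max, sizeof(*p->rxfidx));
	if (!p->rxfidx)
		goto out_send_ctl;
	p->rcv_ctl = calloc(rx_max, 64);
	if (!p->rcv_ctl)
		goto out_rxfidx;
	return 0;                               /* every table allocated */

out_rxfidx:
	free(p->rxfidx);   p->rxfidx = NULL;
out_send_ctl:
	free(p->send_ctl); p->send_ctl = NULL;
out_txfidx:
	free(p->txfidx);   p->txfidx = NULL;
out:
	return -1;                              /* the driver returns -ENOMEM */
}

int main(void)
{
	struct lan_priv p = { 0 };

	return lan_open_bufs(&p, 16, 32) ? 1 : 0;
}
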
457 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
467 mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev); in mpt_lan_reset()
473 return -1; in mpt_lan_reset()
478 pResetReq->Function = MPI_FUNCTION_LAN_RESET; in mpt_lan_reset()
479 pResetReq->ChainOffset = 0; in mpt_lan_reset()
480 pResetReq->Reserved = 0; in mpt_lan_reset()
481 pResetReq->PortNumber = priv->pnum; in mpt_lan_reset()
482 pResetReq->MsgFlags = 0; in mpt_lan_reset()
483 pResetReq->Reserved2 = 0; in mpt_lan_reset()
485 mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf); in mpt_lan_reset()
490 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
495 MPT_ADAPTER *mpt_dev = priv->mpt_dev; in mpt_lan_close()
505 priv->total_posted,atomic_read(&priv->buckets_out))); in mpt_lan_close()
512 while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout)) in mpt_lan_close()
515 for (i = 0; i < priv->max_buckets_out; i++) { in mpt_lan_close()
516 if (priv->RcvCtl[i].skb != NULL) { in mpt_lan_close()
519 dma_unmap_single(&mpt_dev->pcidev->dev, in mpt_lan_close()
520 priv->RcvCtl[i].dma, in mpt_lan_close()
521 priv->RcvCtl[i].len, DMA_FROM_DEVICE); in mpt_lan_close()
522 dev_kfree_skb(priv->RcvCtl[i].skb); in mpt_lan_close()
526 kfree(priv->RcvCtl); in mpt_lan_close()
527 kfree(priv->mpt_rxfidx); in mpt_lan_close()
529 for (i = 0; i < priv->tx_max_out; i++) { in mpt_lan_close()
530 if (priv->SendCtl[i].skb != NULL) { in mpt_lan_close()
531 dma_unmap_single(&mpt_dev->pcidev->dev, in mpt_lan_close()
532 priv->SendCtl[i].dma, in mpt_lan_close()
533 priv->SendCtl[i].len, DMA_TO_DEVICE); in mpt_lan_close()
534 dev_kfree_skb(priv->SendCtl[i].skb); in mpt_lan_close()
538 kfree(priv->SendCtl); in mpt_lan_close()
539 kfree(priv->mpt_txfidx); in mpt_lan_close()
541 atomic_set(&priv->buckets_out, 0); in mpt_lan_close()
549 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
555 MPT_ADAPTER *mpt_dev = priv->mpt_dev; in mpt_lan_tx_timeout()
557 if (mpt_dev->active) { in mpt_lan_tx_timeout()
558 dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name)); in mpt_lan_tx_timeout()
563 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
569 MPT_ADAPTER *mpt_dev = priv->mpt_dev; in mpt_lan_send_turbo()
575 sent = priv->SendCtl[ctx].skb; in mpt_lan_send_turbo()
577 dev->stats.tx_packets++; in mpt_lan_send_turbo()
578 dev->stats.tx_bytes += sent->len; in mpt_lan_send_turbo()
584 priv->SendCtl[ctx].skb = NULL; in mpt_lan_send_turbo()
585 dma_unmap_single(&mpt_dev->pcidev->dev, priv->SendCtl[ctx].dma, in mpt_lan_send_turbo()
586 priv->SendCtl[ctx].len, DMA_TO_DEVICE); in mpt_lan_send_turbo()
589 spin_lock_irqsave(&priv->txfidx_lock, flags); in mpt_lan_send_turbo()
590 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx; in mpt_lan_send_turbo()
591 spin_unlock_irqrestore(&priv->txfidx_lock, flags); in mpt_lan_send_turbo()
597 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
602 MPT_ADAPTER *mpt_dev = priv->mpt_dev; in mpt_lan_send_reply()
610 count = pSendRep->NumberOfContexts; in mpt_lan_send_reply()
613 le16_to_cpu(pSendRep->IOCStatus))); in mpt_lan_send_reply()
617 switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) { in mpt_lan_send_reply()
619 dev->stats.tx_packets += count; in mpt_lan_send_reply()
627 dev->stats.tx_errors += count; in mpt_lan_send_reply()
628 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n", in mpt_lan_send_reply()
633 dev->stats.tx_errors += count; in mpt_lan_send_reply()
637 pContext = &pSendRep->BufferContext; in mpt_lan_send_reply()
639 spin_lock_irqsave(&priv->txfidx_lock, flags); in mpt_lan_send_reply()
643 sent = priv->SendCtl[ctx].skb; in mpt_lan_send_reply()
644 dev->stats.tx_bytes += sent->len; in mpt_lan_send_reply()
650 priv->SendCtl[ctx].skb = NULL; in mpt_lan_send_reply()
651 dma_unmap_single(&mpt_dev->pcidev->dev, in mpt_lan_send_reply()
652 priv->SendCtl[ctx].dma, in mpt_lan_send_reply()
653 priv->SendCtl[ctx].len, DMA_TO_DEVICE); in mpt_lan_send_reply()
656 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx; in mpt_lan_send_reply()
659 count--; in mpt_lan_send_reply()
661 spin_unlock_irqrestore(&priv->txfidx_lock, flags); in mpt_lan_send_reply()
664 if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)) in mpt_lan_send_reply()
671 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
676 MPT_ADAPTER *mpt_dev = priv->mpt_dev; in mpt_lan_sdu_send()
690 spin_lock_irqsave(&priv->txfidx_lock, flags); in mpt_lan_sdu_send()
691 if (priv->mpt_txfidx_tail < 0) { in mpt_lan_sdu_send()
693 spin_unlock_irqrestore(&priv->txfidx_lock, flags); in mpt_lan_sdu_send()
696 __func__, priv->mpt_txfidx_tail); in mpt_lan_sdu_send()
703 spin_unlock_irqrestore(&priv->txfidx_lock, flags); in mpt_lan_sdu_send()
710 ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--]; in mpt_lan_sdu_send()
711 spin_unlock_irqrestore(&priv->txfidx_lock, flags); in mpt_lan_sdu_send()
724 dma = dma_map_single(&mpt_dev->pcidev->dev, skb->data, skb->len, in mpt_lan_sdu_send()
727 priv->SendCtl[ctx].skb = skb; in mpt_lan_sdu_send()
728 priv->SendCtl[ctx].dma = dma; in mpt_lan_sdu_send()
729 priv->SendCtl[ctx].len = skb->len; in mpt_lan_sdu_send()
732 pSendReq->Reserved = 0; in mpt_lan_sdu_send()
733 pSendReq->Function = MPI_FUNCTION_LAN_SEND; in mpt_lan_sdu_send()
734 pSendReq->ChainOffset = 0; in mpt_lan_sdu_send()
735 pSendReq->Reserved2 = 0; in mpt_lan_sdu_send()
736 pSendReq->MsgFlags = 0; in mpt_lan_sdu_send()
737 pSendReq->PortNumber = priv->pnum; in mpt_lan_sdu_send()
740 pTrans = (SGETransaction32_t *) pSendReq->SG_List; in mpt_lan_sdu_send()
742 /* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */ in mpt_lan_sdu_send()
743 pTrans->ContextSize = sizeof(u32); in mpt_lan_sdu_send()
744 pTrans->DetailsLength = 2 * sizeof(u32); in mpt_lan_sdu_send()
745 pTrans->Flags = 0; in mpt_lan_sdu_send()
746 pTrans->TransactionContext = cpu_to_le32(ctx); in mpt_lan_sdu_send()
750 // ctx, skb, skb->data)); in mpt_lan_sdu_send()
754 pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) | in mpt_lan_sdu_send()
757 pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) | in mpt_lan_sdu_send()
762 pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2]; in mpt_lan_sdu_send()
767 pSimple->FlagsLength = cpu_to_le32( in mpt_lan_sdu_send()
775 skb->len); in mpt_lan_sdu_send()
776 pSimple->Address.Low = cpu_to_le32((u32) dma); in mpt_lan_sdu_send()
778 pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32)); in mpt_lan_sdu_send()
780 pSimple->Address.High = 0; in mpt_lan_sdu_send()
787 le32_to_cpu(pSimple->FlagsLength))); in mpt_lan_sdu_send()
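
The send path above fills a 64-bit simple SGE: the mapped DMA address is split into Address.Low and Address.High, with the high word forced to zero when the mapping fits in 32 bits. A standalone sketch of that split follows; the endian conversion is omitted and the field names are illustrative.

#include <stdint.h>
#include <stdio.h>

struct sge_simple64 {
	uint32_t flags_length;   /* SGE flags in the top bits, length below */
	uint32_t addr_low;
	uint32_t addr_high;
};

static void fill_sge(struct sge_simple64 *sge, uint64_t dma, uint32_t len,
		     uint32_t flags)
{
	sge->flags_length = flags | len;        /* assumes len fits the low bits  */
	sge->addr_low  = (uint32_t)dma;
	sge->addr_high = (uint32_t)(dma >> 32); /* 0 when dma is a 32-bit address */
}

int main(void)
{
	struct sge_simple64 sge;

	fill_sge(&sge, 0x000000012345abcdULL, 1514, 0xd1000000u);
	printf("low=%08x high=%08x fl=%08x\n",
	       sge.addr_low, sge.addr_high, sge.flags_length);
	return 0;
}
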
792 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
801 if (test_and_set_bit(0, &priv->post_buckets_active) == 0) { in mpt_lan_wake_post_buckets_task()
803 schedule_delayed_work(&priv->post_buckets_task, 0); in mpt_lan_wake_post_buckets_task()
805 schedule_delayed_work(&priv->post_buckets_task, 1); in mpt_lan_wake_post_buckets_task()
814 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
820 skb->protocol = mpt_lan_type_trans(skb, dev); in mpt_lan_receive_skb()
824 IOC_AND_NETDEV_NAMES_s_s(dev), skb->len)); in mpt_lan_receive_skb()
826 dev->stats.rx_bytes += skb->len; in mpt_lan_receive_skb()
827 dev->stats.rx_packets++; in mpt_lan_receive_skb()
829 skb->dev = dev; in mpt_lan_receive_skb()
833 atomic_read(&priv->buckets_out))); in mpt_lan_receive_skb()
835 if (atomic_read(&priv->buckets_out) < priv->bucketthresh) in mpt_lan_receive_skb()
840 atomic_read(&priv->buckets_out), priv->total_received)); in mpt_lan_receive_skb()
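
The receive completion above only schedules the bucket-refill work once buckets_out falls below bucketthresh, so refills happen in batches rather than per packet. A tiny sketch of that threshold check follows; the refill function and the numbers are stand-ins.

#include <stdio.h>

static int buckets_out  = 100;    /* buffers currently owned by the IOC */
static int bucketthresh = 85;     /* e.g. max_buckets_out * 2 / 3       */

static void schedule_refill(void) { printf("refill scheduled\n"); }

static void on_packet_received(void)
{
	buckets_out--;                  /* one bucket came back with data */
	if (buckets_out < bucketthresh)
		schedule_refill();      /* top the queue back up later    */
}

int main(void)
{
	for (int i = 0; i < 20; i++)
		on_packet_received();
	return 0;
}
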
845 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
851 MPT_ADAPTER *mpt_dev = priv->mpt_dev; in mpt_lan_receive_post_turbo()
857 skb = priv->RcvCtl[ctx].skb; in mpt_lan_receive_post_turbo()
866 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n", in mpt_lan_receive_post_turbo()
869 return -ENOMEM; in mpt_lan_receive_post_turbo()
872 dma_sync_single_for_cpu(&mpt_dev->pcidev->dev, in mpt_lan_receive_post_turbo()
873 priv->RcvCtl[ctx].dma, in mpt_lan_receive_post_turbo()
874 priv->RcvCtl[ctx].len, in mpt_lan_receive_post_turbo()
879 dma_sync_single_for_device(&mpt_dev->pcidev->dev, in mpt_lan_receive_post_turbo()
880 priv->RcvCtl[ctx].dma, in mpt_lan_receive_post_turbo()
881 priv->RcvCtl[ctx].len, in mpt_lan_receive_post_turbo()
888 priv->RcvCtl[ctx].skb = NULL; in mpt_lan_receive_post_turbo()
890 dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma, in mpt_lan_receive_post_turbo()
891 priv->RcvCtl[ctx].len, DMA_FROM_DEVICE); in mpt_lan_receive_post_turbo()
894 spin_lock_irqsave(&priv->rxfidx_lock, flags); in mpt_lan_receive_post_turbo()
895 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx; in mpt_lan_receive_post_turbo()
896 spin_unlock_irqrestore(&priv->rxfidx_lock, flags); in mpt_lan_receive_post_turbo()
898 atomic_dec(&priv->buckets_out); in mpt_lan_receive_post_turbo()
899 priv->total_received++; in mpt_lan_receive_post_turbo()
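
The turbo receive fragments above show both completion paths: syncing the bucket for the CPU to copy a frame out and then syncing it back for reuse, or unmapping the bucket and returning its context to the free list. A userspace sketch of the copy-out case follows, with no-op stubs standing in for the dma_sync_single_for_{cpu,device} calls.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* No-op stand-ins for the kernel DMA sync calls used in the fragments. */
static void dma_sync_for_cpu(void *buf)    { (void)buf; /* device writes now visible */ }
static void dma_sync_for_device(void *buf) { (void)buf; /* buffer handed back to HW  */ }

static unsigned char bucket[2048];          /* one long-lived receive bucket (example) */

/* Copy a short frame of 'len' bytes out of the bucket into its own buffer. */
static unsigned char *copy_break_rx(size_t len)
{
	unsigned char *pkt = malloc(len);

	if (!pkt)
		return NULL;                /* the driver returns -ENOMEM here */
	dma_sync_for_cpu(bucket);           /* before the CPU reads the bucket */
	memcpy(pkt, bucket, len);
	dma_sync_for_device(bucket);        /* before reposting it to the IOC  */
	return pkt;
}

int main(void)
{
	unsigned char *pkt = copy_break_rx(64);

	printf("%s\n", pkt ? "frame copied" : "allocation failed");
	free(pkt);
	return 0;
}
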
904 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
910 MPT_ADAPTER *mpt_dev = priv->mpt_dev; in mpt_lan_receive_post_free()
917 count = pRecvRep->NumberOfContexts; in mpt_lan_receive_post_free()
922 spin_lock_irqsave(&priv->rxfidx_lock, flags); in mpt_lan_receive_post_free()
924 ctx = le32_to_cpu(pRecvRep->BucketContext[i]); in mpt_lan_receive_post_free()
926 skb = priv->RcvCtl[ctx].skb; in mpt_lan_receive_post_free()
931 // priv, &(priv->buckets_out))); in mpt_lan_receive_post_free()
934 priv->RcvCtl[ctx].skb = NULL; in mpt_lan_receive_post_free()
935 dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma, in mpt_lan_receive_post_free()
936 priv->RcvCtl[ctx].len, DMA_FROM_DEVICE); in mpt_lan_receive_post_free()
939 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx; in mpt_lan_receive_post_free()
941 spin_unlock_irqrestore(&priv->rxfidx_lock, flags); in mpt_lan_receive_post_free()
943 atomic_sub(count, &priv->buckets_out); in mpt_lan_receive_post_free()
945 // for (i = 0; i < priv->max_buckets_out; i++) in mpt_lan_receive_post_free()
946 // if (priv->RcvCtl[i].skb != NULL) in mpt_lan_receive_post_free()
955 /**/ atomic_read(&priv->buckets_out), priv->total_received)); in mpt_lan_receive_post_free()
959 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
965 MPT_ADAPTER *mpt_dev = priv->mpt_dev; in mpt_lan_receive_post_reply()
969 u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining); in mpt_lan_receive_post_reply()
975 le16_to_cpu(pRecvRep->IOCStatus))); in mpt_lan_receive_post_reply()
977 if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) == in mpt_lan_receive_post_reply()
981 len = le32_to_cpu(pRecvRep->PacketLength); in mpt_lan_receive_post_reply()
983 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO " in mpt_lan_receive_post_reply()
987 pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus)); in mpt_lan_receive_post_reply()
988 return -1; in mpt_lan_receive_post_reply()
991 ctx = le32_to_cpu(pRecvRep->BucketContext[0]); in mpt_lan_receive_post_reply()
992 count = pRecvRep->NumberOfContexts; in mpt_lan_receive_post_reply()
993 skb = priv->RcvCtl[ctx].skb; in mpt_lan_receive_post_reply()
995 offset = le32_to_cpu(pRecvRep->PacketOffset); in mpt_lan_receive_post_reply()
1016 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n", in mpt_lan_receive_post_reply()
1019 return -ENOMEM; in mpt_lan_receive_post_reply()
1022 spin_lock_irqsave(&priv->rxfidx_lock, flags); in mpt_lan_receive_post_reply()
1025 ctx = le32_to_cpu(pRecvRep->BucketContext[i]); in mpt_lan_receive_post_reply()
1026 old_skb = priv->RcvCtl[ctx].skb; in mpt_lan_receive_post_reply()
1028 l = priv->RcvCtl[ctx].len; in mpt_lan_receive_post_reply()
1036 dma_sync_single_for_cpu(&mpt_dev->pcidev->dev, in mpt_lan_receive_post_reply()
1037 priv->RcvCtl[ctx].dma, in mpt_lan_receive_post_reply()
1038 priv->RcvCtl[ctx].len, in mpt_lan_receive_post_reply()
1042 dma_sync_single_for_device(&mpt_dev->pcidev->dev, in mpt_lan_receive_post_reply()
1043 priv->RcvCtl[ctx].dma, in mpt_lan_receive_post_reply()
1044 priv->RcvCtl[ctx].len, in mpt_lan_receive_post_reply()
1047 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx; in mpt_lan_receive_post_reply()
1048 szrem -= l; in mpt_lan_receive_post_reply()
1050 spin_unlock_irqrestore(&priv->rxfidx_lock, flags); in mpt_lan_receive_post_reply()
1058 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n", in mpt_lan_receive_post_reply()
1061 return -ENOMEM; in mpt_lan_receive_post_reply()
1064 dma_sync_single_for_cpu(&mpt_dev->pcidev->dev, in mpt_lan_receive_post_reply()
1065 priv->RcvCtl[ctx].dma, in mpt_lan_receive_post_reply()
1066 priv->RcvCtl[ctx].len, in mpt_lan_receive_post_reply()
1071 dma_sync_single_for_device(&mpt_dev->pcidev->dev, in mpt_lan_receive_post_reply()
1072 priv->RcvCtl[ctx].dma, in mpt_lan_receive_post_reply()
1073 priv->RcvCtl[ctx].len, in mpt_lan_receive_post_reply()
1076 spin_lock_irqsave(&priv->rxfidx_lock, flags); in mpt_lan_receive_post_reply()
1077 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx; in mpt_lan_receive_post_reply()
1078 spin_unlock_irqrestore(&priv->rxfidx_lock, flags); in mpt_lan_receive_post_reply()
1081 spin_lock_irqsave(&priv->rxfidx_lock, flags); in mpt_lan_receive_post_reply()
1083 priv->RcvCtl[ctx].skb = NULL; in mpt_lan_receive_post_reply()
1085 dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma, in mpt_lan_receive_post_reply()
1086 priv->RcvCtl[ctx].len, DMA_FROM_DEVICE); in mpt_lan_receive_post_reply()
1087 priv->RcvCtl[ctx].dma = 0; in mpt_lan_receive_post_reply()
1089 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx; in mpt_lan_receive_post_reply()
1090 spin_unlock_irqrestore(&priv->rxfidx_lock, flags); in mpt_lan_receive_post_reply()
1095 atomic_sub(count, &priv->buckets_out); in mpt_lan_receive_post_reply()
1096 priv->total_received += count; in mpt_lan_receive_post_reply()
1098 if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) { in mpt_lan_receive_post_reply()
1102 priv->mpt_rxfidx_tail, in mpt_lan_receive_post_reply()
1105 return -1; in mpt_lan_receive_post_reply()
1109 printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! " in mpt_lan_receive_post_reply()
1110 "(priv->buckets_out = %d)\n", in mpt_lan_receive_post_reply()
1112 atomic_read(&priv->buckets_out)); in mpt_lan_receive_post_reply()
1115 "(priv->buckets_out = %d)\n", in mpt_lan_receive_post_reply()
1117 remaining, atomic_read(&priv->buckets_out)); in mpt_lan_receive_post_reply()
1119 if ((remaining < priv->bucketthresh) && in mpt_lan_receive_post_reply()
1120 ((atomic_read(&priv->buckets_out) - remaining) > in mpt_lan_receive_post_reply()
1137 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1143 struct net_device *dev = priv->dev; in mpt_lan_post_receive_buckets()
1144 MPT_ADAPTER *mpt_dev = priv->mpt_dev; in mpt_lan_post_receive_buckets()
1152 u32 len = (dev->mtu + dev->hard_header_len + 4); in mpt_lan_post_receive_buckets()
1156 curr = atomic_read(&priv->buckets_out); in mpt_lan_post_receive_buckets()
1157 buckets = (priv->max_buckets_out - curr); in mpt_lan_post_receive_buckets()
1163 max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) / in mpt_lan_post_receive_buckets()
1177 i = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); in mpt_lan_post_receive_buckets()
1178 mpt_dev->RequestNB[i] = 0; in mpt_lan_post_receive_buckets()
1183 pRecvReq->Function = MPI_FUNCTION_LAN_RECEIVE; in mpt_lan_post_receive_buckets()
1184 pRecvReq->ChainOffset = 0; in mpt_lan_post_receive_buckets()
1185 pRecvReq->MsgFlags = 0; in mpt_lan_post_receive_buckets()
1186 pRecvReq->PortNumber = priv->pnum; in mpt_lan_post_receive_buckets()
1188 pTrans = (SGETransaction32_t *) pRecvReq->SG_List; in mpt_lan_post_receive_buckets()
1194 spin_lock_irqsave(&priv->rxfidx_lock, flags); in mpt_lan_post_receive_buckets()
1195 if (priv->mpt_rxfidx_tail < 0) { in mpt_lan_post_receive_buckets()
1198 spin_unlock_irqrestore(&priv->rxfidx_lock, in mpt_lan_post_receive_buckets()
1203 ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--]; in mpt_lan_post_receive_buckets()
1205 skb = priv->RcvCtl[ctx].skb; in mpt_lan_post_receive_buckets()
1206 if (skb && (priv->RcvCtl[ctx].len != len)) { in mpt_lan_post_receive_buckets()
1207 dma_unmap_single(&mpt_dev->pcidev->dev, in mpt_lan_post_receive_buckets()
1208 priv->RcvCtl[ctx].dma, in mpt_lan_post_receive_buckets()
1209 priv->RcvCtl[ctx].len, in mpt_lan_post_receive_buckets()
1211 dev_kfree_skb(priv->RcvCtl[ctx].skb); in mpt_lan_post_receive_buckets()
1212 skb = priv->RcvCtl[ctx].skb = NULL; in mpt_lan_post_receive_buckets()
1221 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx; in mpt_lan_post_receive_buckets()
1222 spin_unlock_irqrestore(&priv->rxfidx_lock, flags); in mpt_lan_post_receive_buckets()
1226 dma = dma_map_single(&mpt_dev->pcidev->dev, in mpt_lan_post_receive_buckets()
1227 skb->data, len, in mpt_lan_post_receive_buckets()
1230 priv->RcvCtl[ctx].skb = skb; in mpt_lan_post_receive_buckets()
1231 priv->RcvCtl[ctx].dma = dma; in mpt_lan_post_receive_buckets()
1232 priv->RcvCtl[ctx].len = len; in mpt_lan_post_receive_buckets()
1235 spin_unlock_irqrestore(&priv->rxfidx_lock, flags); in mpt_lan_post_receive_buckets()
1237 pTrans->ContextSize = sizeof(u32); in mpt_lan_post_receive_buckets()
1238 pTrans->DetailsLength = 0; in mpt_lan_post_receive_buckets()
1239 pTrans->Flags = 0; in mpt_lan_post_receive_buckets()
1240 pTrans->TransactionContext = cpu_to_le32(ctx); in mpt_lan_post_receive_buckets()
1242 pSimple = (SGESimple64_t *) pTrans->TransactionDetails; in mpt_lan_post_receive_buckets()
1244 pSimple->FlagsLength = cpu_to_le32( in mpt_lan_post_receive_buckets()
1248 pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma); in mpt_lan_post_receive_buckets()
1250 pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32)); in mpt_lan_post_receive_buckets()
1252 pSimple->Address.High = 0; in mpt_lan_post_receive_buckets()
1264 pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT); in mpt_lan_post_receive_buckets()
1266 pRecvReq->BucketCount = cpu_to_le32(i); in mpt_lan_post_receive_buckets()
1276 priv->total_posted += i; in mpt_lan_post_receive_buckets()
1277 buckets -= i; in mpt_lan_post_receive_buckets()
1278 atomic_add(i, &priv->buckets_out); in mpt_lan_post_receive_buckets()
1282 dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n", in mpt_lan_post_receive_buckets()
1283 __func__, buckets, atomic_read(&priv->buckets_out))); in mpt_lan_post_receive_buckets()
1285 __func__, priv->total_posted, priv->total_received)); in mpt_lan_post_receive_buckets()
1287 clear_bit(0, &priv->post_buckets_active); in mpt_lan_post_receive_buckets()
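
The posting loop above packs as many bucket descriptors into one request frame as fit after the fixed LAN_RECEIVE header, each descriptor being a 32-bit transaction context element plus a 64-bit simple SGE. A sketch of that capacity arithmetic follows; the sizes used are plausible placeholders, not the real structure sizes.

#include <stdio.h>

int main(void)
{
	unsigned int req_sz     = 128;    /* assumed request frame size      */
	unsigned int fixed_hdr  = 48;     /* assumed LAN_RECEIVE header size */
	unsigned int per_bucket = 8 + 12; /* context element + 64-bit SGE    */

	unsigned int max_per_frame = (req_sz - fixed_hdr) / per_bucket;

	printf("max buckets per request frame: %u\n", max_per_frame); /* 4 */
	return 0;
}
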
1304 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1316 dev->mtu = MPT_LAN_MTU; in mpt_register_lan_device()
1320 priv->dev = dev; in mpt_register_lan_device()
1321 priv->mpt_dev = mpt_dev; in mpt_register_lan_device()
1322 priv->pnum = pnum; in mpt_register_lan_device()
1324 INIT_DELAYED_WORK(&priv->post_buckets_task, in mpt_register_lan_device()
1326 priv->post_buckets_active = 0; in mpt_register_lan_device()
1329 __LINE__, dev->mtu + dev->hard_header_len + 4)); in mpt_register_lan_device()
1331 atomic_set(&priv->buckets_out, 0); in mpt_register_lan_device()
1332 priv->total_posted = 0; in mpt_register_lan_device()
1333 priv->total_received = 0; in mpt_register_lan_device()
1334 priv->max_buckets_out = max_buckets_out; in mpt_register_lan_device()
1335 if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out) in mpt_register_lan_device()
1336 priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets; in mpt_register_lan_device()
1340 mpt_dev->pfacts[0].MaxLanBuckets, in mpt_register_lan_device()
1342 priv->max_buckets_out)); in mpt_register_lan_device()
1344 priv->bucketthresh = priv->max_buckets_out * 2 / 3; in mpt_register_lan_device()
1345 spin_lock_init(&priv->txfidx_lock); in mpt_register_lan_device()
1346 spin_lock_init(&priv->rxfidx_lock); in mpt_register_lan_device()
1348 /* Grab pre-fetched LANPage1 stuff. :-) */ in mpt_register_lan_device()
1349 a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow; in mpt_register_lan_device()
1358 dev->addr_len = FC_ALEN; in mpt_register_lan_device()
1360 memset(dev->broadcast, 0xff, FC_ALEN); in mpt_register_lan_device()
1365 priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ? in mpt_register_lan_device()
1368 dev->netdev_ops = &mpt_netdev_ops; in mpt_register_lan_device()
1369 dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT; in mpt_register_lan_device()
1371 /* MTU range: 96 - 65280 */ in mpt_register_lan_device()
1372 dev->min_mtu = MPT_LAN_MIN_MTU; in mpt_register_lan_device()
1373 dev->max_mtu = MPT_LAN_MAX_MTU; in mpt_register_lan_device()
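
Registration above clamps the requested bucket count against pfacts[0].MaxLanBuckets, derives bucketthresh as two thirds of the result, and limits tx_max_out to MPT_TX_MAX_OUT_LIM. A short sketch of that setup arithmetic follows, using example numbers for the IOC-reported limit.

#include <stdio.h>

int main(void)
{
	unsigned int max_lan_buckets = 64;        /* reported by the IOC (example) */
	unsigned int max_buckets_out = 127;       /* requested via module param    */
	unsigned int tx_max_out_lim  = 127;       /* driver hard limit (example)   */
	unsigned int tx_max_out_p    = 127 - 16;  /* module default, as above      */

	if (max_lan_buckets < max_buckets_out)
		max_buckets_out = max_lan_buckets;

	unsigned int bucketthresh = max_buckets_out * 2 / 3;
	unsigned int tx_max_out   = (tx_max_out_p <= tx_max_out_lim) ?
				    tx_max_out_p : tx_max_out_lim;

	printf("buckets_out max %u, thresh %u, tx max %u\n",
	       max_buckets_out, bucketthresh, tx_max_out);
	return 0;
}
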
1392 for (i = 0; i < ioc->facts.NumberOfPorts; i++) { in mptlan_probe()
1395 ioc->name, ioc->pfacts[i].PortNumber, in mptlan_probe()
1396 ioc->pfacts[i].ProtocolFlags, in mptlan_probe()
1398 ioc->pfacts[i].ProtocolFlags)); in mptlan_probe()
1400 if (!(ioc->pfacts[i].ProtocolFlags & in mptlan_probe()
1404 ioc->name); in mptlan_probe()
1411 "port%d as a LAN device\n", ioc->name, in mptlan_probe()
1412 ioc->pfacts[i].PortNumber); in mptlan_probe()
1417 "registered as '%s'\n", ioc->name, dev->name); in mptlan_probe()
1421 dev->dev_addr); in mptlan_probe()
1423 ioc->netdev = dev; in mptlan_probe()
1428 return -ENODEV; in mptlan_probe()
1435 struct net_device *dev = ioc->netdev; in mptlan_remove()
1438 cancel_delayed_work_sync(&priv->post_buckets_task); in mptlan_remove()
1458 return -EBUSY; in mpt_lan_init()
1467 return -EBUSY; in mpt_lan_init()
1490 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1494 struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data; in mpt_lan_type_trans()
1500 if (fch->dtype == htons(0xffff)) { in mpt_lan_type_trans()
1508 printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n", in mpt_lan_type_trans()
1511 fch->saddr); in mpt_lan_type_trans()
1514 if (*fch->daddr & 1) { in mpt_lan_type_trans()
1515 if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) { in mpt_lan_type_trans()
1516 skb->pkt_type = PACKET_BROADCAST; in mpt_lan_type_trans()
1518 skb->pkt_type = PACKET_MULTICAST; in mpt_lan_type_trans()
1521 if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) { in mpt_lan_type_trans()
1522 skb->pkt_type = PACKET_OTHERHOST; in mpt_lan_type_trans()
1524 skb->pkt_type = PACKET_HOST; in mpt_lan_type_trans()
1528 fcllc = (struct fcllc *)skb->data; in mpt_lan_type_trans()
1533 if (fcllc->dsap == EXTENDED_SAP && in mpt_lan_type_trans()
1534 (fcllc->ethertype == htons(ETH_P_IP) || in mpt_lan_type_trans()
1535 fcllc->ethertype == htons(ETH_P_ARP))) { in mpt_lan_type_trans()
1537 return fcllc->ethertype; in mpt_lan_type_trans()
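
mpt_lan_type_trans() above classifies a frame by its destination address: if the group bit of the first byte is set it is broadcast or multicast, otherwise it is PACKET_HOST or PACKET_OTHERHOST depending on whether the address matches the interface. A compact userspace sketch of that classification follows, with FC_ALEN-byte addresses and illustrative enum names.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FC_ALEN 6

enum pkt_type { PKT_HOST, PKT_OTHERHOST, PKT_BROADCAST, PKT_MULTICAST };

static enum pkt_type classify(const uint8_t *daddr,
			      const uint8_t *dev_addr,
			      const uint8_t *bcast)
{
	if (daddr[0] & 1)                       /* group bit set */
		return memcmp(daddr, bcast, FC_ALEN) ?
		       PKT_MULTICAST : PKT_BROADCAST;
	return memcmp(daddr, dev_addr, FC_ALEN) ?
	       PKT_OTHERHOST : PKT_HOST;
}

int main(void)
{
	uint8_t dev_addr[FC_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t bcast[FC_ALEN];

	memset(bcast, 0xff, FC_ALEN);
	printf("%d\n", classify(bcast, dev_addr, bcast));     /* PKT_BROADCAST */
	printf("%d\n", classify(dev_addr, dev_addr, bcast));  /* PKT_HOST      */
	return 0;
}
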
1543 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/