Lines Matching +full:master +full:- +full:stats

1 // SPDX-License-Identifier: GPL-2.0-only
41 #define SSIP_MAX_CMDS 5 /* Number of pre-allocated commands buffers */
42 #define SSIP_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1)
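
SSIP_BYTES_TO_FRAMES() above is a ceiling division by four: SSI moves data in 32-bit frames, so 1..4 payload bytes need one frame, 5..8 need two, and so on. A minimal standalone check of that arithmetic (plain userspace C, not driver code):

#include <assert.h>

/* Same expression as the driver macro: ceil(x / 4) for x >= 1. */
#define SSIP_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1)

int main(void)
{
        assert(SSIP_BYTES_TO_FRAMES(1) == 1);
        assert(SSIP_BYTES_TO_FRAMES(4) == 1);
        assert(SSIP_BYTES_TO_FRAMES(5) == 2);
        assert(SSIP_BYTES_TO_FRAMES(65535) == 16384);
        return 0;
}
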
101 * struct ssi_protocol - SSI protocol (McSAAB) data
133 struct timer_list keep_alive; /* wake-up workaround */
155 data = sg_virt(msg->sgt.sgl); in ssip_set_cmd()
163 data = sg_virt(msg->sgt.sgl); in ssip_get_cmd()
174 BUG_ON(msg->sgt.nents != (unsigned int)(skb_shinfo(skb)->nr_frags + 1)); in ssip_skb_to_msg()
176 sg = msg->sgt.sgl; in ssip_skb_to_msg()
177 sg_set_buf(sg, skb->data, skb_headlen(skb)); in ssip_skb_to_msg()
178 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in ssip_skb_to_msg()
181 frag = &skb_shinfo(skb)->frags[i]; in ssip_skb_to_msg()
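
The matches above cover only part of ssip_skb_to_msg(); the body of the per-fragment loop is not shown. As a hedged sketch of the usual pattern (generic kernel idiom, not a claim about the unmatched driver lines), the linear skb data fills the first scatterlist entry and each paged fragment the next one; skb_frag_off() assumes a reasonably recent kernel:

#include <linux/scatterlist.h>
#include <linux/skbuff.h>

/* Illustrative helper, not part of the driver. */
static void sketch_skb_to_sg(struct sk_buff *skb, struct scatterlist *sg)
{
        int i;

        sg_set_buf(sg, skb->data, skb_headlen(skb));
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                sg = sg_next(sg);       /* one entry per page fragment */
                sg_set_page(sg, skb_frag_page(frag), skb_frag_size(frag),
                            skb_frag_off(frag));
        }
}
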
191 skb = msg->context; in ssip_free_data()
192 pr_debug("free data: msg %p context %p skb %p\n", msg, msg->context, in ssip_free_data()
194 msg->destructor = NULL; in ssip_free_data()
204 msg = hsi_alloc_msg(skb_shinfo(skb)->nr_frags + 1, flags); in ssip_alloc_data()
208 msg->destructor = ssip_free_data; in ssip_alloc_data()
209 msg->channel = ssi->channel_id_data; in ssip_alloc_data()
210 msg->context = skb; in ssip_alloc_data()
217 struct ssi_protocol *ssi = hsi_client_drvdata(msg->cl); in ssip_release_cmd()
219 dev_dbg(&msg->cl->device, "Release cmd 0x%08x\n", ssip_get_cmd(msg)); in ssip_release_cmd()
220 spin_lock_bh(&ssi->lock); in ssip_release_cmd()
221 list_add_tail(&msg->link, &ssi->cmdqueue); in ssip_release_cmd()
222 spin_unlock_bh(&ssi->lock); in ssip_release_cmd()
229 BUG_ON(list_empty(&ssi->cmdqueue)); in ssip_claim_cmd()
231 spin_lock_bh(&ssi->lock); in ssip_claim_cmd()
232 msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link); in ssip_claim_cmd()
233 list_del(&msg->link); in ssip_claim_cmd()
234 spin_unlock_bh(&ssi->lock); in ssip_claim_cmd()
235 msg->destructor = ssip_release_cmd; in ssip_claim_cmd()
244 list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) { in ssip_free_cmds()
245 list_del(&msg->link); in ssip_free_cmds()
246 msg->destructor = NULL; in ssip_free_cmds()
247 kfree(sg_virt(msg->sgt.sgl)); in ssip_free_cmds()
267 sg_init_one(msg->sgt.sgl, buf, sizeof(*buf)); in ssip_alloc_cmds()
268 msg->channel = ssi->channel_id_cmd; in ssip_alloc_cmds()
269 list_add_tail(&msg->link, &ssi->cmdqueue); in ssip_alloc_cmds()
276 return -ENOMEM; in ssip_alloc_cmds()
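
ssip_alloc_cmds() fills ssi->cmdqueue with SSIP_MAX_CMDS pre-allocated command messages, ssip_claim_cmd() pops one under the spinlock, and ssip_release_cmd() (installed as the message destructor) returns it to the queue, so sending a command never allocates memory in atomic context. A hedged sketch of the same pool pattern with illustrative names (cmd_pool, pool_claim and pool_release are not the driver's):

#include <linux/hsi/hsi.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct cmd_pool {
        spinlock_t              lock;
        struct list_head        free;   /* pre-allocated struct hsi_msg */
};

static struct hsi_msg *pool_claim(struct cmd_pool *pool)
{
        struct hsi_msg *msg = NULL;

        spin_lock_bh(&pool->lock);
        if (!list_empty(&pool->free)) {
                msg = list_first_entry(&pool->free, struct hsi_msg, link);
                list_del(&msg->link);
        }
        spin_unlock_bh(&pool->lock);

        return msg;     /* the driver BUG()s on an empty queue instead */
}

static void pool_release(struct cmd_pool *pool, struct hsi_msg *msg)
{
        spin_lock_bh(&pool->lock);
        list_add_tail(&msg->link, &pool->free);
        spin_unlock_bh(&pool->lock);
}
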
281 ssi->recv_state = state; in ssip_set_rxstate()
284 del_timer(&ssi->rx_wd); in ssip_set_rxstate()
285 if (ssi->send_state == SEND_IDLE) in ssip_set_rxstate()
286 del_timer(&ssi->keep_alive); in ssip_set_rxstate()
290 if (atomic_read(&ssi->tx_usecnt)) in ssip_set_rxstate()
294 mod_timer(&ssi->keep_alive, jiffies + in ssip_set_rxstate()
296 mod_timer(&ssi->rx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); in ssip_set_rxstate()
305 ssi->send_state = state; in ssip_set_txstate()
309 del_timer(&ssi->tx_wd); in ssip_set_txstate()
310 if (ssi->recv_state == RECV_IDLE) in ssip_set_txstate()
311 del_timer(&ssi->keep_alive); in ssip_set_txstate()
316 mod_timer(&ssi->keep_alive, in ssip_set_txstate()
318 mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); in ssip_set_txstate()
327 struct hsi_client *master = ERR_PTR(-ENODEV); in ssip_slave_get_master() local
331 if (slave->device.parent == ssi->cl->device.parent) { in ssip_slave_get_master()
332 master = ssi->cl; in ssip_slave_get_master()
336 return master; in ssip_slave_get_master()
340 int ssip_slave_start_tx(struct hsi_client *master) in ssip_slave_start_tx() argument
342 struct ssi_protocol *ssi = hsi_client_drvdata(master); in ssip_slave_start_tx()
344 dev_dbg(&master->device, "start TX %d\n", atomic_read(&ssi->tx_usecnt)); in ssip_slave_start_tx()
345 spin_lock_bh(&ssi->lock); in ssip_slave_start_tx()
346 if (ssi->send_state == SEND_IDLE) { in ssip_slave_start_tx()
348 hsi_start_tx(master); in ssip_slave_start_tx()
350 spin_unlock_bh(&ssi->lock); in ssip_slave_start_tx()
351 atomic_inc(&ssi->tx_usecnt); in ssip_slave_start_tx()
357 int ssip_slave_stop_tx(struct hsi_client *master) in ssip_slave_stop_tx() argument
359 struct ssi_protocol *ssi = hsi_client_drvdata(master); in ssip_slave_stop_tx()
361 WARN_ON_ONCE(atomic_read(&ssi->tx_usecnt) == 0); in ssip_slave_stop_tx()
363 if (atomic_dec_and_test(&ssi->tx_usecnt)) { in ssip_slave_stop_tx()
364 spin_lock_bh(&ssi->lock); in ssip_slave_stop_tx()
365 if ((ssi->send_state == SEND_READY) || in ssip_slave_stop_tx()
366 (ssi->send_state == WAIT4READY)) { in ssip_slave_stop_tx()
368 hsi_stop_tx(master); in ssip_slave_stop_tx()
370 spin_unlock_bh(&ssi->lock); in ssip_slave_stop_tx()
372 dev_dbg(&master->device, "stop TX %d\n", atomic_read(&ssi->tx_usecnt)); in ssip_slave_stop_tx()
378 int ssip_slave_running(struct hsi_client *master) in ssip_slave_running() argument
380 struct ssi_protocol *ssi = hsi_client_drvdata(master); in ssip_slave_running()
381 return netif_running(ssi->netdev); in ssip_slave_running()
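
ssip_slave_start_tx()/ssip_slave_stop_tx() let the slave client (cmt-speech, per the comments above) reference-count the TX wake line: the first user raises it with hsi_start_tx(), the last one lowers it with hsi_stop_tx(). A simplified, hedged sketch of that counting (the real code additionally checks the McSAAB send state under ssi->lock):

#include <linux/atomic.h>
#include <linux/hsi/hsi.h>

static atomic_t wake_users = ATOMIC_INIT(0);    /* illustrative only */

static void wake_get(struct hsi_client *master)
{
        if (atomic_inc_return(&wake_users) == 1)
                hsi_start_tx(master);   /* first user raises the wake line */
}

static void wake_put(struct hsi_client *master)
{
        if (atomic_dec_and_test(&wake_users))
                hsi_stop_tx(master);    /* last user drops it */
}
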
391 if (netif_running(ssi->netdev)) in ssip_reset()
392 netif_carrier_off(ssi->netdev); in ssip_reset()
394 spin_lock_bh(&ssi->lock); in ssip_reset()
395 if (ssi->send_state != SEND_IDLE) in ssip_reset()
397 spin_unlock_bh(&ssi->lock); in ssip_reset()
398 if (test_and_clear_bit(SSIP_WAKETEST_FLAG, &ssi->flags)) in ssip_reset()
400 spin_lock_bh(&ssi->lock); in ssip_reset()
401 del_timer(&ssi->rx_wd); in ssip_reset()
402 del_timer(&ssi->tx_wd); in ssip_reset()
403 del_timer(&ssi->keep_alive); in ssip_reset()
404 ssi->main_state = 0; in ssip_reset()
405 ssi->send_state = 0; in ssip_reset()
406 ssi->recv_state = 0; in ssip_reset()
407 ssi->flags = 0; in ssip_reset()
408 ssi->rxid = 0; in ssip_reset()
409 ssi->txid = 0; in ssip_reset()
410 list_for_each_safe(head, tmp, &ssi->txqueue) { in ssip_reset()
412 dev_dbg(&cl->device, "Pending TX data\n"); in ssip_reset()
416 ssi->txqueue_len = 0; in ssip_reset()
417 spin_unlock_bh(&ssi->lock); in ssip_reset()
425 spin_lock_bh(&ssi->lock); in ssip_dump_state()
426 dev_err(&cl->device, "Main state: %d\n", ssi->main_state); in ssip_dump_state()
427 dev_err(&cl->device, "Recv state: %d\n", ssi->recv_state); in ssip_dump_state()
428 dev_err(&cl->device, "Send state: %d\n", ssi->send_state); in ssip_dump_state()
429 dev_err(&cl->device, "CMT %s\n", (ssi->main_state == ACTIVE) ? in ssip_dump_state()
431 dev_err(&cl->device, "Wake test %d\n", in ssip_dump_state()
432 test_bit(SSIP_WAKETEST_FLAG, &ssi->flags)); in ssip_dump_state()
433 dev_err(&cl->device, "Data RX id: %d\n", ssi->rxid); in ssip_dump_state()
434 dev_err(&cl->device, "Data TX id: %d\n", ssi->txid); in ssip_dump_state()
436 list_for_each_entry(msg, &ssi->txqueue, link) in ssip_dump_state()
437 dev_err(&cl->device, "pending TX data (%p)\n", msg); in ssip_dump_state()
438 spin_unlock_bh(&ssi->lock); in ssip_dump_state()
449 msg->complete = ssip_rxcmd_complete; in ssip_error()
456 struct hsi_client *cl = ssi->cl; in ssip_keep_alive()
458 dev_dbg(&cl->device, "Keep alive kick in: m(%d) r(%d) s(%d)\n", in ssip_keep_alive()
459 ssi->main_state, ssi->recv_state, ssi->send_state); in ssip_keep_alive()
461 spin_lock(&ssi->lock); in ssip_keep_alive()
462 if (ssi->recv_state == RECV_IDLE) in ssip_keep_alive()
463 switch (ssi->send_state) { in ssip_keep_alive()
465 if (atomic_read(&ssi->tx_usecnt) == 0) in ssip_keep_alive()
469 * Workaround for cmt-speech in that case in ssip_keep_alive()
473 spin_unlock(&ssi->lock); in ssip_keep_alive()
476 mod_timer(&ssi->keep_alive, jiffies + msecs_to_jiffies(SSIP_KATOUT)); in ssip_keep_alive()
477 spin_unlock(&ssi->lock); in ssip_keep_alive()
483 struct hsi_client *cl = ssi->cl; in ssip_rx_wd()
485 dev_err(&cl->device, "Watchdog triggered\n"); in ssip_rx_wd()
492 struct hsi_client *cl = ssi->cl; in ssip_tx_wd()
494 dev_err(&cl->device, "Watchdog triggered\n"); in ssip_tx_wd()
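
Both watchdog handlers follow the timer_list callback convention set up in ssi_protocol_probe(): timer_setup() binds the callback once, mod_timer() re-arms it from the state setters, and del_timer() stops it when the link goes idle. A hedged sketch of how such a handler recovers its ssi_protocol context; example_rx_wd is illustrative, not the driver's function:

#include <linux/timer.h>

/*
 * Illustrative handler; the driver's ssip_rx_wd()/ssip_tx_wd() also log
 * via dev_err() before resetting, as the matches above show.
 */
static void example_rx_wd(struct timer_list *t)
{
        struct ssi_protocol *ssi = from_timer(ssi, t, rx_wd);

        ssip_error(ssi->cl);    /* watchdog expired: reset the link */
}
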
503 dev_dbg(&cl->device, "Issuing BOOT INFO REQ command\n"); in ssip_send_bootinfo_req_cmd()
506 msg->complete = ssip_release_cmd; in ssip_send_bootinfo_req_cmd()
508 dev_dbg(&cl->device, "Issuing RX command\n"); in ssip_send_bootinfo_req_cmd()
510 msg->complete = ssip_rxcmd_complete; in ssip_send_bootinfo_req_cmd()
519 dev_dbg(&cl->device, "RX start M(%d) R(%d)\n", ssi->main_state, in ssip_start_rx()
520 ssi->recv_state); in ssip_start_rx()
521 spin_lock_bh(&ssi->lock); in ssip_start_rx()
526 if ((ssi->main_state != ACTIVE) || (ssi->recv_state == RECV_READY)) { in ssip_start_rx()
527 spin_unlock_bh(&ssi->lock); in ssip_start_rx()
531 spin_unlock_bh(&ssi->lock); in ssip_start_rx()
535 msg->complete = ssip_release_cmd; in ssip_start_rx()
536 dev_dbg(&cl->device, "Send READY\n"); in ssip_start_rx()
544 dev_dbg(&cl->device, "RX stop M(%d)\n", ssi->main_state); in ssip_stop_rx()
545 spin_lock_bh(&ssi->lock); in ssip_stop_rx()
546 if (likely(ssi->main_state == ACTIVE)) in ssip_stop_rx()
548 spin_unlock_bh(&ssi->lock); in ssip_stop_rx()
553 ssip_free_data(msg->context); in ssip_free_strans()
559 struct hsi_client *cl = msg->cl; in ssip_strans_complete()
563 data = msg->context; in ssip_strans_complete()
565 spin_lock_bh(&ssi->lock); in ssip_strans_complete()
567 spin_unlock_bh(&ssi->lock); in ssip_strans_complete()
577 spin_lock_bh(&ssi->lock); in ssip_xmit()
578 if (list_empty(&ssi->txqueue)) { in ssip_xmit()
579 spin_unlock_bh(&ssi->lock); in ssip_xmit()
582 dmsg = list_first_entry(&ssi->txqueue, struct hsi_msg, link); in ssip_xmit()
583 list_del(&dmsg->link); in ssip_xmit()
584 ssi->txqueue_len--; in ssip_xmit()
585 spin_unlock_bh(&ssi->lock); in ssip_xmit()
588 skb = dmsg->context; in ssip_xmit()
589 msg->context = dmsg; in ssip_xmit()
590 msg->complete = ssip_strans_complete; in ssip_xmit()
591 msg->destructor = ssip_free_strans; in ssip_xmit()
593 spin_lock_bh(&ssi->lock); in ssip_xmit()
594 ssip_set_cmd(msg, SSIP_START_TRANS_CMD(SSIP_BYTES_TO_FRAMES(skb->len), in ssip_xmit()
595 ssi->txid)); in ssip_xmit()
596 ssi->txid++; in ssip_xmit()
598 spin_unlock_bh(&ssi->lock); in ssip_xmit()
600 dev_dbg(&cl->device, "Send STRANS (%d frames)\n", in ssip_xmit()
601 SSIP_BYTES_TO_FRAMES(skb->len)); in ssip_xmit()
609 struct net_device *dev = skb->dev; in ssip_pn_rx()
612 dev_dbg(&dev->dev, "Drop RX packet\n"); in ssip_pn_rx()
613 dev->stats.rx_dropped++; in ssip_pn_rx()
618 dev_dbg(&dev->dev, "Error drop RX packet\n"); in ssip_pn_rx()
619 dev->stats.rx_errors++; in ssip_pn_rx()
620 dev->stats.rx_length_errors++; in ssip_pn_rx()
624 dev->stats.rx_packets++; in ssip_pn_rx()
625 dev->stats.rx_bytes += skb->len; in ssip_pn_rx()
628 ((u16 *)skb->data)[2] = ntohs(((u16 *)skb->data)[2]); in ssip_pn_rx()
629 dev_dbg(&dev->dev, "RX length fixed (%04x -> %u)\n", in ssip_pn_rx()
630 ((u16 *)skb->data)[2], ntohs(((u16 *)skb->data)[2])); in ssip_pn_rx()
632 skb->protocol = htons(ETH_P_PHONET); in ssip_pn_rx()
640 struct hsi_client *cl = msg->cl; in ssip_rx_data_complete()
644 if (msg->status == HSI_STATUS_ERROR) { in ssip_rx_data_complete()
645 dev_err(&cl->device, "RX data error\n"); in ssip_rx_data_complete()
650 del_timer(&ssi->rx_wd); /* FIXME: Revisit */ in ssip_rx_data_complete()
651 skb = msg->context; in ssip_rx_data_complete()
665 switch (ssi->main_state) { in ssip_rx_bootinforeq()
667 dev_err(&cl->device, "Boot info req on active state\n"); in ssip_rx_bootinforeq()
672 spin_lock_bh(&ssi->lock); in ssip_rx_bootinforeq()
673 ssi->main_state = HANDSHAKE; in ssip_rx_bootinforeq()
674 spin_unlock_bh(&ssi->lock); in ssip_rx_bootinforeq()
676 if (!test_and_set_bit(SSIP_WAKETEST_FLAG, &ssi->flags)) in ssip_rx_bootinforeq()
679 spin_lock_bh(&ssi->lock); in ssip_rx_bootinforeq()
681 mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); in ssip_rx_bootinforeq()
682 spin_unlock_bh(&ssi->lock); in ssip_rx_bootinforeq()
683 dev_dbg(&cl->device, "Send BOOTINFO_RESP\n"); in ssip_rx_bootinforeq()
685 dev_warn(&cl->device, "boot info req verid mismatch\n"); in ssip_rx_bootinforeq()
688 msg->complete = ssip_release_cmd; in ssip_rx_bootinforeq()
692 dev_dbg(&cl->device, "Wrong state M(%d)\n", ssi->main_state); in ssip_rx_bootinforeq()
702 dev_warn(&cl->device, "boot info resp verid mismatch\n"); in ssip_rx_bootinforesp()
704 spin_lock_bh(&ssi->lock); in ssip_rx_bootinforesp()
705 if (ssi->main_state != ACTIVE) in ssip_rx_bootinforesp()
707 mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); in ssip_rx_bootinforesp()
709 dev_dbg(&cl->device, "boot info resp ignored M(%d)\n", in ssip_rx_bootinforesp()
710 ssi->main_state); in ssip_rx_bootinforesp()
711 spin_unlock_bh(&ssi->lock); in ssip_rx_bootinforesp()
719 spin_lock_bh(&ssi->lock); in ssip_rx_waketest()
720 if (ssi->main_state != HANDSHAKE) { in ssip_rx_waketest()
721 dev_dbg(&cl->device, "wake lines test ignored M(%d)\n", in ssip_rx_waketest()
722 ssi->main_state); in ssip_rx_waketest()
723 spin_unlock_bh(&ssi->lock); in ssip_rx_waketest()
726 spin_unlock_bh(&ssi->lock); in ssip_rx_waketest()
728 if (test_and_clear_bit(SSIP_WAKETEST_FLAG, &ssi->flags)) in ssip_rx_waketest()
731 spin_lock_bh(&ssi->lock); in ssip_rx_waketest()
732 ssi->main_state = ACTIVE; in ssip_rx_waketest()
733 del_timer(&ssi->tx_wd); /* Stop boot handshake timer */ in ssip_rx_waketest()
734 spin_unlock_bh(&ssi->lock); in ssip_rx_waketest()
736 dev_notice(&cl->device, "WAKELINES TEST %s\n", in ssip_rx_waketest()
742 dev_dbg(&cl->device, "CMT is ONLINE\n"); in ssip_rx_waketest()
743 netif_wake_queue(ssi->netdev); in ssip_rx_waketest()
744 netif_carrier_on(ssi->netdev); in ssip_rx_waketest()
751 spin_lock_bh(&ssi->lock); in ssip_rx_ready()
752 if (unlikely(ssi->main_state != ACTIVE)) { in ssip_rx_ready()
753 dev_dbg(&cl->device, "READY on wrong state: S(%d) M(%d)\n", in ssip_rx_ready()
754 ssi->send_state, ssi->main_state); in ssip_rx_ready()
755 spin_unlock_bh(&ssi->lock); in ssip_rx_ready()
758 if (ssi->send_state != WAIT4READY) { in ssip_rx_ready()
759 dev_dbg(&cl->device, "Ignore spurious READY command\n"); in ssip_rx_ready()
760 spin_unlock_bh(&ssi->lock); in ssip_rx_ready()
764 spin_unlock_bh(&ssi->lock); in ssip_rx_ready()
775 dev_dbg(&cl->device, "RX strans: %d frames\n", len); in ssip_rx_strans()
776 spin_lock_bh(&ssi->lock); in ssip_rx_strans()
777 if (unlikely(ssi->main_state != ACTIVE)) { in ssip_rx_strans()
778 dev_err(&cl->device, "START TRANS wrong state: S(%d) M(%d)\n", in ssip_rx_strans()
779 ssi->send_state, ssi->main_state); in ssip_rx_strans()
780 spin_unlock_bh(&ssi->lock); in ssip_rx_strans()
784 if (unlikely(SSIP_MSG_ID(cmd) != ssi->rxid)) { in ssip_rx_strans()
785 dev_err(&cl->device, "START TRANS id %d expected %d\n", in ssip_rx_strans()
786 SSIP_MSG_ID(cmd), ssi->rxid); in ssip_rx_strans()
787 spin_unlock_bh(&ssi->lock); in ssip_rx_strans()
790 ssi->rxid++; in ssip_rx_strans()
791 spin_unlock_bh(&ssi->lock); in ssip_rx_strans()
792 skb = netdev_alloc_skb(ssi->netdev, len * 4); in ssip_rx_strans()
794 dev_err(&cl->device, "No memory for rx skb\n"); in ssip_rx_strans()
800 dev_err(&cl->device, "No memory for RX data msg\n"); in ssip_rx_strans()
803 msg->complete = ssip_rx_data_complete; in ssip_rx_strans()
815 struct hsi_client *cl = msg->cl; in ssip_rxcmd_complete()
819 if (msg->status == HSI_STATUS_ERROR) { in ssip_rxcmd_complete()
820 dev_err(&cl->device, "RX error detected\n"); in ssip_rxcmd_complete()
826 dev_dbg(&cl->device, "RX cmd: 0x%08x\n", cmd); in ssip_rxcmd_complete()
847 dev_warn(&cl->device, "command 0x%08x not supported\n", cmd); in ssip_rxcmd_complete()
854 struct hsi_client *cl = msg->cl; in ssip_swbreak_complete()
858 spin_lock_bh(&ssi->lock); in ssip_swbreak_complete()
859 if (list_empty(&ssi->txqueue)) { in ssip_swbreak_complete()
860 if (atomic_read(&ssi->tx_usecnt)) { in ssip_swbreak_complete()
866 spin_unlock_bh(&ssi->lock); in ssip_swbreak_complete()
868 spin_unlock_bh(&ssi->lock); in ssip_swbreak_complete()
871 netif_wake_queue(ssi->netdev); in ssip_swbreak_complete()
876 struct hsi_client *cl = msg->cl; in ssip_tx_data_complete()
880 if (msg->status == HSI_STATUS_ERROR) { in ssip_tx_data_complete()
881 dev_err(&cl->device, "TX data error\n"); in ssip_tx_data_complete()
885 spin_lock_bh(&ssi->lock); in ssip_tx_data_complete()
886 if (list_empty(&ssi->txqueue)) { in ssip_tx_data_complete()
888 spin_unlock_bh(&ssi->lock); in ssip_tx_data_complete()
891 cmsg->complete = ssip_swbreak_complete; in ssip_tx_data_complete()
892 dev_dbg(&cl->device, "Send SWBREAK\n"); in ssip_tx_data_complete()
895 spin_unlock_bh(&ssi->lock); in ssip_tx_data_complete()
918 struct hsi_client *cl = to_hsi_client(dev->dev.parent); in ssip_pn_open()
924 dev_err(&cl->device, "SSI port already claimed\n"); in ssip_pn_open()
929 dev_err(&cl->device, "Register HSI port event failed (%d)\n", in ssip_pn_open()
934 dev_dbg(&cl->device, "Configuring SSI port\n"); in ssip_pn_open()
937 if (!test_and_set_bit(SSIP_WAKETEST_FLAG, &ssi->flags)) in ssip_pn_open()
940 spin_lock_bh(&ssi->lock); in ssip_pn_open()
941 ssi->main_state = HANDSHAKE; in ssip_pn_open()
942 spin_unlock_bh(&ssi->lock); in ssip_pn_open()
951 struct hsi_client *cl = to_hsi_client(dev->dev.parent); in ssip_pn_stop()
964 struct hsi_client *cl = ssi->cl; in ssip_xmit_work()
971 struct hsi_client *cl = to_hsi_client(dev->dev.parent); in ssip_pn_xmit()
975 if ((skb->protocol != htons(ETH_P_PHONET)) || in ssip_pn_xmit()
976 (skb->len < SSIP_MIN_PN_HDR)) in ssip_pn_xmit()
978 /* Pad to 32-bits - FIXME: Revisit*/ in ssip_pn_xmit()
979 if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3))) in ssip_pn_xmit()
990 ((u16 *)skb->data)[2] = htons(((u16 *)skb->data)[2]); in ssip_pn_xmit()
994 dev_dbg(&cl->device, "Dropping tx data: No memory\n"); in ssip_pn_xmit()
997 msg->complete = ssip_tx_data_complete; in ssip_pn_xmit()
999 spin_lock_bh(&ssi->lock); in ssip_pn_xmit()
1000 if (unlikely(ssi->main_state != ACTIVE)) { in ssip_pn_xmit()
1001 spin_unlock_bh(&ssi->lock); in ssip_pn_xmit()
1002 dev_dbg(&cl->device, "Dropping tx data: CMT is OFFLINE\n"); in ssip_pn_xmit()
1005 list_add_tail(&msg->link, &ssi->txqueue); in ssip_pn_xmit()
1006 ssi->txqueue_len++; in ssip_pn_xmit()
1007 if (dev->tx_queue_len < ssi->txqueue_len) { in ssip_pn_xmit()
1008 dev_info(&cl->device, "TX queue full %d\n", ssi->txqueue_len); in ssip_pn_xmit()
1011 if (ssi->send_state == SEND_IDLE) { in ssip_pn_xmit()
1013 spin_unlock_bh(&ssi->lock); in ssip_pn_xmit()
1014 dev_dbg(&cl->device, "Start TX qlen %d\n", ssi->txqueue_len); in ssip_pn_xmit()
1016 } else if (ssi->send_state == SEND_READY) { in ssip_pn_xmit()
1017 /* Needed for cmt-speech workaround */ in ssip_pn_xmit()
1018 dev_dbg(&cl->device, "Start TX on SEND READY qlen %d\n", in ssip_pn_xmit()
1019 ssi->txqueue_len); in ssip_pn_xmit()
1020 spin_unlock_bh(&ssi->lock); in ssip_pn_xmit()
1021 schedule_work(&ssi->work); in ssip_pn_xmit()
1023 spin_unlock_bh(&ssi->lock); in ssip_pn_xmit()
1025 dev->stats.tx_packets++; in ssip_pn_xmit()
1026 dev->stats.tx_bytes += skb->len; in ssip_pn_xmit()
1034 dev->stats.tx_dropped++; in ssip_pn_xmit()
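
Because SSI transfers whole 32-bit frames, ssip_pn_xmit() above pads the skb to a multiple of four bytes before queueing it; 4 - (skb->len & 3) is the number of bytes missing from the last frame. A standalone check of that arithmetic (plain userspace C, not driver code):

#include <stdio.h>

int main(void)
{
        for (unsigned int len = 5; len <= 8; len++) {
                unsigned int pad = (len & 3) ? 4 - (len & 3) : 0;

                printf("len %u -> pad %u -> %u frame(s)\n",
                       len, pad, (len + pad) / 4);
        }
        return 0;
}
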
1040 void ssip_reset_event(struct hsi_client *master) in ssip_reset_event() argument
1042 struct ssi_protocol *ssi = hsi_client_drvdata(master); in ssip_reset_event()
1043 dev_err(&ssi->cl->device, "CMT reset detected!\n"); in ssip_reset_event()
1044 ssip_error(ssi->cl); in ssip_reset_event()
1058 dev->features = 0; in ssip_pn_setup()
1059 dev->netdev_ops = &ssip_pn_ops; in ssip_pn_setup()
1060 dev->type = ARPHRD_PHONET; in ssip_pn_setup()
1061 dev->flags = IFF_POINTOPOINT | IFF_NOARP; in ssip_pn_setup()
1062 dev->mtu = SSIP_DEFAULT_MTU; in ssip_pn_setup()
1063 dev->hard_header_len = 1; in ssip_pn_setup()
1064 dev->addr_len = 1; in ssip_pn_setup()
1066 dev->tx_queue_len = SSIP_TXQUEUE_LEN; in ssip_pn_setup()
1068 dev->needs_free_netdev = true; in ssip_pn_setup()
1069 dev->header_ops = &phonet_header_ops; in ssip_pn_setup()
1081 return -ENOMEM; in ssi_protocol_probe()
1083 spin_lock_init(&ssi->lock); in ssi_protocol_probe()
1084 timer_setup(&ssi->rx_wd, ssip_rx_wd, TIMER_DEFERRABLE); in ssi_protocol_probe()
1085 timer_setup(&ssi->tx_wd, ssip_tx_wd, TIMER_DEFERRABLE); in ssi_protocol_probe()
1086 timer_setup(&ssi->keep_alive, ssip_keep_alive, 0); in ssi_protocol_probe()
1087 INIT_LIST_HEAD(&ssi->txqueue); in ssi_protocol_probe()
1088 INIT_LIST_HEAD(&ssi->cmdqueue); in ssi_protocol_probe()
1089 atomic_set(&ssi->tx_usecnt, 0); in ssi_protocol_probe()
1091 ssi->cl = cl; in ssi_protocol_probe()
1092 INIT_WORK(&ssi->work, ssip_xmit_work); in ssi_protocol_probe()
1094 ssi->channel_id_cmd = hsi_get_channel_id_by_name(cl, "mcsaab-control"); in ssi_protocol_probe()
1095 if (ssi->channel_id_cmd < 0) { in ssi_protocol_probe()
1096 err = ssi->channel_id_cmd; in ssi_protocol_probe()
1101 ssi->channel_id_data = hsi_get_channel_id_by_name(cl, "mcsaab-data"); in ssi_protocol_probe()
1102 if (ssi->channel_id_data < 0) { in ssi_protocol_probe()
1103 err = ssi->channel_id_data; in ssi_protocol_probe()
1114 ssi->netdev = alloc_netdev(0, ifname, NET_NAME_UNKNOWN, ssip_pn_setup); in ssi_protocol_probe()
1115 if (!ssi->netdev) { in ssi_protocol_probe()
1117 err = -ENOMEM; in ssi_protocol_probe()
1121 /* MTU range: 6 - 65535 */ in ssi_protocol_probe()
1122 ssi->netdev->min_mtu = PHONET_MIN_MTU; in ssi_protocol_probe()
1123 ssi->netdev->max_mtu = SSIP_MAX_MTU; in ssi_protocol_probe()
1125 SET_NETDEV_DEV(ssi->netdev, dev); in ssi_protocol_probe()
1126 netif_carrier_off(ssi->netdev); in ssi_protocol_probe()
1127 err = register_netdev(ssi->netdev); in ssi_protocol_probe()
1133 list_add(&ssi->link, &ssip_list); in ssi_protocol_probe()
1136 ssi->channel_id_cmd, ssi->channel_id_data); in ssi_protocol_probe()
1140 free_netdev(ssi->netdev); in ssi_protocol_probe()
1154 list_del(&ssi->link); in ssi_protocol_remove()
1155 unregister_netdev(ssi->netdev); in ssi_protocol_remove()
1165 .name = "ssi-protocol",
1187 MODULE_ALIAS("hsi:ssi-protocol");
1189 MODULE_AUTHOR("Remi Denis-Courmont <remi.denis-courmont@nokia.com>");