Lines Matching full:net
24 #include <net/ip6_checksum.h>
240 static int tbnet_login_response(struct tbnet *net, u64 route, u8 sequence, in tbnet_login_response() argument
244 struct tb_xdomain *xd = net->xd; in tbnet_login_response()
250 memcpy(reply.receiver_mac, net->dev->dev_addr, ETH_ALEN); in tbnet_login_response()
257 static int tbnet_login_request(struct tbnet *net, u8 sequence) in tbnet_login_request() argument
261 struct tb_xdomain *xd = net->xd; in tbnet_login_request()
266 atomic_inc_return(&net->command_id)); in tbnet_login_request()
269 request.transmit_path = net->local_transmit_path; in tbnet_login_request()
277 static int tbnet_logout_response(struct tbnet *net, u64 route, u8 sequence, in tbnet_logout_response() argument
281 struct tb_xdomain *xd = net->xd; in tbnet_logout_response()
286 atomic_inc_return(&net->command_id)); in tbnet_logout_response()
291 static int tbnet_logout_request(struct tbnet *net) in tbnet_logout_request() argument
295 struct tb_xdomain *xd = net->xd; in tbnet_logout_request()
300 atomic_inc_return(&net->command_id)); in tbnet_logout_request()
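
The four control helpers above share one shape: stamp the message with a fresh id from the per-device atomic command_id counter, then hand it to the XDomain layer and wait for the reply. A condensed sketch of that pattern follows; the message layout, timeout, and packet types are illustrative assumptions (struct tbnet and its fields are the driver's own, assumed in scope), not code taken from this file.

    #include <linux/atomic.h>
    #include <linux/thunderbolt.h>

    struct tbnet_login_sketch {         /* hypothetical message layout */
        u32 command_id;
        u32 transmit_path;              /* HopID we ask the peer to use */
    };

    static int tbnet_login_request_sketch(struct tbnet *net)
    {
        struct tbnet_login_sketch request = {}, reply;

        /* A unique id per outgoing packet lets the reply be matched
         * back to its request (see lines 266, 286 and 300). */
        request.command_id = atomic_inc_return(&net->command_id);

        /* The login advertises the HopID this host wants the remote
         * end to transmit on (line 269). */
        request.transmit_path = net->local_transmit_path;

        return tb_xdomain_request(net->xd, &request, sizeof(request),
                                  TB_CFG_PKG_XDOMAIN_RESP,
                                  &reply, sizeof(reply),
                                  TB_CFG_PKG_XDOMAIN_RESP,
                                  1000 /* assumed timeout, ms */);
    }
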
308 static void start_login(struct tbnet *net) in start_login() argument
310 netdev_dbg(net->dev, "login started\n"); in start_login()
312 mutex_lock(&net->connection_lock); in start_login()
313 net->login_sent = false; in start_login()
314 net->login_received = false; in start_login()
315 mutex_unlock(&net->connection_lock); in start_login()
317 queue_delayed_work(system_long_wq, &net->login_work, in start_login()
321 static void stop_login(struct tbnet *net) in stop_login() argument
323 cancel_delayed_work_sync(&net->login_work); in stop_login()
324 cancel_work_sync(&net->connected_work); in stop_login()
326 netdev_dbg(net->dev, "login stopped\n"); in stop_login()
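
start_login() and stop_login() manage a small handshake state machine. The structure below is inferred from the accessors visible in this listing; the field names match, but the layout and comments are guesses, and the real struct tbnet carries many more members.

    #include <linux/mutex.h>
    #include <linux/workqueue.h>

    struct tbnet_handshake_sketch {
        struct mutex connection_lock;       /* guards the flags below */
        bool login_sent;                    /* our login was answered */
        bool login_received;                /* the peer's login reached us */
        unsigned int login_retries;         /* gives up at TBNET_LOGIN_RETRIES */
        struct delayed_work login_work;     /* retransmits the login request */
        struct work_struct connected_work;  /* brings the link up once both
                                             * login flags are set */
    };

Note the cancellation order in stop_login() (lines 323..324): login_work is cancelled synchronously before connected_work, so a late login retry cannot re-queue the connect path in the middle of a teardown.
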
372 static void tbnet_tear_down(struct tbnet *net, bool send_logout) in tbnet_tear_down() argument
374 netif_carrier_off(net->dev); in tbnet_tear_down()
375 netif_stop_queue(net->dev); in tbnet_tear_down()
377 stop_login(net); in tbnet_tear_down()
379 mutex_lock(&net->connection_lock); in tbnet_tear_down()
381 if (net->login_sent && net->login_received) { in tbnet_tear_down()
385 netdev_dbg(net->dev, "sending logout request %u\n", in tbnet_tear_down()
387 ret = tbnet_logout_request(net); in tbnet_tear_down()
392 tb_ring_stop(net->rx_ring.ring); in tbnet_tear_down()
393 tb_ring_stop(net->tx_ring.ring); in tbnet_tear_down()
394 tbnet_free_buffers(&net->rx_ring); in tbnet_tear_down()
395 tbnet_free_buffers(&net->tx_ring); in tbnet_tear_down()
397 ret = tb_xdomain_disable_paths(net->xd, in tbnet_tear_down()
398 net->local_transmit_path, in tbnet_tear_down()
399 net->rx_ring.ring->hop, in tbnet_tear_down()
400 net->remote_transmit_path, in tbnet_tear_down()
401 net->tx_ring.ring->hop); in tbnet_tear_down()
403 netdev_warn(net->dev, "failed to disable DMA paths\n"); in tbnet_tear_down()
405 tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path); in tbnet_tear_down()
406 net->remote_transmit_path = 0; in tbnet_tear_down()
409 net->login_retries = 0; in tbnet_tear_down()
410 net->login_sent = false; in tbnet_tear_down()
411 net->login_received = false; in tbnet_tear_down()
413 netdev_dbg(net->dev, "network traffic stopped\n"); in tbnet_tear_down()
415 mutex_unlock(&net->connection_lock); in tbnet_tear_down()
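
tbnet_tear_down() disables the DMA paths with exactly the same four-tuple that tbnet_connected_work() enables further down (lines 664..667). A hypothetical helper, not present in the driver, makes the pairing explicit: each side's transmit HopID is matched with the local ring its frames land in.

    /* Hypothetical helper; both tb_xdomain_enable_paths() and
     * tb_xdomain_disable_paths() take the same argument order. */
    static int tbnet_set_paths_sketch(struct tbnet *net, bool enable)
    {
        if (enable)
            return tb_xdomain_enable_paths(net->xd,
                                           net->local_transmit_path,
                                           net->rx_ring.ring->hop,
                                           net->remote_transmit_path,
                                           net->tx_ring.ring->hop);

        return tb_xdomain_disable_paths(net->xd,
                                        net->local_transmit_path,
                                        net->rx_ring.ring->hop,
                                        net->remote_transmit_path,
                                        net->tx_ring.ring->hop);
    }
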
421 struct tbnet *net = data; in tbnet_handle_packet() local
430 if (!uuid_equal(&pkg->hdr.initiator_uuid, net->xd->remote_uuid)) in tbnet_handle_packet()
432 if (!uuid_equal(&pkg->hdr.target_uuid, net->xd->local_uuid)) in tbnet_handle_packet()
437 if (route != net->xd->route) in tbnet_handle_packet()
446 netdev_dbg(net->dev, "remote login request received\n"); in tbnet_handle_packet()
447 if (!netif_running(net->dev)) in tbnet_handle_packet()
450 ret = tbnet_login_response(net, route, sequence, in tbnet_handle_packet()
453 netdev_dbg(net->dev, "remote login response sent\n"); in tbnet_handle_packet()
455 mutex_lock(&net->connection_lock); in tbnet_handle_packet()
456 net->login_received = true; in tbnet_handle_packet()
457 net->remote_transmit_path = pkg->transmit_path; in tbnet_handle_packet()
463 if (net->login_retries >= TBNET_LOGIN_RETRIES || in tbnet_handle_packet()
464 !net->login_sent) { in tbnet_handle_packet()
465 net->login_retries = 0; in tbnet_handle_packet()
467 &net->login_work, 0); in tbnet_handle_packet()
469 mutex_unlock(&net->connection_lock); in tbnet_handle_packet()
471 queue_work(system_long_wq, &net->connected_work); in tbnet_handle_packet()
476 netdev_dbg(net->dev, "remote logout request received\n"); in tbnet_handle_packet()
477 ret = tbnet_logout_response(net, route, sequence, command_id); in tbnet_handle_packet()
479 netdev_dbg(net->dev, "remote logout response sent\n"); in tbnet_handle_packet()
480 queue_work(system_long_wq, &net->disconnect_work); in tbnet_handle_packet()
489 netdev_warn(net->dev, "failed to send ThunderboltIP response\n"); in tbnet_handle_packet()
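
Before acting on any control packet, tbnet_handle_packet() authenticates it against the XDomain this device is bound to (lines 430..437). Pulled out as a predicate, the check reads as follows; the helper name is illustrative.

    #include <linux/uuid.h>

    static bool tbnet_packet_is_ours_sketch(const struct tbnet *net,
                                            const uuid_t *initiator,
                                            const uuid_t *target, u64 route)
    {
        /* Sender and receiver UUIDs must both match the bound link... */
        if (!uuid_equal(initiator, net->xd->remote_uuid))
            return false;
        if (!uuid_equal(target, net->xd->local_uuid))
            return false;
        /* ...and so must the route; anything else is ignored. */
        return route == net->xd->route;
    }
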
499 static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers) in tbnet_alloc_rx_buffers() argument
501 struct tbnet_ring *ring = &net->rx_ring; in tbnet_alloc_rx_buffers()
531 tf->dev = net->dev; in tbnet_alloc_rx_buffers()
548 static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net) in tbnet_get_tx_buffer() argument
550 struct tbnet_ring *ring = &net->tx_ring; in tbnet_get_tx_buffer()
573 struct tbnet *net = netdev_priv(tf->dev); in tbnet_tx_callback() local
576 net->tx_ring.prod++; in tbnet_tx_callback()
578 if (tbnet_available_buffers(&net->tx_ring) >= TBNET_RING_SIZE / 2) in tbnet_tx_callback()
579 netif_wake_queue(net->dev); in tbnet_tx_callback()
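
Lines 573..579 are the TX completion path. The reconstruction below fills in the callback signature from the ring_cb type in <linux/thunderbolt.h>; treat it as a reading aid rather than the driver's exact code.

    static void tbnet_tx_callback_sketch(struct tb_ring *ring,
                                         struct ring_frame *frame,
                                         bool canceled)
    {
        struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
        struct tbnet *net = netdev_priv(tf->dev);

        /* One more TX buffer is free again. */
        net->tx_ring.prod++;

        /* Wake the queue only once half the ring is free, so a busy
         * queue does not flap between stopped and started. */
        if (tbnet_available_buffers(&net->tx_ring) >= TBNET_RING_SIZE / 2)
            netif_wake_queue(net->dev);
    }
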
582 static int tbnet_alloc_tx_buffers(struct tbnet *net) in tbnet_alloc_tx_buffers() argument
584 struct tbnet_ring *ring = &net->tx_ring; in tbnet_alloc_tx_buffers()
607 tf->dev = net->dev; in tbnet_alloc_tx_buffers()
624 struct tbnet *net = container_of(work, typeof(*net), connected_work); in tbnet_connected_work() local
628 if (netif_carrier_ok(net->dev)) in tbnet_connected_work()
631 mutex_lock(&net->connection_lock); in tbnet_connected_work()
632 connected = net->login_sent && net->login_received; in tbnet_connected_work()
633 mutex_unlock(&net->connection_lock); in tbnet_connected_work()
638 netdev_dbg(net->dev, "login successful, enabling paths\n"); in tbnet_connected_work()
640 ret = tb_xdomain_alloc_in_hopid(net->xd, net->remote_transmit_path); in tbnet_connected_work()
641 if (ret != net->remote_transmit_path) { in tbnet_connected_work()
642 netdev_err(net->dev, "failed to allocate Rx HopID\n"); in tbnet_connected_work()
653 tb_ring_start(net->tx_ring.ring); in tbnet_connected_work()
654 tb_ring_start(net->rx_ring.ring); in tbnet_connected_work()
656 ret = tbnet_alloc_rx_buffers(net, TBNET_RING_SIZE); in tbnet_connected_work()
660 ret = tbnet_alloc_tx_buffers(net); in tbnet_connected_work()
664 ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path, in tbnet_connected_work()
665 net->rx_ring.ring->hop, in tbnet_connected_work()
666 net->remote_transmit_path, in tbnet_connected_work()
667 net->tx_ring.ring->hop); in tbnet_connected_work()
669 netdev_err(net->dev, "failed to enable DMA paths\n"); in tbnet_connected_work()
673 netif_carrier_on(net->dev); in tbnet_connected_work()
674 netif_start_queue(net->dev); in tbnet_connected_work()
676 netdev_dbg(net->dev, "network traffic started\n"); in tbnet_connected_work()
680 tbnet_free_buffers(&net->tx_ring); in tbnet_connected_work()
682 tbnet_free_buffers(&net->rx_ring); in tbnet_connected_work()
684 tb_ring_stop(net->rx_ring.ring); in tbnet_connected_work()
685 tb_ring_stop(net->tx_ring.ring); in tbnet_connected_work()
686 tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path); in tbnet_connected_work()
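
Once both login flags are set, tbnet_connected_work() brings the link up in a fixed order: reserve the RX HopID, start the rings, allocate buffers, enable the DMA paths, and only then raise the carrier. The condensed sketch below collapses the driver's per-step goto unwinding into a single error branch and reuses tbnet_set_paths_sketch() from above, so it over-frees on early failures where the real code does not.

    static void tbnet_connected_work_sketch(struct work_struct *work)
    {
        struct tbnet *net = container_of(work, typeof(*net), connected_work);
        bool connected;

        if (netif_carrier_ok(net->dev))
            return;

        mutex_lock(&net->connection_lock);
        connected = net->login_sent && net->login_received;
        mutex_unlock(&net->connection_lock);
        if (!connected)
            return;

        /* Reserve the HopID the remote host said it transmits on. */
        if (tb_xdomain_alloc_in_hopid(net->xd, net->remote_transmit_path) !=
            net->remote_transmit_path)
            return;

        tb_ring_start(net->tx_ring.ring);
        tb_ring_start(net->rx_ring.ring);

        if (tbnet_alloc_rx_buffers(net, TBNET_RING_SIZE) ||
            tbnet_alloc_tx_buffers(net) ||
            tbnet_set_paths_sketch(net, true)) {
            /* Unwind in reverse order of setup. */
            tbnet_free_buffers(&net->tx_ring);
            tbnet_free_buffers(&net->rx_ring);
            tb_ring_stop(net->rx_ring.ring);
            tb_ring_stop(net->tx_ring.ring);
            tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path);
            return;
        }

        netif_carrier_on(net->dev);
        netif_start_queue(net->dev);
    }
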
691 struct tbnet *net = container_of(work, typeof(*net), login_work.work); in tbnet_login_work() local
695 if (netif_carrier_ok(net->dev)) in tbnet_login_work()
698 netdev_dbg(net->dev, "sending login request, retries=%u\n", in tbnet_login_work()
699 net->login_retries); in tbnet_login_work()
701 ret = tbnet_login_request(net, net->login_retries % 4); in tbnet_login_work()
703 netdev_dbg(net->dev, "sending login request failed, ret=%d\n", in tbnet_login_work()
705 if (net->login_retries++ < TBNET_LOGIN_RETRIES) { in tbnet_login_work()
706 queue_delayed_work(system_long_wq, &net->login_work, in tbnet_login_work()
709 netdev_info(net->dev, "ThunderboltIP login timed out\n"); in tbnet_login_work()
712 netdev_dbg(net->dev, "received login reply\n"); in tbnet_login_work()
714 net->login_retries = 0; in tbnet_login_work()
716 mutex_lock(&net->connection_lock); in tbnet_login_work()
717 net->login_sent = true; in tbnet_login_work()
718 mutex_unlock(&net->connection_lock); in tbnet_login_work()
720 queue_work(system_long_wq, &net->connected_work); in tbnet_login_work()
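
tbnet_login_work() is the retry engine for the handshake: the sequence number only needs to distinguish retransmits, so it cycles through 0..3 (line 701), and the attempt is abandoned after TBNET_LOGIN_RETRIES. The reconstruction below follows lines 691..720; the retry delay is an assumption.

    static void tbnet_login_work_sketch(struct work_struct *work)
    {
        struct tbnet *net = container_of(work, typeof(*net), login_work.work);

        /* Carrier up means the handshake already finished. */
        if (netif_carrier_ok(net->dev))
            return;

        if (tbnet_login_request(net, net->login_retries % 4)) {
            if (net->login_retries++ < TBNET_LOGIN_RETRIES)
                queue_delayed_work(system_long_wq, &net->login_work,
                                   msecs_to_jiffies(1000) /* assumed */);
            else
                netdev_info(net->dev, "ThunderboltIP login timed out\n");
            return;
        }

        net->login_retries = 0;

        mutex_lock(&net->connection_lock);
        net->login_sent = true;
        mutex_unlock(&net->connection_lock);

        /* The peer may already have logged in to us; let the
         * connected work decide whether both halves are done. */
        queue_work(system_long_wq, &net->connected_work);
    }
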
726 struct tbnet *net = container_of(work, typeof(*net), disconnect_work); in tbnet_disconnect_work() local
728 tbnet_tear_down(net, false); in tbnet_disconnect_work()
731 static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf, in tbnet_check_frame() argument
738 net->stats.rx_crc_errors++; in tbnet_check_frame()
741 net->stats.rx_over_errors++; in tbnet_check_frame()
748 net->stats.rx_length_errors++; in tbnet_check_frame()
758 net->stats.rx_length_errors++; in tbnet_check_frame()
765 if (net->skb && net->rx_hdr.frame_count) { in tbnet_check_frame()
767 if (frame_count != le32_to_cpu(net->rx_hdr.frame_count)) { in tbnet_check_frame()
768 net->stats.rx_length_errors++; in tbnet_check_frame()
775 if (frame_index != le16_to_cpu(net->rx_hdr.frame_index) + 1 || in tbnet_check_frame()
776 frame_id != le16_to_cpu(net->rx_hdr.frame_id)) { in tbnet_check_frame()
777 net->stats.rx_missed_errors++; in tbnet_check_frame()
781 if (net->skb->len + frame_size > TBNET_MAX_MTU) { in tbnet_check_frame()
782 net->stats.rx_length_errors++; in tbnet_check_frame()
791 net->stats.rx_length_errors++; in tbnet_check_frame()
795 net->stats.rx_missed_errors++; in tbnet_check_frame()
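
When a fragment extends an in-progress skb, tbnet_check_frame() requires it to agree with the header saved from the previous fragment (lines 765..782). As a standalone predicate:

    static bool tbnet_frame_continues_sketch(struct tbnet *net,
                                             u32 frame_count, u16 frame_index,
                                             u16 frame_id, u32 frame_size)
    {
        /* Every fragment of one packet carries the same total count. */
        if (frame_count != le32_to_cpu(net->rx_hdr.frame_count))
            return false;

        /* Fragments must arrive in order and share one fragment id;
         * a gap is counted as rx_missed_errors, not a length error. */
        if (frame_index != le16_to_cpu(net->rx_hdr.frame_index) + 1 ||
            frame_id != le16_to_cpu(net->rx_hdr.frame_id))
            return false;

        /* The reassembled packet must still fit below the MTU cap. */
        return net->skb->len + frame_size <= TBNET_MAX_MTU;
    }
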
804 struct tbnet *net = container_of(napi, struct tbnet, napi); in tbnet_poll() local
805 unsigned int cleaned_count = tbnet_available_buffers(&net->rx_ring); in tbnet_poll()
806 struct device *dma_dev = tb_ring_dma_device(net->rx_ring.ring); in tbnet_poll()
824 tbnet_alloc_rx_buffers(net, cleaned_count); in tbnet_poll()
828 frame = tb_ring_poll(net->rx_ring.ring); in tbnet_poll()
839 net->rx_ring.cons++; in tbnet_poll()
843 if (!tbnet_check_frame(net, tf, hdr)) { in tbnet_poll()
847 dev_kfree_skb_any(net->skb); in tbnet_poll()
848 net->skb = NULL; in tbnet_poll()
856 skb = net->skb; in tbnet_poll()
862 net->stats.rx_errors++; in tbnet_poll()
869 net->skb = skb; in tbnet_poll()
876 net->rx_hdr.frame_size = hdr->frame_size; in tbnet_poll()
877 net->rx_hdr.frame_count = hdr->frame_count; in tbnet_poll()
878 net->rx_hdr.frame_index = hdr->frame_index; in tbnet_poll()
879 net->rx_hdr.frame_id = hdr->frame_id; in tbnet_poll()
880 last = le16_to_cpu(net->rx_hdr.frame_index) == in tbnet_poll()
881 le32_to_cpu(net->rx_hdr.frame_count) - 1; in tbnet_poll()
884 net->stats.rx_bytes += frame_size; in tbnet_poll()
887 skb->protocol = eth_type_trans(skb, net->dev); in tbnet_poll()
889 napi_gro_receive(&net->napi, skb); in tbnet_poll()
890 net->skb = NULL; in tbnet_poll()
894 net->stats.rx_packets += rx_packets; in tbnet_poll()
897 tbnet_alloc_rx_buffers(net, cleaned_count); in tbnet_poll()
904 tb_ring_poll_complete(net->rx_ring.ring); in tbnet_poll()
911 struct tbnet *net = data; in tbnet_start_poll() local
913 napi_schedule(&net->napi); in tbnet_start_poll()
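
tbnet_start_poll() and tbnet_poll() follow the standard NAPI contract: the ring's poll hook only schedules NAPI, and the poll function consumes frames up to its budget, refilling RX buffers as it goes (lines 824 and 897), then re-enables the ring interrupt. A skeleton of that contract, with the per-frame work elided:

    static int tbnet_poll_skeleton(struct napi_struct *napi, int budget)
    {
        struct tbnet *net = container_of(napi, struct tbnet, napi);
        int rx_packets = 0;

        while (rx_packets < budget) {
            struct ring_frame *frame = tb_ring_poll(net->rx_ring.ring);

            if (!frame)
                break;

            /* The real poll validates the fragment, grows net->skb,
             * and hands completed packets to napi_gro_receive()
             * (lines 843..890). */
            rx_packets++;
        }

        /* Stop polling and re-enable the ring interrupt only if the
         * ring drained before the budget was spent. */
        if (rx_packets < budget && napi_complete_done(napi, rx_packets))
            tb_ring_poll_complete(net->rx_ring.ring);

        return rx_packets;
    }
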
918 struct tbnet *net = netdev_priv(dev); in tbnet_open() local
919 struct tb_xdomain *xd = net->xd; in tbnet_open()
933 net->tx_ring.ring = ring; in tbnet_open()
938 tb_ring_free(net->tx_ring.ring); in tbnet_open()
939 net->tx_ring.ring = NULL; in tbnet_open()
942 net->local_transmit_path = hopid; in tbnet_open()
949 if (tbnet_e2e && net->svc->prtcstns & TBNET_E2E) in tbnet_open()
953 net->tx_ring.ring->hop, sof_mask, in tbnet_open()
954 eof_mask, tbnet_start_poll, net); in tbnet_open()
958 tb_ring_free(net->tx_ring.ring); in tbnet_open()
959 net->tx_ring.ring = NULL; in tbnet_open()
962 net->rx_ring.ring = ring; in tbnet_open()
964 napi_enable(&net->napi); in tbnet_open()
965 start_login(net); in tbnet_open()
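
In tbnet_open(), the RX ring is always created in frame mode, while end-to-end flow control is opt-in: it needs both the tbnet_e2e module parameter and the E2E bit in the service's advertised protocol settings (line 949). Extracted as a helper for clarity:

    static unsigned int tbnet_rx_ring_flags_sketch(const struct tbnet *net)
    {
        unsigned int flags = RING_FLAG_FRAME;   /* one frame per buffer */

        if (tbnet_e2e && net->svc->prtcstns & TBNET_E2E)
            flags |= RING_FLAG_E2E;             /* hardware flow control */

        return flags;
    }
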
972 struct tbnet *net = netdev_priv(dev); in tbnet_stop() local
974 napi_disable(&net->napi); in tbnet_stop()
976 cancel_work_sync(&net->disconnect_work); in tbnet_stop()
977 tbnet_tear_down(net, true); in tbnet_stop()
979 tb_ring_free(net->rx_ring.ring); in tbnet_stop()
980 net->rx_ring.ring = NULL; in tbnet_stop()
982 tb_xdomain_release_out_hopid(net->xd, net->local_transmit_path); in tbnet_stop()
983 tb_ring_free(net->tx_ring.ring); in tbnet_stop()
984 net->tx_ring.ring = NULL; in tbnet_stop()
989 static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb, in tbnet_xmit_csum_and_map() argument
993 struct device *dma_dev = tb_ring_dma_device(net->tx_ring.ring); in tbnet_xmit_csum_and_map()
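
tbnet_xmit_csum_and_map() is why line 24 pulls in <net/ip6_checksum.h>: checksums are finished in software before the frames are handed to the DMA ring, and the IPv6 case needs the pseudo-header sum. The sketch below shows only that seeding step; field offsets, the csum_partial() pass over the payload, and the DMA mapping are elided, and the exact expressions are assumptions.

    #include <linux/if_ether.h>
    #include <net/ip.h>
    #include <net/ip6_checksum.h>

    /* Seed value written into the L4 checksum field before the
     * payload sum is folded in. Illustrative, not the driver's code. */
    static __sum16 tbnet_pseudo_csum_sketch(const struct sk_buff *skb)
    {
        if (skb->protocol == htons(ETH_P_IPV6))
            return ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                    &ipv6_hdr(skb)->daddr, 0,
                                    ipv6_hdr(skb)->nexthdr, 0);

        return ~csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                                  0, ip_hdr(skb)->protocol, 0);
    }
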
1106 struct tbnet *net = netdev_priv(dev); in tbnet_start_xmit() local
1108 u16 frame_id = atomic_read(&net->frame_id); in tbnet_start_xmit()
1122 if (tbnet_available_buffers(&net->tx_ring) < nframes) { in tbnet_start_xmit()
1123 netif_stop_queue(net->dev); in tbnet_start_xmit()
1127 frames[frame_index] = tbnet_get_tx_buffer(net); in tbnet_start_xmit()
1177 frames[frame_index] = tbnet_get_tx_buffer(net); in tbnet_start_xmit()
1215 if (!tbnet_xmit_csum_and_map(net, skb, frames, frame_index + 1)) in tbnet_start_xmit()
1219 tb_ring_tx(net->tx_ring.ring, &frames[i]->frame); in tbnet_start_xmit()
1221 if (net->svc->prtcstns & TBNET_MATCH_FRAGS_ID) in tbnet_start_xmit()
1222 atomic_inc(&net->frame_id); in tbnet_start_xmit()
1224 net->stats.tx_packets++; in tbnet_start_xmit()
1225 net->stats.tx_bytes += skb->len; in tbnet_start_xmit()
1234 net->tx_ring.cons -= frame_index; in tbnet_start_xmit()
1237 net->stats.tx_errors++; in tbnet_start_xmit()
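
tbnet_start_xmit() reserves every fragment slot up front: if the ring cannot hold all nframes the queue is stopped (lines 1122..1123) and tbnet_tx_callback() wakes it later, while a checksum or mapping failure simply rolls the consumer index back (line 1234) to return the reserved slots. The admission check as a helper; returning true means the caller would back off with NETDEV_TX_BUSY (an assumption here).

    static bool tbnet_tx_ring_full_sketch(struct tbnet *net,
                                          unsigned int nframes)
    {
        /* All fragments of the skb must fit, or none are taken. */
        if (tbnet_available_buffers(&net->tx_ring) < nframes) {
            netif_stop_queue(net->dev);
            return true;
        }
        return false;
    }
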
1245 struct tbnet *net = netdev_priv(dev); in tbnet_get_stats64() local
1247 stats->tx_packets = net->stats.tx_packets; in tbnet_get_stats64()
1248 stats->rx_packets = net->stats.rx_packets; in tbnet_get_stats64()
1249 stats->tx_bytes = net->stats.tx_bytes; in tbnet_get_stats64()
1250 stats->rx_bytes = net->stats.rx_bytes; in tbnet_get_stats64()
1251 stats->rx_errors = net->stats.rx_errors + net->stats.rx_length_errors + in tbnet_get_stats64()
1252 net->stats.rx_over_errors + net->stats.rx_crc_errors + in tbnet_get_stats64()
1253 net->stats.rx_missed_errors; in tbnet_get_stats64()
1254 stats->tx_errors = net->stats.tx_errors; in tbnet_get_stats64()
1255 stats->rx_length_errors = net->stats.rx_length_errors; in tbnet_get_stats64()
1256 stats->rx_over_errors = net->stats.rx_over_errors; in tbnet_get_stats64()
1257 stats->rx_crc_errors = net->stats.rx_crc_errors; in tbnet_get_stats64()
1258 stats->rx_missed_errors = net->stats.rx_missed_errors; in tbnet_get_stats64()
1270 const struct tbnet *net = netdev_priv(dev); in tbnet_generate_mac() local
1271 const struct tb_xdomain *xd = net->xd; in tbnet_generate_mac()
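
tbnet_generate_mac() derives the interface's MAC address from the XDomain identity, so the same peer pairing always yields the same address. The listing only shows the two accessors; the body below is a guessed reconstruction of the idea (a locally administered unicast address filled from a hash of the local UUID), not the driver's exact derivation.

    #include <linux/etherdevice.h>
    #include <linux/jhash.h>

    static void tbnet_generate_mac_sketch(struct net_device *dev)
    {
        const struct tbnet *net = netdev_priv(dev);
        const struct tb_xdomain *xd = net->xd;
        u8 addr[ETH_ALEN];
        u32 hash;

        addr[0] = 0x02;         /* locally administered, unicast */

        /* Stable hash of the local UUID fills the remaining bytes. */
        hash = jhash2((u32 *)xd->local_uuid, 4, 0);
        memcpy(addr + 1, &hash, sizeof(hash));
        hash = jhash2((u32 *)xd->local_uuid, 4, hash);
        addr[5] = hash & 0xff;

        eth_hw_addr_set(dev, addr);
    }
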
1291 struct tbnet *net; in tbnet_probe() local
1294 dev = alloc_etherdev(sizeof(*net)); in tbnet_probe()
1300 net = netdev_priv(dev); in tbnet_probe()
1301 INIT_DELAYED_WORK(&net->login_work, tbnet_login_work); in tbnet_probe()
1302 INIT_WORK(&net->connected_work, tbnet_connected_work); in tbnet_probe()
1303 INIT_WORK(&net->disconnect_work, tbnet_disconnect_work); in tbnet_probe()
1304 mutex_init(&net->connection_lock); in tbnet_probe()
1305 atomic_set(&net->command_id, 0); in tbnet_probe()
1306 atomic_set(&net->frame_id, 0); in tbnet_probe()
1307 net->svc = svc; in tbnet_probe()
1308 net->dev = dev; in tbnet_probe()
1309 net->xd = xd; in tbnet_probe()
1334 netif_napi_add(dev, &net->napi, tbnet_poll); in tbnet_probe()
1340 net->handler.uuid = &tbnet_svc_uuid; in tbnet_probe()
1341 net->handler.callback = tbnet_handle_packet; in tbnet_probe()
1342 net->handler.data = net; in tbnet_probe()
1343 tb_register_protocol_handler(&net->handler); in tbnet_probe()
1345 tb_service_set_drvdata(svc, net); in tbnet_probe()
1349 tb_unregister_protocol_handler(&net->handler); in tbnet_probe()
1359 struct tbnet *net = tb_service_get_drvdata(svc); in tbnet_remove() local
1361 unregister_netdev(net->dev); in tbnet_remove()
1362 tb_unregister_protocol_handler(&net->handler); in tbnet_remove()
1363 free_netdev(net->dev); in tbnet_remove()
1374 struct tbnet *net = tb_service_get_drvdata(svc); in tbnet_suspend() local
1376 stop_login(net); in tbnet_suspend()
1377 if (netif_running(net->dev)) { in tbnet_suspend()
1378 netif_device_detach(net->dev); in tbnet_suspend()
1379 tbnet_tear_down(net, true); in tbnet_suspend()
1382 tb_unregister_protocol_handler(&net->handler); in tbnet_suspend()
1389 struct tbnet *net = tb_service_get_drvdata(svc); in tbnet_resume() local
1391 tb_register_protocol_handler(&net->handler); in tbnet_resume()
1393 netif_carrier_off(net->dev); in tbnet_resume()
1394 if (netif_running(net->dev)) { in tbnet_resume()
1395 netif_device_attach(net->dev); in tbnet_resume()
1396 start_login(net); in tbnet_resume()
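
Suspend and resume bracket the connection rather than preserving it: tbnet_suspend() tears the link down with a logout and unregisters the protocol handler, and tbnet_resume() re-registers the handler and restarts the handshake from scratch. A reconstruction of the resume path from lines 1389..1396 (the surrounding PM glue is assumed):

    static int tbnet_resume_sketch(struct tb_service *svc)
    {
        struct tbnet *net = tb_service_get_drvdata(svc);

        tb_register_protocol_handler(&net->handler);

        /* The link must be renegotiated from scratch: force the
         * carrier down, then redo the login if the interface was
         * running before suspend. */
        netif_carrier_off(net->dev);
        if (netif_running(net->dev)) {
            netif_device_attach(net->dev);
            start_login(net);
        }

        return 0;
    }
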
1413 .name = "thunderbolt-net",