Commit 296403f9 authored by Jakub Kicinski

Merge branch 'net-thunderbolt-add-tracepoints'

Mika Westerberg says:

====================
net: thunderbolt: Add tracepoints

This series adds tracepoints and additional logging to the
Thunderbolt/USB4 networking driver. These are useful when debugging
possible issues.

Before that, we move the driver into its own directory under drivers/net
so that we can add additional files without cluttering the main network
drivers directory, and update MAINTAINERS accordingly.

v1: https://lore.kernel.org/netdev/20230104081731.45928-1-mika.westerberg@linux.intel.com/
====================

Link: https://lore.kernel.org/r/20230111062633.1385-1-mika.westerberg@linux.intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents dec5efcf f7586527

MAINTAINERS:
@@ -20789,7 +20789,7 @@ M: Mika Westerberg <mika.westerberg@linux.intel.com>
M: Yehezkel Bernat <YehezkelShB@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/thunderbolt.c
+F: drivers/net/thunderbolt/

THUNDERX GPIO DRIVER
M: Robert Richter <rric@kernel.org>

drivers/net/Kconfig:
@@ -583,18 +583,7 @@ config FUJITSU_ES
This driver provides support for Extended Socket network device
on Extended Partitioning of FUJITSU PRIMEQUEST 2000 E2 series.

-config USB4_NET
-tristate "Networking over USB4 and Thunderbolt cables"
-depends on USB4 && INET
-help
-  Select this if you want to create network between two computers
-  over a USB4 and Thunderbolt cables. The driver supports Apple
-  ThunderboltIP protocol and allows communication with any host
-  supporting the same protocol including Windows and macOS.
-  To compile this driver a module, choose M here. The module will be
-  called thunderbolt-net.
+source "drivers/net/thunderbolt/Kconfig"

source "drivers/net/hyperv/Kconfig"

config NETDEVSIM

drivers/net/Makefile:
@@ -84,8 +84,6 @@ obj-$(CONFIG_HYPERV_NET) += hyperv/
obj-$(CONFIG_NTB_NETDEV) += ntb_netdev.o
obj-$(CONFIG_FUJITSU_ES) += fjes/
-thunderbolt-net-y += thunderbolt.o
-obj-$(CONFIG_USB4_NET) += thunderbolt-net.o
+obj-$(CONFIG_USB4_NET) += thunderbolt/
obj-$(CONFIG_NETDEVSIM) += netdevsim/
obj-$(CONFIG_NET_FAILOVER) += net_failover.o

drivers/net/thunderbolt/Kconfig (new file):
# SPDX-License-Identifier: GPL-2.0-only
config USB4_NET
tristate "Networking over USB4 and Thunderbolt cables"
depends on USB4 && INET
help
  Select this if you want to create a network between two computers
  over USB4 and Thunderbolt cables. The driver supports the Apple
  ThunderboltIP protocol and allows communication with any host
  supporting the same protocol, including Windows and macOS.

  To compile this driver as a module, choose M here. The module will be
  called thunderbolt_net.

drivers/net/thunderbolt/Makefile (new file):
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_USB4_NET) := thunderbolt_net.o
thunderbolt_net-objs := main.o trace.o
# Tracepoints need to know where to find trace.h
CFLAGS_trace.o := -I$(src)
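# (trace.h sets TRACE_INCLUDE_PATH to "." and TRACE_INCLUDE_FILE to "trace",
# so the generated trace code includes "./trace.h"; the -I$(src) above makes
# that resolve to this directory even for out-of-tree builds.)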

drivers/net/thunderbolt/main.c (moved from drivers/net/thunderbolt.c):

@@ -23,6 +23,8 @@
#include <net/ip6_checksum.h>
+#include "trace.h"
/* Protocol timeouts in ms */
#define TBNET_LOGIN_DELAY 4500
#define TBNET_LOGIN_TIMEOUT 500

@@ -305,6 +307,8 @@ static int tbnet_logout_request(struct tbnet *net)
static void start_login(struct tbnet *net)
{
+netdev_dbg(net->dev, "login started\n");
mutex_lock(&net->connection_lock);
net->login_sent = false;
net->login_received = false;

@@ -318,6 +322,8 @@ static void stop_login(struct tbnet *net)
{
cancel_delayed_work_sync(&net->login_work);
cancel_work_sync(&net->connected_work);
+netdev_dbg(net->dev, "login stopped\n");
}

static inline unsigned int tbnet_frame_size(const struct tbnet_frame *tf)

@@ -349,6 +355,8 @@ static void tbnet_free_buffers(struct tbnet_ring *ring)
size = TBNET_RX_PAGE_SIZE;
}
+trace_tbnet_free_frame(i, tf->page, tf->frame.buffer_phy, dir);
if (tf->frame.buffer_phy)
dma_unmap_page(dma_dev, tf->frame.buffer_phy, size,
dir);

@@ -374,6 +382,8 @@ static void tbnet_tear_down(struct tbnet *net, bool send_logout)
int ret, retries = TBNET_LOGOUT_RETRIES;
while (send_logout && retries-- > 0) {
+netdev_dbg(net->dev, "sending logout request %u\n",
+retries);
ret = tbnet_logout_request(net);
if (ret != -ETIMEDOUT)
break;

@@ -400,6 +410,8 @@ static void tbnet_tear_down(struct tbnet *net, bool send_logout)
net->login_sent = false;
net->login_received = false;
+netdev_dbg(net->dev, "network traffic stopped\n");
mutex_unlock(&net->connection_lock);
}

@@ -431,12 +443,15 @@ static int tbnet_handle_packet(const void *buf, size_t size, void *data)
switch (pkg->hdr.type) {
case TBIP_LOGIN:
+netdev_dbg(net->dev, "remote login request received\n");
if (!netif_running(net->dev))
break;
ret = tbnet_login_response(net, route, sequence,
pkg->hdr.command_id);
if (!ret) {
+netdev_dbg(net->dev, "remote login response sent\n");
mutex_lock(&net->connection_lock);
net->login_received = true;
net->remote_transmit_path = pkg->transmit_path;

@@ -458,9 +473,12 @@ static int tbnet_handle_packet(const void *buf, size_t size, void *data)
break;
case TBIP_LOGOUT:
+netdev_dbg(net->dev, "remote logout request received\n");
ret = tbnet_logout_response(net, route, sequence, command_id);
-if (!ret)
+if (!ret) {
+netdev_dbg(net->dev, "remote logout response sent\n");
queue_work(system_long_wq, &net->disconnect_work);
+}
break;
default:

@@ -512,6 +530,9 @@ static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers)
tf->frame.buffer_phy = dma_addr;
tf->dev = net->dev;
+trace_tbnet_alloc_rx_frame(index, tf->page, dma_addr,
+DMA_FROM_DEVICE);
tb_ring_rx(ring->ring, &tf->frame);
ring->prod++;

@@ -588,6 +609,8 @@ static int tbnet_alloc_tx_buffers(struct tbnet *net)
tf->frame.callback = tbnet_tx_callback;
tf->frame.sof = TBIP_PDF_FRAME_START;
tf->frame.eof = TBIP_PDF_FRAME_END;
+trace_tbnet_alloc_tx_frame(i, tf->page, dma_addr, DMA_TO_DEVICE);
}
ring->cons = 0;

@@ -612,6 +635,8 @@ static void tbnet_connected_work(struct work_struct *work)
if (!connected)
return;
+netdev_dbg(net->dev, "login successful, enabling paths\n");
ret = tb_xdomain_alloc_in_hopid(net->xd, net->remote_transmit_path);
if (ret != net->remote_transmit_path) {
netdev_err(net->dev, "failed to allocate Rx HopID\n");

@@ -647,6 +672,8 @@ static void tbnet_connected_work(struct work_struct *work)
netif_carrier_on(net->dev);
netif_start_queue(net->dev);
+netdev_dbg(net->dev, "network traffic started\n");
return;

err_free_tx_buffers:

@@ -668,8 +695,13 @@ static void tbnet_login_work(struct work_struct *work)
if (netif_carrier_ok(net->dev))
return;
+netdev_dbg(net->dev, "sending login request, retries=%u\n",
+net->login_retries);
ret = tbnet_login_request(net, net->login_retries % 4);
if (ret) {
+netdev_dbg(net->dev, "sending login request failed, ret=%d\n",
+ret);
if (net->login_retries++ < TBNET_LOGIN_RETRIES) {
queue_delayed_work(system_long_wq, &net->login_work,
delay);

@@ -677,6 +709,8 @@ static void tbnet_login_work(struct work_struct *work)
netdev_info(net->dev, "ThunderboltIP login timed out\n");
}
} else {
+netdev_dbg(net->dev, "received login reply\n");
net->login_retries = 0;
mutex_lock(&net->connection_lock);

@@ -807,12 +841,16 @@ static int tbnet_poll(struct napi_struct *napi, int budget)
hdr = page_address(page);
if (!tbnet_check_frame(net, tf, hdr)) {
+trace_tbnet_invalid_rx_ip_frame(hdr->frame_size,
+hdr->frame_id, hdr->frame_index, hdr->frame_count);
__free_pages(page, TBNET_RX_PAGE_ORDER);
dev_kfree_skb_any(net->skb);
net->skb = NULL;
continue;
}
+trace_tbnet_rx_ip_frame(hdr->frame_size, hdr->frame_id,
+hdr->frame_index, hdr->frame_count);
frame_size = le32_to_cpu(hdr->frame_size);
skb = net->skb;

@@ -846,6 +884,7 @@ static int tbnet_poll(struct napi_struct *napi, int budget)
if (last) {
skb->protocol = eth_type_trans(skb, net->dev);
+trace_tbnet_rx_skb(skb);
napi_gro_receive(&net->napi, skb);
net->skb = NULL;
}

@@ -965,6 +1004,8 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
for (i = 0; i < frame_count; i++) {
hdr = page_address(frames[i]->page);
hdr->frame_count = cpu_to_le32(frame_count);
+trace_tbnet_tx_ip_frame(hdr->frame_size, hdr->frame_id,
+hdr->frame_index, hdr->frame_count);
dma_sync_single_for_device(dma_dev,
frames[i]->frame.buffer_phy,
tbnet_frame_size(frames[i]), DMA_TO_DEVICE);

@@ -1029,6 +1070,8 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
len = le32_to_cpu(hdr->frame_size) - offset;
wsum = csum_partial(dest, len, wsum);
hdr->frame_count = cpu_to_le32(frame_count);
+trace_tbnet_tx_ip_frame(hdr->frame_size, hdr->frame_id,
+hdr->frame_index, hdr->frame_count);
offset = 0;
}

@@ -1071,6 +1114,8 @@ static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
bool unmap = false;
void *dest;
+trace_tbnet_tx_skb(skb);
nframes = DIV_ROUND_UP(data_len, TBNET_MAX_PAYLOAD_SIZE);
if (tbnet_available_buffers(&net->tx_ring) < nframes) {
netif_stop_queue(net->dev);

@@ -1177,6 +1222,7 @@ static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
net->stats.tx_packets++;
net->stats.tx_bytes += skb->len;
+trace_tbnet_consume_skb(skb);
dev_consume_skb_any(skb);
return NETDEV_TX_OK;

drivers/net/thunderbolt/trace.c (new file):
// SPDX-License-Identifier: GPL-2.0
/*
* Tracepoints for Thunderbolt/USB4 networking driver
*
* Copyright (C) 2023, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
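/*
 * Defining CREATE_TRACE_POINTS in exactly one .c file makes the include
 * below expand trace.h into the actual tracepoint definitions; every other
 * user of trace.h only gets the declarations.
 */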
#define CREATE_TRACE_POINTS
#include "trace.h"

drivers/net/thunderbolt/trace.h (new file):
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Tracepoints for Thunderbolt/USB4 networking driver
*
* Copyright (C) 2023, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM thunderbolt_net
#if !defined(__TRACE_THUNDERBOLT_NET_H) || defined(TRACE_HEADER_MULTI_READ)
#define __TRACE_THUNDERBOLT_NET_H
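/*
 * TRACE_HEADER_MULTI_READ lets <trace/define_trace.h> re-include this header
 * while generating the event definitions, despite the include guard above.
 */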
#include <linux/dma-direction.h>
#include <linux/skbuff.h>
#include <linux/tracepoint.h>
#define DMA_DATA_DIRECTION_NAMES \
{ DMA_BIDIRECTIONAL, "DMA_BIDIRECTIONAL" }, \
{ DMA_TO_DEVICE, "DMA_TO_DEVICE" }, \
{ DMA_FROM_DEVICE, "DMA_FROM_DEVICE" }, \
{ DMA_NONE, "DMA_NONE" }
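/*
 * Symbol table used by __print_symbolic() in TP_printk() below so that the
 * trace output shows the DMA direction by name rather than as a raw number.
 */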
DECLARE_EVENT_CLASS(tbnet_frame,
TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
enum dma_data_direction dir),
TP_ARGS(index, page, phys, dir),
TP_STRUCT__entry(
__field(unsigned int, index)
__field(const void *, page)
__field(dma_addr_t, phys)
__field(enum dma_data_direction, dir)
),
TP_fast_assign(
__entry->index = index;
__entry->page = page;
__entry->phys = phys;
__entry->dir = dir;
),
TP_printk("index=%u page=%p phys=%pad dir=%s",
__entry->index, __entry->page, &__entry->phys,
__print_symbolic(__entry->dir, DMA_DATA_DIRECTION_NAMES))
);
DEFINE_EVENT(tbnet_frame, tbnet_alloc_rx_frame,
TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
enum dma_data_direction dir),
TP_ARGS(index, page, phys, dir)
);
DEFINE_EVENT(tbnet_frame, tbnet_alloc_tx_frame,
TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
enum dma_data_direction dir),
TP_ARGS(index, page, phys, dir)
);
DEFINE_EVENT(tbnet_frame, tbnet_free_frame,
TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
enum dma_data_direction dir),
TP_ARGS(index, page, phys, dir)
);
DECLARE_EVENT_CLASS(tbnet_ip_frame,
TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
TP_ARGS(size, id, index, count),
TP_STRUCT__entry(
__field(u32, size)
__field(u16, id)
__field(u16, index)
__field(u32, count)
),
TP_fast_assign(
__entry->size = le32_to_cpu(size);
__entry->id = le16_to_cpu(id);
__entry->index = le16_to_cpu(index);
__entry->count = le32_to_cpu(count);
),
TP_printk("id=%u size=%u index=%u count=%u",
__entry->id, __entry->size, __entry->index, __entry->count)
);
DEFINE_EVENT(tbnet_ip_frame, tbnet_rx_ip_frame,
TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
TP_ARGS(size, id, index, count)
);
DEFINE_EVENT(tbnet_ip_frame, tbnet_invalid_rx_ip_frame,
TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
TP_ARGS(size, id, index, count)
);
DEFINE_EVENT(tbnet_ip_frame, tbnet_tx_ip_frame,
TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
TP_ARGS(size, id, index, count)
);
DECLARE_EVENT_CLASS(tbnet_skb,
TP_PROTO(const struct sk_buff *skb),
TP_ARGS(skb),
TP_STRUCT__entry(
__field(const void *, addr)
__field(unsigned int, len)
__field(unsigned int, data_len)
__field(unsigned int, nr_frags)
),
TP_fast_assign(
__entry->addr = skb;
__entry->len = skb->len;
__entry->data_len = skb->data_len;
__entry->nr_frags = skb_shinfo(skb)->nr_frags;
),
TP_printk("skb=%p len=%u data_len=%u nr_frags=%u",
__entry->addr, __entry->len, __entry->data_len,
__entry->nr_frags)
);
DEFINE_EVENT(tbnet_skb, tbnet_rx_skb,
TP_PROTO(const struct sk_buff *skb),
TP_ARGS(skb)
);
DEFINE_EVENT(tbnet_skb, tbnet_tx_skb,
TP_PROTO(const struct sk_buff *skb),
TP_ARGS(skb)
);
DEFINE_EVENT(tbnet_skb, tbnet_consume_skb,
TP_PROTO(const struct sk_buff *skb),
TP_ARGS(skb)
);
#endif /* __TRACE_THUNDERBOLT_NET_H */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>
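
A minimal sketch of how these event classes are meant to be extended (not
part of this series; the event name tbnet_unmap_frame and its call site
below are hypothetical): a further frame-level event only needs a
DEFINE_EVENT() against the existing tbnet_frame class, and the driver then
calls the generated trace_<name>() helper.

/* Hypothetical example: reuse the tbnet_frame event class. */
DEFINE_EVENT(tbnet_frame, tbnet_unmap_frame,
TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
enum dma_data_direction dir),
TP_ARGS(index, page, phys, dir)
);

/* Corresponding (hypothetical) call site in main.c: */
trace_tbnet_unmap_frame(i, tf->page, tf->frame.buffer_phy, DMA_FROM_DEVICE);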