Commit 5367f82a authored by Greg Kroah-Hartman

Merge tag 'thunderbolt-for-v5.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt into usb-next

Mika writes:

thunderbolt: Changes for v5.13 merge window

This includes the following Thunderbolt/USB4 changes for the v5.13 merge window:

  * Debugfs improvements

  * Align the inter-domain (peer-to-peer) support with the USB4
    inter-domain spec for better interoperability

  * Add support for USB4 DROM and the new product descriptor

  * More KUnit tests

  * Detailed uevent for routers

  * A few miscellaneous improvements

All these have been in linux-next without reported issues.

* tag 'thunderbolt-for-v5.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt: (24 commits)
  thunderbolt: Hide authorized attribute if router does not support PCIe tunnels
  thunderbolt: Add details to router uevent
  thunderbolt: Unlock on error path in tb_domain_add()
  thunderbolt: Add support for USB4 DROM
  thunderbolt: Check quirks in tb_switch_add()
  thunderbolt: Add KUnit tests for DMA tunnels
  thunderbolt: Add KUnit tests for XDomain properties
  net: thunderbolt: Align the driver to the USB4 networking spec
  thunderbolt: Allow multiple DMA tunnels over a single XDomain connection
  thunderbolt: Drop unused tb_port_set_initial_credits()
  thunderbolt: Use dedicated flow control for DMA tunnels
  thunderbolt: Add support for maxhopid XDomain property
  thunderbolt: Add tb_property_copy_dir()
  thunderbolt: Align XDomain protocol timeouts with the spec
  thunderbolt: Use pseudo-random number as initial property block generation
  thunderbolt: Do not re-establish XDomain DMA paths automatically
  thunderbolt: Add more logging to XDomain connections
  Documentation / thunderbolt: Drop speed/lanes entries for XDomain
  thunderbolt: Decrease control channel timeout for software connection manager
  thunderbolt: Do not pass timeout for tb_cfg_reset()
  ...
parents 9bc46a12 6f3badea
What: /sys/bus/thunderbolt/devices/<xdomain>/rx_speed
Date: Feb 2021
KernelVersion: 5.11
Contact: Isaac Hazan <isaac.hazan@intel.com>
Description: This attribute reports the XDomain RX speed per lane.
All RX lanes run at the same speed.
What: /sys/bus/thunderbolt/devices/<xdomain>/rx_lanes
Date: Feb 2021
KernelVersion: 5.11
Contact: Isaac Hazan <isaac.hazan@intel.com>
Description: This attribute reports the number of RX lanes the XDomain
is using simultaneously through its upstream port.
What: /sys/bus/thunderbolt/devices/<xdomain>/tx_speed
Date: Feb 2021
KernelVersion: 5.11
Contact: Isaac Hazan <isaac.hazan@intel.com>
Description: This attribute reports the XDomain TX speed per lane.
All TX lanes run at the same speed.
What: /sys/bus/thunderbolt/devices/<xdomain>/tx_lanes
Date: Feb 2021
KernelVersion: 5.11
Contact: Isaac Hazan <isaac.hazan@intel.com>
Description: This attribute reports number of TX lanes the XDomain
is using simultaneously through its upstream port.
What: /sys/bus/thunderbolt/devices/.../domainX/boot_acl
Date: Jun 2018
KernelVersion: 4.17
@@ -162,6 +134,13 @@ Contact: thunderbolt-software@lists.01.org
Description: This attribute contains name of this device extracted from
the device DROM.
What: /sys/bus/thunderbolt/devices/.../maxhopid
Date: Jul 2021
KernelVersion: 5.13
Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
Description: Only set for XDomains. The maximum HopID the other host
supports as its input HopID.
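Like the other entries in this file, maxhopid is exposed as a plain read-only sysfs attribute. A minimal userspace sketch for reading it is shown below; the device path used here is hypothetical and only for illustration:

#include <stdio.h>

int main(void)
{
	/* Hypothetical XDomain sysfs path; substitute a real device. */
	const char *path = "/sys/bus/thunderbolt/devices/1-0/maxhopid";
	char buf[32];
	FILE *f = fopen(path, "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("maxhopid: %s", buf); /* value already ends with a newline */
	fclose(f);
	return 0;
}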
What: /sys/bus/thunderbolt/devices/.../rx_speed
Date: Jan 2020
KernelVersion: 5.5
...
@@ -25,13 +25,13 @@
/* Protocol timeouts in ms */
#define TBNET_LOGIN_DELAY 4500
#define TBNET_LOGIN_TIMEOUT 500
#define TBNET_LOGOUT_TIMEOUT 100
#define TBNET_LOGOUT_TIMEOUT 1000
#define TBNET_RING_SIZE 256
#define TBNET_LOCAL_PATH 0xf
#define TBNET_LOGIN_RETRIES 60
#define TBNET_LOGOUT_RETRIES 5
#define TBNET_LOGOUT_RETRIES 10
#define TBNET_MATCH_FRAGS_ID BIT(1)
#define TBNET_64K_FRAMES BIT(2)
#define TBNET_MAX_MTU SZ_64K
#define TBNET_FRAME_SIZE SZ_4K
#define TBNET_MAX_PAYLOAD_SIZE \
@@ -154,8 +154,8 @@ struct tbnet_ring {
* @login_sent: ThunderboltIP login message successfully sent
* @login_received: ThunderboltIP login message received from the remote
* host
* @transmit_path: HopID the other end needs to use building the
* opposite side path.
* @local_transmit_path: HopID we are using to send out packets
* @remote_transmit_path: HopID the other end is using to send packets to us
* @connection_lock: Lock serializing access to @login_sent,
* @login_received and @transmit_path.
* @login_retries: Number of login retries currently done
@@ -184,7 +184,8 @@ struct tbnet {
atomic_t command_id;
bool login_sent;
bool login_received;
u32 transmit_path;
int local_transmit_path;
int remote_transmit_path;
struct mutex connection_lock;
int login_retries;
struct delayed_work login_work;
@@ -257,7 +258,7 @@ static int tbnet_login_request(struct tbnet *net, u8 sequence)
atomic_inc_return(&net->command_id));
request.proto_version = TBIP_LOGIN_PROTO_VERSION;
request.transmit_path = TBNET_LOCAL_PATH;
request.transmit_path = net->local_transmit_path;
return tb_xdomain_request(xd, &request, sizeof(request),
TB_CFG_PKG_XDOMAIN_RESP, &reply,
@@ -364,10 +365,10 @@ static void tbnet_tear_down(struct tbnet *net, bool send_logout)
mutex_lock(&net->connection_lock);
if (net->login_sent && net->login_received) {
int retries = TBNET_LOGOUT_RETRIES;
int ret, retries = TBNET_LOGOUT_RETRIES;
while (send_logout && retries-- > 0) {
int ret = tbnet_logout_request(net);
ret = tbnet_logout_request(net);
if (ret != -ETIMEDOUT)
break;
}
@@ -377,8 +378,16 @@ static void tbnet_tear_down(struct tbnet *net, bool send_logout)
tbnet_free_buffers(&net->rx_ring);
tbnet_free_buffers(&net->tx_ring);
if (tb_xdomain_disable_paths(net->xd))
ret = tb_xdomain_disable_paths(net->xd,
net->local_transmit_path,
net->rx_ring.ring->hop,
net->remote_transmit_path,
net->tx_ring.ring->hop);
if (ret)
netdev_warn(net->dev, "failed to disable DMA paths\n");
tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path);
net->remote_transmit_path = 0;
}
net->login_retries = 0;
@@ -424,7 +433,7 @@ static int tbnet_handle_packet(const void *buf, size_t size, void *data)
if (!ret) {
mutex_lock(&net->connection_lock);
net->login_received = true;
net->transmit_path = pkg->transmit_path;
net->remote_transmit_path = pkg->transmit_path;
/* If we reached the number of max retries or
* previous logout, schedule another round of
@@ -597,12 +606,18 @@ static void tbnet_connected_work(struct work_struct *work)
if (!connected)
return;
ret = tb_xdomain_alloc_in_hopid(net->xd, net->remote_transmit_path);
if (ret != net->remote_transmit_path) {
netdev_err(net->dev, "failed to allocate Rx HopID\n");
return;
}
/* Both logins successful so enable the high-speed DMA paths and
* start the network device queue.
*/
ret = tb_xdomain_enable_paths(net->xd, TBNET_LOCAL_PATH,
ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path,
net->rx_ring.ring->hop,
net->transmit_path,
net->remote_transmit_path,
net->tx_ring.ring->hop);
if (ret) {
netdev_err(net->dev, "failed to enable DMA paths\n");
@@ -629,6 +644,7 @@ static void tbnet_connected_work(struct work_struct *work)
err_stop_rings:
tb_ring_stop(net->rx_ring.ring);
tb_ring_stop(net->tx_ring.ring);
tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path);
}
static void tbnet_login_work(struct work_struct *work)
@@ -851,6 +867,7 @@ static int tbnet_open(struct net_device *dev)
struct tb_xdomain *xd = net->xd;
u16 sof_mask, eof_mask;
struct tb_ring *ring;
int hopid;
netif_carrier_off(dev);
@@ -862,6 +879,15 @@ static int tbnet_open(struct net_device *dev)
}
net->tx_ring.ring = ring;
hopid = tb_xdomain_alloc_out_hopid(xd, -1);
if (hopid < 0) {
netdev_err(dev, "failed to allocate Tx HopID\n");
tb_ring_free(net->tx_ring.ring);
net->tx_ring.ring = NULL;
return hopid;
}
net->local_transmit_path = hopid;
sof_mask = BIT(TBIP_PDF_FRAME_START);
eof_mask = BIT(TBIP_PDF_FRAME_END);
@@ -893,6 +919,8 @@ static int tbnet_stop(struct net_device *dev)
tb_ring_free(net->rx_ring.ring);
net->rx_ring.ring = NULL;
tb_xdomain_release_out_hopid(net->xd, net->local_transmit_path);
tb_ring_free(net->tx_ring.ring);
net->tx_ring.ring = NULL;
@@ -1340,7 +1368,7 @@ static int __init tbnet_init(void)
* the moment.
*/
tb_property_add_immediate(tbnet_dir, "prtcstns",
TBNET_MATCH_FRAGS_ID);
TBNET_MATCH_FRAGS_ID | TBNET_64K_FRAMES);
ret = tb_register_property_dir("network", tbnet_dir);
if (ret) {
...
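The last hunk above advertises 64k frame support through the host's "prtcstns" property. As an illustrative sketch of how the receiving end can honor that, the helper below checks the bit in an already-parsed remote "network" property directory; the function name and the directory argument are assumptions for the example, not part of this series:

/* Illustrative only: test the peer's advertised protocol settings. */
static bool peer_supports_64k_frames(struct tb_property_dir *network_dir)
{
	const struct tb_property *p;

	/* "prtcstns" is an immediate (32-bit) value property */
	p = tb_property_find(network_dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
	return p && (p->value.immediate & TBNET_64K_FRAMES);
}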
@@ -17,7 +17,7 @@
#define TB_CTL_RX_PKG_COUNT 10
#define TB_CTL_RETRIES 4
#define TB_CTL_RETRIES 1
/**
* struct tb_ctl - Thunderbolt control channel
@@ -29,6 +29,7 @@
* @request_queue_lock: Lock protecting @request_queue
* @request_queue: List of outstanding requests
* @running: Is the control channel running at the moment
* @timeout_msec: Default timeout for non-raw control messages
* @callback: Callback called when hotplug message is received
* @callback_data: Data passed to @callback
*/
@@ -43,6 +44,7 @@ struct tb_ctl {
struct list_head request_queue;
bool running;
int timeout_msec;
event_cb callback;
void *callback_data;
};
@@ -613,6 +615,7 @@ struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
/**
* tb_ctl_alloc() - allocate a control channel
* @nhi: Pointer to NHI
* @timeout_msec: Default timeout used with non-raw control messages
* @cb: Callback called for plug events
* @cb_data: Data passed to @cb
*
@@ -620,13 +623,15 @@ struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
*
* Return: Returns a pointer on success or NULL on failure.
*/
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data)
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int timeout_msec, event_cb cb,
void *cb_data)
{
int i;
struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
if (!ctl)
return NULL;
ctl->nhi = nhi;
ctl->timeout_msec = timeout_msec;
ctl->callback = cb;
ctl->callback_data = cb_data;
@@ -802,14 +807,12 @@ static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
* tb_cfg_reset() - send a reset packet and wait for a response
* @ctl: Control channel pointer
* @route: Router string for the router to send reset
* @timeout_msec: Timeout in ms how long to wait for the response
*
* If the switch at route is incorrectly configured then we will not receive a
* reply (even though the switch will reset). The caller should check for
* -ETIMEDOUT and attempt to reconfigure the switch.
*/
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
int timeout_msec)
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route)
{
struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
struct tb_cfg_result res = { 0 };
@@ -831,7 +834,7 @@ struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
req->response_size = sizeof(reply);
req->response_type = TB_CFG_PKG_RESET;
res = tb_cfg_request_sync(ctl, req, timeout_msec);
res = tb_cfg_request_sync(ctl, req, ctl->timeout_msec);
tb_cfg_request_put(req);
@@ -1007,7 +1010,7 @@ int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
enum tb_cfg_space space, u32 offset, u32 length)
{
struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
space, offset, length, ctl->timeout_msec);
switch (res.err) {
case 0:
/* Success */
@@ -1033,7 +1036,7 @@ int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
enum tb_cfg_space space, u32 offset, u32 length)
{
struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
space, offset, length, ctl->timeout_msec);
switch (res.err) {
case 0:
/* Success */
@@ -1071,7 +1074,7 @@ int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
u32 dummy;
struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
TB_CFG_SWITCH, 0, 1,
TB_CFG_DEFAULT_TIMEOUT);
ctl->timeout_msec);
if (res.err == 1)
return -EIO;
if (res.err)
...
@@ -21,15 +21,14 @@ struct tb_ctl;
typedef bool (*event_cb)(void *data, enum tb_cfg_pkg_type type,
const void *buf, size_t size);
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data);
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int timeout_msec, event_cb cb,
void *cb_data);
void tb_ctl_start(struct tb_ctl *ctl);
void tb_ctl_stop(struct tb_ctl *ctl);
void tb_ctl_free(struct tb_ctl *ctl);
/* configuration commands */
#define TB_CFG_DEFAULT_TIMEOUT 5000 /* msec */
struct tb_cfg_result {
u64 response_route;
u32 response_port; /*
@@ -124,8 +123,7 @@ static inline struct tb_cfg_header tb_cfg_make_header(u64 route)
}
int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug);
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
int timeout_msec);
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route);
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
u64 route, u32 port,
enum tb_cfg_space space, u32 offset,
...
@@ -251,6 +251,29 @@ static ssize_t counters_write(struct file *file, const char __user *user_buf,
return ret < 0 ? ret : count;
}
static void cap_show_by_dw(struct seq_file *s, struct tb_switch *sw,
struct tb_port *port, unsigned int cap,
unsigned int offset, u8 cap_id, u8 vsec_id,
int dwords)
{
int i, ret;
u32 data;
for (i = 0; i < dwords; i++) {
if (port)
ret = tb_port_read(port, &data, TB_CFG_PORT, cap + offset + i, 1);
else
ret = tb_sw_read(sw, &data, TB_CFG_SWITCH, cap + offset + i, 1);
if (ret) {
seq_printf(s, "0x%04x <not accessible>\n", cap + offset + i);
continue;
}
seq_printf(s, "0x%04x %4d 0x%02x 0x%02x 0x%08x\n", cap + offset + i,
offset + i, cap_id, vsec_id, data);
}
}
static void cap_show(struct seq_file *s, struct tb_switch *sw,
struct tb_port *port, unsigned int cap, u8 cap_id,
u8 vsec_id, int length)
@@ -267,10 +290,7 @@ static void cap_show(struct seq_file *s, struct tb_switch *sw,
else
ret = tb_sw_read(sw, data, TB_CFG_SWITCH, cap + offset, dwords);
if (ret) {
seq_printf(s, "0x%04x <not accessible>\n",
cap + offset);
if (dwords > 1)
seq_printf(s, "0x%04x ...\n", cap + offset + 1);
cap_show_by_dw(s, sw, port, cap, offset, cap_id, vsec_id, length);
return;
}
@@ -341,15 +361,6 @@ static void port_cap_show(struct tb_port *port, struct seq_file *s,
} else {
length = header.extended_short.length;
vsec_id = header.extended_short.vsec_id;
/*
* Ice Lake and Tiger Lake do not implement the
* full length of the capability, only first 32
* dwords so hard-code it here.
*/
if (!vsec_id &&
(tb_switch_is_ice_lake(port->sw) ||
tb_switch_is_tiger_lake(port->sw)))
length = 32;
}
break;
...
@@ -13,7 +13,6 @@
#include <linux/sizes.h>
#include <linux/thunderbolt.h>
#define DMA_TEST_HOPID 8
#define DMA_TEST_TX_RING_SIZE 64
#define DMA_TEST_RX_RING_SIZE 256
#define DMA_TEST_FRAME_SIZE SZ_4K
@@ -72,7 +71,9 @@ static const char * const dma_test_result_names[] = {
* @svc: XDomain service the driver is bound to
* @xd: XDomain the service belongs to
* @rx_ring: Software ring holding RX frames
* @rx_hopid: HopID used for receiving frames
* @tx_ring: Software ring holding TX frames
* @tx_hopid: HopID used for sending fames
* @packets_to_send: Number of packets to send
* @packets_to_receive: Number of packets to receive
* @packets_sent: Actual number of packets sent
@@ -92,7 +93,9 @@ struct dma_test {
const struct tb_service *svc;
struct tb_xdomain *xd;
struct tb_ring *rx_ring;
int rx_hopid;
struct tb_ring *tx_ring;
int tx_hopid;
unsigned int packets_to_send;
unsigned int packets_to_receive;
unsigned int packets_sent;
@@ -119,10 +122,12 @@ static void *dma_test_pattern;
static void dma_test_free_rings(struct dma_test *dt)
{
if (dt->rx_ring) {
tb_xdomain_release_in_hopid(dt->xd, dt->rx_hopid);
tb_ring_free(dt->rx_ring);
dt->rx_ring = NULL;
}
if (dt->tx_ring) {
tb_xdomain_release_out_hopid(dt->xd, dt->tx_hopid);
tb_ring_free(dt->tx_ring);
dt->tx_ring = NULL;
}
@@ -151,6 +156,14 @@ static int dma_test_start_rings(struct dma_test *dt)
dt->tx_ring = ring;
e2e_tx_hop = ring->hop;
ret = tb_xdomain_alloc_out_hopid(xd, -1);
if (ret < 0) {
dma_test_free_rings(dt);
return ret;
}
dt->tx_hopid = ret;
}
if (dt->packets_to_receive) {
@@ -168,11 +181,19 @@ static int dma_test_start_rings(struct dma_test *dt)
}
dt->rx_ring = ring;
ret = tb_xdomain_alloc_in_hopid(xd, -1);
if (ret < 0) {
dma_test_free_rings(dt);
return ret;
}
dt->rx_hopid = ret;
}
ret = tb_xdomain_enable_paths(dt->xd, DMA_TEST_HOPID,
ret = tb_xdomain_enable_paths(dt->xd, dt->tx_hopid,
dt->tx_ring ? dt->tx_ring->hop : 0,
DMA_TEST_HOPID,
dt->rx_hopid,
dt->rx_ring ? dt->rx_ring->hop : 0);
if (ret) {
dma_test_free_rings(dt);
@@ -189,12 +210,18 @@ static int dma_test_start_rings(struct dma_test *dt)
static void dma_test_stop_rings(struct dma_test *dt)
{
int ret;
if (dt->rx_ring)
tb_ring_stop(dt->rx_ring);
if (dt->tx_ring)
tb_ring_stop(dt->tx_ring);
if (tb_xdomain_disable_paths(dt->xd))
ret = tb_xdomain_disable_paths(dt->xd, dt->tx_hopid,
dt->tx_ring ? dt->tx_ring->hop : 0,
dt->rx_hopid,
dt->rx_ring ? dt->rx_ring->hop : 0);
if (ret)
dev_warn(&dt->svc->dev, "failed to disable DMA paths\n");
dma_test_free_rings(dt);
...
@@ -341,9 +341,34 @@ struct device_type tb_domain_type = {
.release = tb_domain_release,
};
static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
const void *buf, size_t size)
{
struct tb *tb = data;
if (!tb->cm_ops->handle_event) {
tb_warn(tb, "domain does not have event handler\n");
return true;
}
switch (type) {
case TB_CFG_PKG_XDOMAIN_REQ:
case TB_CFG_PKG_XDOMAIN_RESP:
if (tb_is_xdomain_enabled())
return tb_xdomain_handle_request(tb, type, buf, size);
break;
default:
tb->cm_ops->handle_event(tb, type, buf, size);
}
return true;
}
/**
* tb_domain_alloc() - Allocate a domain
* @nhi: Pointer to the host controller
* @timeout_msec: Control channel timeout for non-raw messages
* @privsize: Size of the connection manager private data
*
* Allocates and initializes a new Thunderbolt domain. Connection
@@ -355,7 +380,7 @@ struct device_type tb_domain_type = {
*
* Return: allocated domain structure on %NULL in case of error
*/
struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize)
{
struct tb *tb;
@@ -382,6 +407,10 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
if (!tb->wq)
goto err_remove_ida;
tb->ctl = tb_ctl_alloc(nhi, timeout_msec, tb_domain_event_cb, tb);
if (!tb->ctl)
goto err_destroy_wq;
tb->dev.parent = &nhi->pdev->dev;
tb->dev.bus = &tb_bus_type;
tb->dev.type = &tb_domain_type;
@@ -391,6 +420,8 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
return tb;
err_destroy_wq:
destroy_workqueue(tb->wq);
err_remove_ida:
ida_simple_remove(&tb_domain_ida, tb->index);
err_free:
@@ -399,30 +430,6 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
return NULL;
}
static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
const void *buf, size_t size)
{
struct tb *tb = data;
if (!tb->cm_ops->handle_event) {
tb_warn(tb, "domain does not have event handler\n");
return true;
}
switch (type) {
case TB_CFG_PKG_XDOMAIN_REQ:
case TB_CFG_PKG_XDOMAIN_RESP:
if (tb_is_xdomain_enabled())
return tb_xdomain_handle_request(tb, type, buf, size);
break;
default:
tb->cm_ops->handle_event(tb, type, buf, size);
}
return true;
}
/**
* tb_domain_add() - Add domain to the system
* @tb: Domain to add
@@ -442,13 +449,6 @@ int tb_domain_add(struct tb *tb)
return -EINVAL;
mutex_lock(&tb->lock);
tb->ctl = tb_ctl_alloc(tb->nhi, tb_domain_event_cb, tb);
if (!tb->ctl) {
ret = -ENOMEM;
goto err_unlock;
}
/*
* tb_schedule_hotplug_handler may be called as soon as the config
* channel is started. Thats why we have to hold the lock here.
@@ -493,7 +493,6 @@ int tb_domain_add(struct tb *tb)
device_del(&tb->dev);
err_ctl_stop:
tb_ctl_stop(tb->ctl);
err_unlock:
mutex_unlock(&tb->lock);
return ret;
@@ -793,6 +792,10 @@ int tb_domain_disconnect_pcie_paths(struct tb *tb)
* tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
* @tb: Domain enabling the DMA paths
* @xd: XDomain DMA paths are created to
* @transmit_path: HopID we are using to send out packets
* @transmit_ring: DMA ring used to send out packets
* @receive_path: HopID the other end is using to send packets to us
* @receive_ring: DMA ring used to receive packets from @receive_path
*
* Calls connection manager specific method to enable DMA paths to the
* XDomain in question.
@@ -801,18 +804,25 @@ int tb_domain_disconnect_pcie_paths(struct tb *tb)
* particular returns %-ENOTSUPP if the connection manager
* implementation does not support XDomains.
*/
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
int transmit_path, int transmit_ring,
int receive_path, int receive_ring)
{
if (!tb->cm_ops->approve_xdomain_paths)
return -ENOTSUPP;
return tb->cm_ops->approve_xdomain_paths(tb, xd);
return tb->cm_ops->approve_xdomain_paths(tb, xd, transmit_path,
transmit_ring, receive_path, receive_ring);
}
/**
* tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
* @tb: Domain disabling the DMA paths
* @xd: XDomain whose DMA paths are disconnected
* @transmit_path: HopID we are using to send out packets
* @transmit_ring: DMA ring used to send out packets
* @receive_path: HopID the other end is using to send packets to us
* @receive_ring: DMA ring used to receive packets from @receive_path
*
* Calls connection manager specific method to disconnect DMA paths to
* the XDomain in question.
@@ -821,12 +831,15 @@ int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
* particular returns %-ENOTSUPP if the connection manager
* implementation does not support XDomains.
*/
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
int transmit_path, int transmit_ring,
int receive_path, int receive_ring)
{
if (!tb->cm_ops->disconnect_xdomain_paths)
return -ENOTSUPP;
return tb->cm_ops->disconnect_xdomain_paths(tb, xd);
return tb->cm_ops->disconnect_xdomain_paths(tb, xd, transmit_path,
transmit_ring, receive_path, receive_ring);
}
static int disconnect_xdomain(struct device *dev, void *data)
@@ -837,7 +850,7 @@ static int disconnect_xdomain(struct device *dev, void *data)
xd = tb_to_xdomain(dev);
if (xd && xd->tb == tb)
ret = tb_xdomain_disable_paths(xd);
ret = tb_xdomain_disable_all_paths(xd);
return ret;
}
...
@@ -277,6 +277,16 @@ struct tb_drom_entry_port {
u8 unknown4:2;
} __packed;
/* USB4 product descriptor */
struct tb_drom_entry_desc {
struct tb_drom_entry_header header;
u16 bcdUSBSpec;
u16 idVendor;
u16 idProduct;
u16 bcdProductFWRevision;
u32 TID;
u8 productHWRevision;
};
/**
* tb_drom_read_uid_only() - Read UID directly from DROM
@@ -329,6 +339,16 @@ static int tb_drom_parse_entry_generic(struct tb_switch *sw,
if (!sw->device_name)
return -ENOMEM;
break;
case 9: {
const struct tb_drom_entry_desc *desc =
(const struct tb_drom_entry_desc *)entry;
if (!sw->vendor && !sw->device) {
sw->vendor = desc->idVendor;
sw->device = desc->idProduct;
}
break;
}
}
return 0;
@@ -521,6 +541,51 @@ static int tb_drom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
return tb_eeprom_read_n(sw, offset, val, count);
}
static int tb_drom_parse(struct tb_switch *sw)
{
const struct tb_drom_header *header =
(const struct tb_drom_header *)sw->drom;
u32 crc;
crc = tb_crc8((u8 *) &header->uid, 8);
if (crc != header->uid_crc8) {
tb_sw_warn(sw,
"DROM UID CRC8 mismatch (expected: %#x, got: %#x), aborting\n",
header->uid_crc8, crc);
return -EINVAL;
}
if (!sw->uid)
sw->uid = header->uid;
sw->vendor = header->vendor_id;
sw->device = header->model_id;
crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
if (crc != header->data_crc32) {
tb_sw_warn(sw,
"DROM data CRC32 mismatch (expected: %#x, got: %#x), continuing\n",
header->data_crc32, crc);
}
return tb_drom_parse_entries(sw);
}
static int usb4_drom_parse(struct tb_switch *sw)
{
const struct tb_drom_header *header =
(const struct tb_drom_header *)sw->drom;
u32 crc;
crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
if (crc != header->data_crc32) {
tb_sw_warn(sw,
"DROM data CRC32 mismatch (expected: %#x, got: %#x), aborting\n",
header->data_crc32, crc);
return -EINVAL;
}
return tb_drom_parse_entries(sw);
}
/**
* tb_drom_read() - Copy DROM to sw->drom and parse it
* @sw: Router whose DROM to read and parse
@@ -534,7 +599,6 @@ static int tb_drom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
int tb_drom_read(struct tb_switch *sw)
{
u16 size;
u32 crc;
struct tb_drom_header *header;
int res, retries = 1;
@@ -599,31 +663,21 @@ int tb_drom_read(struct tb_switch *sw)
goto err;
}
crc = tb_crc8((u8 *) &header->uid, 8);
if (crc != header->uid_crc8) {
tb_sw_warn(sw,
"drom uid crc8 mismatch (expected: %#x, got: %#x), aborting\n",
header->uid_crc8, crc);
goto err;
}
if (!sw->uid)
sw->uid = header->uid;
sw->vendor = header->vendor_id;
sw->device = header->model_id;
tb_check_quirks(sw);
crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
if (crc != header->data_crc32) {
tb_sw_warn(sw,
"drom data crc32 mismatch (expected: %#x, got: %#x), continuing\n",
header->data_crc32, crc);
tb_sw_dbg(sw, "DROM version: %d\n", header->device_rom_revision);
switch (header->device_rom_revision) {
case 3:
res = usb4_drom_parse(sw);
break;
default:
tb_sw_warn(sw, "DROM device_rom_revision %#x unknown\n",
header->device_rom_revision);
fallthrough;
case 1:
res = tb_drom_parse(sw);
break;
}
if (header->device_rom_revision > 2)
tb_sw_warn(sw, "drom device_rom_revision %#x unknown\n",
header->device_rom_revision);
res = tb_drom_parse_entries(sw);
/* If the DROM parsing fails, wait a moment and retry once */
if (res == -EILSEQ && retries--) {
tb_sw_warn(sw, "parsing DROM failed, retrying\n");
@@ -633,10 +687,11 @@ int tb_drom_read(struct tb_switch *sw)
goto parse;
}
return res;
if (!res)
return 0;
err:
kfree(sw->drom);
sw->drom = NULL;
return -EIO;
}
@@ -557,7 +557,9 @@ static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
return 0;
}
static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
int transmit_path, int transmit_ring,
int receive_path, int receive_ring)
{
struct icm_fr_pkg_approve_xdomain_response reply;
struct icm_fr_pkg_approve_xdomain request;
@@ -568,10 +570,10 @@ static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link;
memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));
request.transmit_path = xd->transmit_path;
request.transmit_ring = xd->transmit_ring;
request.receive_path = xd->receive_path;
request.receive_ring = xd->receive_ring;
request.transmit_path = transmit_path;
request.transmit_ring = transmit_ring;
request.receive_path = receive_path;
request.receive_ring = receive_ring;
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
@@ -585,7 +587,9 @@ static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
return 0;
}
static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
int transmit_path, int transmit_ring,
int receive_path, int receive_ring)
{
u8 phy_port;
u8 cmd;
@@ -1122,7 +1126,9 @@ static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
return 0;
}
static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
int transmit_path, int transmit_ring,
int receive_path, int receive_ring)
{
struct icm_tr_pkg_approve_xdomain_response reply;
struct icm_tr_pkg_approve_xdomain request;
@@ -1132,10 +1138,10 @@ static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
request.hdr.code = ICM_APPROVE_XDOMAIN;
request.route_hi = upper_32_bits(xd->route);
request.route_lo = lower_32_bits(xd->route);
request.transmit_path = xd->transmit_path;
request.transmit_ring = xd->transmit_ring;
request.receive_path = xd->receive_path;
request.receive_ring = xd->receive_ring;
request.transmit_path = transmit_path;
request.transmit_ring = transmit_ring;
request.receive_path = receive_path;
request.receive_ring = receive_ring;
memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));
memset(&reply, 0, sizeof(reply));
@@ -1176,7 +1182,9 @@ static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
return 0;
}
static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
int transmit_path, int transmit_ring,
int receive_path, int receive_ring)
{
int ret;
@@ -2416,7 +2424,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
struct icm *icm;
struct tb *tb;
tb = tb_domain_alloc(nhi, sizeof(struct icm));
tb = tb_domain_alloc(nhi, ICM_TIMEOUT, sizeof(struct icm));
if (!tb)
return NULL;
...
@@ -501,6 +501,77 @@ ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
return ret < 0 ? ret : 0;
}
/**
* tb_property_copy_dir() - Take a deep copy of directory
* @dir: Directory to copy
*
* This function takes a deep copy of @dir and returns back the copy. In
* case of error returns %NULL. The resulting directory needs to be
* released by calling tb_property_free_dir().
*/
struct tb_property_dir *tb_property_copy_dir(const struct tb_property_dir *dir)
{
struct tb_property *property, *p = NULL;
struct tb_property_dir *d;
if (!dir)
return NULL;
d = tb_property_create_dir(dir->uuid);
if (!d)
return NULL;
list_for_each_entry(property, &dir->properties, list) {
struct tb_property *p;
p = tb_property_alloc(property->key, property->type);
if (!p)
goto err_free;
p->length = property->length;
switch (property->type) {
case TB_PROPERTY_TYPE_DIRECTORY:
p->value.dir = tb_property_copy_dir(property->value.dir);
if (!p->value.dir)
goto err_free;
break;
case TB_PROPERTY_TYPE_DATA:
p->value.data = kmemdup(property->value.data,
property->length * 4,
GFP_KERNEL);
if (!p->value.data)
goto err_free;
break;
case TB_PROPERTY_TYPE_TEXT:
p->value.text = kzalloc(p->length * 4, GFP_KERNEL);
if (!p->value.text)
goto err_free;
strcpy(p->value.text, property->value.text);
break;
case TB_PROPERTY_TYPE_VALUE:
p->value.immediate = property->value.immediate;
break;
default:
break;
}
list_add_tail(&p->list, &d->properties);
}
return d;
err_free:
kfree(p);
tb_property_free_dir(d);
return NULL;
}
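The copy returned by tb_property_copy_dir() is released with tb_property_free_dir(). A hypothetical usage sketch (the wrapper function and its caller are illustrative, not taken from this series):

/* Illustrative only: duplicate a directory, tweak the copy, release it. */
static int use_copied_dir(const struct tb_property_dir *src)
{
	struct tb_property_dir *copy;

	copy = tb_property_copy_dir(src);	/* deep copy, NULL on failure */
	if (!copy)
		return -ENOMEM;

	/* ... add or modify properties in the copy here ... */

	tb_property_free_dir(copy);
	return 0;
}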
/**
* tb_property_add_immediate() - Add immediate property to directory
* @parent: Directory to add the property
...
@@ -626,28 +626,6 @@ int tb_port_add_nfc_credits(struct tb_port *port, int credits)
TB_CFG_PORT, ADP_CS_4, 1);
}
/**
* tb_port_set_initial_credits() - Set initial port link credits allocated
* @port: Port to set the initial credits
* @credits: Number of credits to to allocate
*
* Set initial credits value to be used for ingress shared buffering.
*/
int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
{
u32 data;
int ret;
ret = tb_port_read(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
if (ret)
return ret;
data &= ~ADP_CS_5_LCA_MASK;
data |= (credits << ADP_CS_5_LCA_SHIFT) & ADP_CS_5_LCA_MASK;
return tb_port_write(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
}
/**
* tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
* @port: Port whose counters to clear
@@ -1331,7 +1309,7 @@ int tb_switch_reset(struct tb_switch *sw)
TB_CFG_SWITCH, 2, 2);
if (res.err)
return res.err;
res = tb_cfg_reset(sw->tb->ctl, tb_route(sw), TB_CFG_DEFAULT_TIMEOUT);
res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
if (res.err > 0)
return -EIO;
return res.err;
@@ -1762,6 +1740,18 @@ static struct attribute *switch_attrs[] = {
NULL,
};
static bool has_port(const struct tb_switch *sw, enum tb_port_type type)
{
const struct tb_port *port;
tb_switch_for_each_port(sw, port) {
if (!port->disabled && port->config.type == type)
return true;
}
return false;
}
static umode_t switch_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
@@ -1770,7 +1760,8 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
if (attr == &dev_attr_authorized.attr) {
if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
sw->tb->security_level == TB_SECURITY_DPONLY)
sw->tb->security_level == TB_SECURITY_DPONLY ||
!has_port(sw, TB_TYPE_PCIE_UP))
return 0;
} else if (attr == &dev_attr_device.attr) {
if (!sw->device)
@@ -1849,6 +1840,39 @@ static void tb_switch_release(struct device *dev)
kfree(sw);
}
static int tb_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct tb_switch *sw = tb_to_switch(dev);
const char *type;
if (sw->config.thunderbolt_version == USB4_VERSION_1_0) {
if (add_uevent_var(env, "USB4_VERSION=1.0"))
return -ENOMEM;
}
if (!tb_route(sw)) {
type = "host";
} else {
const struct tb_port *port;
bool hub = false;
/* Device is hub if it has any downstream ports */
tb_switch_for_each_port(sw, port) {
if (!port->disabled && !tb_is_upstream_port(port) &&
tb_port_is_null(port)) {
hub = true;
break;
}
}
type = hub ? "hub" : "device";
}
if (add_uevent_var(env, "USB4_TYPE=%s", type))
return -ENOMEM;
return 0;
}
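The USB4_VERSION and USB4_TYPE variables added above become device properties visible to userspace. As an illustrative sketch (not part of the patch), a libudev consumer could query them like this; the syspath is hypothetical:

#include <libudev.h>
#include <stdio.h>

int main(void)
{
	struct udev *udev = udev_new();
	struct udev_device *dev;

	if (!udev)
		return 1;
	/* Hypothetical router syspath; substitute a real one. */
	dev = udev_device_new_from_syspath(udev, "/sys/bus/thunderbolt/devices/0-1");
	if (dev) {
		const char *type = udev_device_get_property_value(dev, "USB4_TYPE");

		printf("USB4_TYPE=%s\n", type ? type : "(not set)");
		udev_device_unref(dev);
	}
	udev_unref(udev);
	return 0;
}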
/*
* Currently only need to provide the callbacks. Everything else is handled
* in the connection manager.
@@ -1882,6 +1906,7 @@ static const struct dev_pm_ops tb_switch_pm_ops = {
struct device_type tb_switch_type = {
.name = "thunderbolt_device",
.release = tb_switch_release,
.uevent = tb_switch_uevent,
.pm = &tb_switch_pm_ops,
};
@@ -2542,6 +2567,8 @@ int tb_switch_add(struct tb_switch *sw)
}
tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
tb_check_quirks(sw);
ret = tb_switch_set_uuid(sw);
if (ret) {
dev_err(&sw->dev, "failed to set UUID\n");
...
@@ -15,6 +15,8 @@
#include "tb_regs.h"
#include "tunnel.h"
#define TB_TIMEOUT 100 /* ms */
/**
* struct tb_cm - Simple Thunderbolt connection manager
* @tunnel_list: List of active tunnels
@@ -1077,7 +1079,9 @@ static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
return 0;
}
static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
int transmit_path, int transmit_ring,
int receive_path, int receive_ring)
{
struct tb_cm *tcm = tb_priv(tb);
struct tb_port *nhi_port, *dst_port;
@@ -1089,9 +1093,8 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
mutex_lock(&tb->lock);
tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
xd->transmit_path, xd->receive_ring,
xd->receive_path);
tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
transmit_ring, receive_path, receive_ring);
if (!tunnel) {
mutex_unlock(&tb->lock);
return -ENOMEM;
@@ -1110,29 +1113,40 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
return 0;
}
static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
int transmit_path, int transmit_ring,
int receive_path, int receive_ring)
{
struct tb_port *dst_port;
struct tb_tunnel *tunnel;
struct tb_cm *tcm = tb_priv(tb);
struct tb_port *nhi_port, *dst_port;
struct tb_tunnel *tunnel, *n;
struct tb_switch *sw;
sw = tb_to_switch(xd->dev.parent);
dst_port = tb_port_at(xd->route, sw);
/*
* It is possible that the tunnel was already teared down (in
* case of cable disconnect) so it is fine if we cannot find it
* here anymore.
*/
tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
tb_deactivate_and_free_tunnel(tunnel);
nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
if (!tb_tunnel_is_dma(tunnel))
continue;
if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
continue;
if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
receive_path, receive_ring))
tb_deactivate_and_free_tunnel(tunnel);
}
}
static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
int transmit_path, int transmit_ring,
int receive_path, int receive_ring)
{
if (!xd->is_unplugged) {
mutex_lock(&tb->lock);
__tb_disconnect_xdomain_paths(tb, xd);
__tb_disconnect_xdomain_paths(tb, xd, transmit_path,
transmit_ring, receive_path,
receive_ring);
mutex_unlock(&tb->lock);
}
return 0;
@@ -1208,12 +1222,12 @@ static void tb_handle_hotplug(struct work_struct *work)
* tb_xdomain_remove() so setting XDomain as
* unplugged here prevents deadlock if they call
* tb_xdomain_disable_paths(). We will tear down
* the path below.
* all the tunnels below.
*/
xd->is_unplugged = true;
tb_xdomain_remove(xd);
port->xdomain = NULL;
__tb_disconnect_xdomain_paths(tb, xd);
__tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
tb_xdomain_put(xd);
tb_port_unconfigure_xdomain(port);
} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
@@ -1562,7 +1576,7 @@ struct tb *tb_probe(struct tb_nhi *nhi)
struct tb_cm *tcm;
struct tb *tb;
tb = tb_domain_alloc(nhi, sizeof(*tcm));
tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
if (!tb)
return NULL;
...
@@ -406,8 +406,12 @@ struct tb_cm_ops {
int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw,
const u8 *challenge, u8 *response);
int (*disconnect_pcie_paths)(struct tb *tb);
int (*approve_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd);
int (*disconnect_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd);
int (*approve_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd,
int transmit_path, int transmit_ring,
int receive_path, int receive_ring);
int (*disconnect_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd,
int transmit_path, int transmit_ring,
int receive_path, int receive_ring);
int (*usb4_switch_op)(struct tb_switch *sw, u16 opcode, u32 *metadata,
u8 *status, const void *tx_data, size_t tx_data_len,
void *rx_data, size_t rx_data_len);
@@ -625,7 +629,7 @@ void tb_domain_exit(void);
int tb_xdomain_init(void);
void tb_xdomain_exit(void);
struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize);
struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize);
int tb_domain_add(struct tb *tb);
void tb_domain_remove(struct tb *tb);
int tb_domain_suspend_noirq(struct tb *tb);
@@ -641,8 +645,12 @@ int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw);
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw);
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw);
int tb_domain_disconnect_pcie_paths(struct tb *tb);
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd);
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd);
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
int transmit_path, int transmit_ring,
int receive_path, int receive_ring);
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
int transmit_path, int transmit_ring,
int receive_path, int receive_ring);
int tb_domain_disconnect_all_paths(struct tb *tb);
static inline struct tb *tb_domain_get(struct tb *tb)
@@ -787,32 +795,6 @@ static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw)
return false;
}
static inline bool tb_switch_is_ice_lake(const struct tb_switch *sw)
{
if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
switch (sw->config.device_id) {
case PCI_DEVICE_ID_INTEL_ICL_NHI0:
case PCI_DEVICE_ID_INTEL_ICL_NHI1:
return true;
}
}
return false;
}
static inline bool tb_switch_is_tiger_lake(const struct tb_switch *sw)
{
if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
switch (sw->config.device_id) {
case PCI_DEVICE_ID_INTEL_TGL_NHI0:
case PCI_DEVICE_ID_INTEL_TGL_NHI1:
case PCI_DEVICE_ID_INTEL_TGL_H_NHI0:
case PCI_DEVICE_ID_INTEL_TGL_H_NHI1:
return true;
}
}
return false;
}
/** /**
* tb_switch_is_usb4() - Is the switch USB4 compliant * tb_switch_is_usb4() - Is the switch USB4 compliant
* @sw: Switch to check * @sw: Switch to check
...@@ -860,7 +842,6 @@ static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw) ...@@ -860,7 +842,6 @@ static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw)
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged); int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
int tb_port_add_nfc_credits(struct tb_port *port, int credits); int tb_port_add_nfc_credits(struct tb_port *port, int credits);
int tb_port_set_initial_credits(struct tb_port *port, u32 credits);
int tb_port_clear_counter(struct tb_port *port, int counter); int tb_port_clear_counter(struct tb_port *port, int counter);
int tb_port_unlock(struct tb_port *port); int tb_port_unlock(struct tb_port *port);
int tb_port_enable(struct tb_port *port); int tb_port_enable(struct tb_port *port);
......
...@@ -794,24 +794,14 @@ static u32 tb_dma_credits(struct tb_port *nhi) ...@@ -794,24 +794,14 @@ static u32 tb_dma_credits(struct tb_port *nhi)
return min(max_credits, 13U); return min(max_credits, 13U);
} }
static int tb_dma_activate(struct tb_tunnel *tunnel, bool active) static void tb_dma_init_path(struct tb_path *path, unsigned int efc, u32 credits)
{
struct tb_port *nhi = tunnel->src_port;
u32 credits;
credits = active ? tb_dma_credits(nhi) : 0;
return tb_port_set_initial_credits(nhi, credits);
}
static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
unsigned int efc, u32 credits)
{ {
int i; int i;
path->egress_fc_enable = efc; path->egress_fc_enable = efc;
path->ingress_fc_enable = TB_PATH_ALL; path->ingress_fc_enable = TB_PATH_ALL;
path->egress_shared_buffer = TB_PATH_NONE; path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_shared_buffer = isb; path->ingress_shared_buffer = TB_PATH_NONE;
path->priority = 5; path->priority = 5;
path->weight = 1; path->weight = 1;
path->clear_fc = true; path->clear_fc = true;
...@@ -825,28 +815,28 @@ static void tb_dma_init_path(struct tb_path *path, unsigned int isb, ...@@ -825,28 +815,28 @@ static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
* @tb: Pointer to the domain structure * @tb: Pointer to the domain structure
* @nhi: Host controller port * @nhi: Host controller port
* @dst: Destination null port which the other domain is connected to * @dst: Destination null port which the other domain is connected to
* @transmit_ring: NHI ring number used to send packets towards the
* other domain. Set to %0 if TX path is not needed.
* @transmit_path: HopID used for transmitting packets * @transmit_path: HopID used for transmitting packets
* @receive_ring: NHI ring number used to receive packets from the * @transmit_ring: NHI ring number used to send packets towards the
* other domain. Set to %0 if RX path is not needed. * other domain. Set to %-1 if TX path is not needed.
* @receive_path: HopID used for receiving packets * @receive_path: HopID used for receiving packets
* @receive_ring: NHI ring number used to receive packets from the
* other domain. Set to %-1 if RX path is not needed.
* *
* Return: Returns a tb_tunnel on success or NULL on failure. * Return: Returns a tb_tunnel on success or NULL on failure.
*/ */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi, struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
struct tb_port *dst, int transmit_ring, struct tb_port *dst, int transmit_path,
int transmit_path, int receive_ring, int transmit_ring, int receive_path,
int receive_path) int receive_ring)
{ {
struct tb_tunnel *tunnel; struct tb_tunnel *tunnel;
size_t npaths = 0, i = 0; size_t npaths = 0, i = 0;
struct tb_path *path; struct tb_path *path;
u32 credits; u32 credits;
if (receive_ring) if (receive_ring > 0)
npaths++; npaths++;
if (transmit_ring) if (transmit_ring > 0)
npaths++; npaths++;
if (WARN_ON(!npaths)) if (WARN_ON(!npaths))
...@@ -856,38 +846,96 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi, ...@@ -856,38 +846,96 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
if (!tunnel) if (!tunnel)
return NULL; return NULL;
tunnel->activate = tb_dma_activate;
tunnel->src_port = nhi; tunnel->src_port = nhi;
tunnel->dst_port = dst; tunnel->dst_port = dst;
credits = tb_dma_credits(nhi); credits = tb_dma_credits(nhi);
if (receive_ring) { if (receive_ring > 0) {
path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
"DMA RX"); "DMA RX");
if (!path) { if (!path) {
tb_tunnel_free(tunnel); tb_tunnel_free(tunnel);
return NULL; return NULL;
} }
tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL, tb_dma_init_path(path, TB_PATH_SOURCE | TB_PATH_INTERNAL, credits);
credits);
tunnel->paths[i++] = path; tunnel->paths[i++] = path;
} }
if (transmit_ring) { if (transmit_ring > 0) {
path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
"DMA TX"); "DMA TX");
if (!path) { if (!path) {
tb_tunnel_free(tunnel); tb_tunnel_free(tunnel);
return NULL; return NULL;
} }
tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits); tb_dma_init_path(path, TB_PATH_ALL, credits);
tunnel->paths[i++] = path; tunnel->paths[i++] = path;
} }
return tunnel; return tunnel;
} }
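For illustration, a connection manager's approve callback could allocate and activate a DMA tunnel with the new argument order (HopIDs before ring numbers, for both directions) roughly as follows. The wrapper below is only a hypothetical sketch; tb_switch_find_port(), tb_port_at(), tb_to_switch(), tb_tunnel_activate() and tb_tunnel_free() are existing driver helpers.

/* Hypothetical sketch of a connection manager approve callback */
static int example_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
					 int transmit_path, int transmit_ring,
					 int receive_path, int receive_ring)
{
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel;

	/* NHI port of the host router and the null port facing the peer */
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
	dst_port = tb_port_at(xd->route, tb_to_switch(xd->dev.parent));

	/* HopIDs come before the NHI ring numbers for both directions */
	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
				     transmit_ring, receive_path, receive_ring);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_tunnel_free(tunnel);
		return -EIO;
	}

	/* A real implementation would also track the tunnel for later teardown */
	return 0;
}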
/**
* tb_tunnel_match_dma() - Match DMA tunnel
* @tunnel: Tunnel to match
* @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
* @transmit_ring: NHI ring number used to send packets towards the
* other domain. Pass %-1 to ignore.
* @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
* @receive_ring: NHI ring number used to receive packets from the
* other domain. Pass %-1 to ignore.
*
 * This function can be used to match a specific DMA tunnel, if there are
 * multiple DMA tunnels going through the same XDomain connection.
 * Returns true if there is a match and false otherwise.
*/
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
int transmit_ring, int receive_path, int receive_ring)
{
const struct tb_path *tx_path = NULL, *rx_path = NULL;
int i;
if (!receive_ring || !transmit_ring)
return false;
for (i = 0; i < tunnel->npaths; i++) {
const struct tb_path *path = tunnel->paths[i];
if (!path)
continue;
if (tb_port_is_nhi(path->hops[0].in_port))
tx_path = path;
else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
rx_path = path;
}
if (transmit_ring > 0 || transmit_path > 0) {
if (!tx_path)
return false;
if (transmit_ring > 0 &&
(tx_path->hops[0].in_hop_index != transmit_ring))
return false;
if (transmit_path > 0 &&
(tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
return false;
}
if (receive_ring > 0 || receive_path > 0) {
if (!rx_path)
return false;
if (receive_path > 0 &&
(rx_path->hops[0].in_hop_index != receive_path))
return false;
if (receive_ring > 0 &&
(rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
return false;
}
return true;
}
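For illustration, a connection manager tearing down DMA tunnels could walk its tunnel list and use tb_tunnel_match_dma() to pick the right one roughly as follows. This fragment is only a sketch; the tcm->tunnel_list field name is an assumption, while tb_tunnel_is_dma(), tb_tunnel_deactivate() and tb_tunnel_free() are existing helpers.

	/* Hypothetical fragment: tcm->tunnel_list is an assumed per-domain tunnel list */
	struct tb_tunnel *tunnel, *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (!tb_tunnel_is_dma(tunnel))
			continue;
		/* Passing -1 for all four values matches every DMA tunnel */
		if (!tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
					 receive_path, receive_ring))
			continue;
		/* A real implementation would also check the tunnel ends at this XDomain */
		tb_tunnel_deactivate(tunnel);
		list_del(&tunnel->list);
		tb_tunnel_free(tunnel);
	}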
static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down) static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
{ {
int ret, up_max_rate, down_max_rate; int ret, up_max_rate, down_max_rate;
......
...@@ -70,9 +70,11 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in, ...@@ -70,9 +70,11 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
struct tb_port *out, int max_up, struct tb_port *out, int max_up,
int max_down); int max_down);
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi, struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
struct tb_port *dst, int transmit_ring, struct tb_port *dst, int transmit_path,
int transmit_path, int receive_ring, int transmit_ring, int receive_path,
int receive_path); int receive_ring);
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
int transmit_ring, int receive_path, int receive_ring);
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down); struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down);
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up, struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
struct tb_port *down, int max_up, struct tb_port *down, int max_up,
......
...@@ -146,6 +146,7 @@ struct tb_property_dir *tb_property_parse_dir(const u32 *block, ...@@ -146,6 +146,7 @@ struct tb_property_dir *tb_property_parse_dir(const u32 *block,
size_t block_len); size_t block_len);
ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block, ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
size_t block_len); size_t block_len);
struct tb_property_dir *tb_property_copy_dir(const struct tb_property_dir *dir);
struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid); struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid);
void tb_property_free_dir(struct tb_property_dir *dir); void tb_property_free_dir(struct tb_property_dir *dir);
int tb_property_add_immediate(struct tb_property_dir *parent, const char *key, int tb_property_add_immediate(struct tb_property_dir *parent, const char *key,
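For illustration, tb_property_copy_dir() makes a deep copy of a property directory so it can be modified without touching the original. A minimal hypothetical sketch; the helper name and the added "maxhopid" value are illustrative only, while tb_property_add_immediate() and tb_property_free_dir() are the declarations shown above.

/* Hypothetical helper: extend a private copy without touching the original */
static struct tb_property_dir *example_copy_and_extend(const struct tb_property_dir *dir)
{
	struct tb_property_dir *copy;

	copy = tb_property_copy_dir(dir);
	if (!copy)
		return NULL;

	/* Changes affect only the copy; release it later with tb_property_free_dir() */
	if (tb_property_add_immediate(copy, "maxhopid", 15)) {
		tb_property_free_dir(copy);
		return NULL;
	}

	return copy;
}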
...@@ -179,23 +180,24 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir); ...@@ -179,23 +180,24 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
 * @route: Route string via which the other domain can be reached * @route: Route string via which the other domain can be reached
* @vendor: Vendor ID of the remote domain * @vendor: Vendor ID of the remote domain
 * @device: Device ID of the remote domain * @device: Device ID of the remote domain
* @local_max_hopid: Maximum input HopID of this host
* @remote_max_hopid: Maximum input HopID of the remote host
* @lock: Lock to serialize access to the following fields of this structure * @lock: Lock to serialize access to the following fields of this structure
* @vendor_name: Name of the vendor (or %NULL if not known) * @vendor_name: Name of the vendor (or %NULL if not known)
* @device_name: Name of the device (or %NULL if not known) * @device_name: Name of the device (or %NULL if not known)
* @link_speed: Speed of the link in Gb/s * @link_speed: Speed of the link in Gb/s
* @link_width: Width of the link (1 or 2) * @link_width: Width of the link (1 or 2)
* @is_unplugged: The XDomain is unplugged * @is_unplugged: The XDomain is unplugged
* @resume: The XDomain is being resumed
* @needs_uuid: If the XDomain does not have @remote_uuid it will be * @needs_uuid: If the XDomain does not have @remote_uuid it will be
* queried first * queried first
* @transmit_path: HopID which the remote end expects us to transmit
* @transmit_ring: Local ring (hop) where outgoing packets are pushed
* @receive_path: HopID which we expect the remote end to transmit
* @receive_ring: Local ring (hop) where incoming packets arrive
* @service_ids: Used to generate IDs for the services * @service_ids: Used to generate IDs for the services
* @properties: Properties exported by the remote domain * @in_hopids: Input HopIDs for DMA tunneling
 * @property_block_gen: Generation of @properties * @out_hopids: Output HopIDs for DMA tunneling
* @properties_lock: Lock protecting @properties. * @local_property_block: Local block of properties
* @local_property_block_gen: Generation of @local_property_block
* @local_property_block_len: Length of the @local_property_block in dwords
* @remote_properties: Properties exported by the remote domain
* @remote_property_block_gen: Generation of @remote_properties
* @get_uuid_work: Work used to retrieve @remote_uuid * @get_uuid_work: Work used to retrieve @remote_uuid
* @uuid_retries: Number of times left @remote_uuid is requested before * @uuid_retries: Number of times left @remote_uuid is requested before
* giving up * giving up
...@@ -225,21 +227,23 @@ struct tb_xdomain { ...@@ -225,21 +227,23 @@ struct tb_xdomain {
u64 route; u64 route;
u16 vendor; u16 vendor;
u16 device; u16 device;
unsigned int local_max_hopid;
unsigned int remote_max_hopid;
struct mutex lock; struct mutex lock;
const char *vendor_name; const char *vendor_name;
const char *device_name; const char *device_name;
unsigned int link_speed; unsigned int link_speed;
unsigned int link_width; unsigned int link_width;
bool is_unplugged; bool is_unplugged;
bool resume;
bool needs_uuid; bool needs_uuid;
u16 transmit_path;
u16 transmit_ring;
u16 receive_path;
u16 receive_ring;
struct ida service_ids; struct ida service_ids;
struct tb_property_dir *properties; struct ida in_hopids;
u32 property_block_gen; struct ida out_hopids;
u32 *local_property_block;
u32 local_property_block_gen;
u32 local_property_block_len;
struct tb_property_dir *remote_properties;
u32 remote_property_block_gen;
struct delayed_work get_uuid_work; struct delayed_work get_uuid_work;
int uuid_retries; int uuid_retries;
struct delayed_work get_properties_work; struct delayed_work get_properties_work;
...@@ -252,10 +256,22 @@ struct tb_xdomain { ...@@ -252,10 +256,22 @@ struct tb_xdomain {
int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd); int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd);
void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd); void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd);
int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path, int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid);
u16 transmit_ring, u16 receive_path, void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid);
u16 receive_ring); int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid);
int tb_xdomain_disable_paths(struct tb_xdomain *xd); void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid);
int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
int transmit_ring, int receive_path,
int receive_ring);
int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
int transmit_ring, int receive_path,
int receive_ring);
static inline int tb_xdomain_disable_all_paths(struct tb_xdomain *xd)
{
return tb_xdomain_disable_paths(xd, -1, -1, -1, -1);
}
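For illustration, a service driver using the new HopID and path API would roughly: reserve a local input HopID, exchange HopIDs with the peer over the service protocol, then enable the paths together with its local NHI ring numbers. The fragment below is only a sketch; remote_rx_hopid, tx_ring and rx_ring stand for values the driver obtains elsewhere, and passing -1 to tb_xdomain_alloc_in_hopid() is assumed to request the next available HopID.

	/* Hypothetical fragment: remote_rx_hopid, tx_ring, rx_ring come from the service protocol */
	int local_rx_hopid, ret;

	/* Reserve the input HopID the peer will transmit to on this host */
	local_rx_hopid = tb_xdomain_alloc_in_hopid(xd, -1);
	if (local_rx_hopid < 0)
		return local_rx_hopid;

	/* transmit_path is the peer's input HopID, receive_path the one reserved above */
	ret = tb_xdomain_enable_paths(xd, remote_rx_hopid, tx_ring,
				      local_rx_hopid, rx_ring);
	if (ret) {
		tb_xdomain_release_in_hopid(xd, local_rx_hopid);
		return ret;
	}

	/* Teardown mirrors setup; tb_xdomain_disable_all_paths(xd) drops everything */
	tb_xdomain_disable_paths(xd, remote_rx_hopid, tx_ring,
				 local_rx_hopid, rx_ring);
	tb_xdomain_release_in_hopid(xd, local_rx_hopid);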
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid); struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid);
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route); struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route);
......