net: thunderbolt: Enable full end-to-end flow control
authorMika Westerberg <mika.westerberg@linux.intel.com>
Tue, 30 Aug 2022 15:32:49 +0000 (18:32 +0300)
committerDavid S. Miller <davem@davemloft.net>
Wed, 31 Aug 2022 13:05:12 +0000 (14:05 +0100)
The USB4NET protocol allows the networking driver to take advantage of
the end-to-end flow control supported by the USB4 host interface. This
should prevent the receiving side from dropping network packets.

In addition, add a module parameter that can be used to turn this off
in case it causes problems.
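
For example, assuming the driver is built as the thunderbolt-net module
(the exact module name depends on the kernel configuration), full E2E
flow control can be disabled at load time with:

  modprobe thunderbolt-net e2e=0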

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/thunderbolt.c

index ab3f0456298026c000000260ff50da01f264a266..8e272d2a61e592cfcca31d5b65ebde4fe36ece02 100644 (file)
@@ -30,6 +30,7 @@
 #define TBNET_RING_SIZE                256
 #define TBNET_LOGIN_RETRIES    60
 #define TBNET_LOGOUT_RETRIES   10
+#define TBNET_E2E              BIT(0)
 #define TBNET_MATCH_FRAGS_ID   BIT(1)
 #define TBNET_64K_FRAMES       BIT(2)
 #define TBNET_MAX_MTU          SZ_64K
@@ -209,6 +210,10 @@ static const uuid_t tbnet_svc_uuid =
 
 static struct tb_property_dir *tbnet_dir;
 
+static bool tbnet_e2e = true;
+module_param_named(e2e, tbnet_e2e, bool, 0444);
+MODULE_PARM_DESC(e2e, "USB4NET full end-to-end flow control (default: true)");
+
 static void tbnet_fill_header(struct thunderbolt_ip_header *hdr, u64 route,
        u8 sequence, const uuid_t *initiator_uuid, const uuid_t *target_uuid,
        enum thunderbolt_ip_type type, size_t size, u32 command_id)
@@ -873,6 +878,7 @@ static int tbnet_open(struct net_device *dev)
        struct tb_xdomain *xd = net->xd;
        u16 sof_mask, eof_mask;
        struct tb_ring *ring;
+       unsigned int flags;
        int hopid;
 
        netif_carrier_off(dev);
@@ -897,9 +903,14 @@ static int tbnet_open(struct net_device *dev)
        sof_mask = BIT(TBIP_PDF_FRAME_START);
        eof_mask = BIT(TBIP_PDF_FRAME_END);
 
-       ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE,
-                               RING_FLAG_FRAME, 0, sof_mask, eof_mask,
-                               tbnet_start_poll, net);
+       flags = RING_FLAG_FRAME;
+       /* Only enable full E2E if the other end supports it too */
+       if (tbnet_e2e && net->svc->prtcstns & TBNET_E2E)
+               flags |= RING_FLAG_E2E;
+
+       ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE, flags,
+                               net->tx_ring.ring->hop, sof_mask,
+                               eof_mask, tbnet_start_poll, net);
        if (!ring) {
                netdev_err(dev, "failed to allocate Rx ring\n");
                tb_ring_free(net->tx_ring.ring);
@@ -1362,6 +1373,7 @@ static struct tb_service_driver tbnet_driver = {
 
 static int __init tbnet_init(void)
 {
+       unsigned int flags;
        int ret;
 
        tbnet_dir = tb_property_create_dir(&tbnet_dir_uuid);
@@ -1371,12 +1383,11 @@ static int __init tbnet_init(void)
        tb_property_add_immediate(tbnet_dir, "prtcid", 1);
        tb_property_add_immediate(tbnet_dir, "prtcvers", 1);
        tb_property_add_immediate(tbnet_dir, "prtcrevs", 1);
-       /* Currently only announce support for match frags ID (bit 1). Bit 0
-        * is reserved for full E2E flow control which we do not support at
-        * the moment.
-        */
-       tb_property_add_immediate(tbnet_dir, "prtcstns",
-                                 TBNET_MATCH_FRAGS_ID | TBNET_64K_FRAMES);
+
+       flags = TBNET_MATCH_FRAGS_ID | TBNET_64K_FRAMES;
+       if (tbnet_e2e)
+               flags |= TBNET_E2E;
+       tb_property_add_immediate(tbnet_dir, "prtcstns", flags);
 
        ret = tb_register_property_dir("network", tbnet_dir);
        if (ret) {