tcp->seen[1].td_maxwin = 0;
 }
 
-#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT        (120 * HZ)
-#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT        (30 * HZ)
-
 static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
 {
        const struct nf_conntrack_l4proto *l4proto;
+       struct net *net = nf_ct_net(ct);
        int l4num = nf_ct_protonum(ct);
        unsigned int timeout;
 
        if (!l4proto)
                return;
 
-       if (l4num == IPPROTO_TCP)
-               timeout = NF_FLOWTABLE_TCP_PICKUP_TIMEOUT;
-       else if (l4num == IPPROTO_UDP)
-               timeout = NF_FLOWTABLE_UDP_PICKUP_TIMEOUT;
-       else
+       if (l4num == IPPROTO_TCP) {
+               struct nf_tcp_net *tn = nf_tcp_pernet(net);
+
+               timeout = tn->offload_pickup;
+       } else if (l4num == IPPROTO_UDP) {
+               struct nf_udp_net *tn = nf_udp_pernet(net);
+
+               timeout = tn->offload_pickup;
+       } else {
                return;
+       }
 
        if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
                ct->timeout = nfct_time_stamp + timeout;
        .automatic_shrinking    = true,
 };
 
+/*
+ * flow_offload_get_timeout - pick the expiration period for an offloaded flow
+ *
+ * TCP and UDP flows use the per-netns offload_timeout value from their
+ * protocol's pernet state (nf_tcp_pernet()/nf_udp_pernet()); any other
+ * protocol, or a missing l4proto handler, falls back to the compile-time
+ * NF_FLOW_TIMEOUT default.  Non-static: also used by the hw-offload stats
+ * path to extend flow->timeout.
+ */
+unsigned long flow_offload_get_timeout(struct flow_offload *flow)
+{
+       const struct nf_conntrack_l4proto *l4proto;
+       unsigned long timeout = NF_FLOW_TIMEOUT;
+       struct net *net = nf_ct_net(flow->ct);
+       int l4num = nf_ct_protonum(flow->ct);
+
+       l4proto = nf_ct_l4proto_find(l4num);
+       if (!l4proto)
+               return timeout;
+
+       if (l4num == IPPROTO_TCP) {
+               struct nf_tcp_net *tn = nf_tcp_pernet(net);
+
+               timeout = tn->offload_timeout;
+       } else if (l4num == IPPROTO_UDP) {
+               struct nf_udp_net *tn = nf_udp_pernet(net);
+
+               timeout = tn->offload_timeout;
+       }
+
+       return timeout;
+}
+
 int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
 {
        int err;
 
-       flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
+       flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
 
        err = rhashtable_insert_fast(&flow_table->rhashtable,
                                     &flow->tuplehash[0].node,
 void flow_offload_refresh(struct nf_flowtable *flow_table,
                          struct flow_offload *flow)
 {
-       flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
+       flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
 
        if (likely(!nf_flowtable_hw_offload(flow_table)))
                return;
 
 
        lastused = max_t(u64, stats[0].lastused, stats[1].lastused);
        offload->flow->timeout = max_t(u64, offload->flow->timeout,
-                                      lastused + NF_FLOW_TIMEOUT);
+                                      lastused + flow_offload_get_timeout(offload->flow));
 
        if (offload->flowtable->flags & NF_FLOWTABLE_COUNTER) {
                if (stats[0].pkts)
        __s32 delta;
 
        delta = nf_flow_timeout_delta(flow->timeout);
-       if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10))
+       if ((delta >= (9 * flow_offload_get_timeout(flow)) / 10))
                return;
 
        offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_STATS);