From: David S. Miller <davem@davemloft.net>
Date: Thu, 2 Nov 2017 05:59:52 +0000 (+0900)
Subject: Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=ed29668d1aa2c6f01e61dd616df13b5241cee7e0;p=linux.git

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Smooth Cong Wang's bug fix into 'net-next'.

Basically put the bulk of the tcf_block_put() logic from 'net'
into tcf_block_put_ext(), but after the offload unbind.

Signed-off-by: David S. Miller <davem@davemloft.net>
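For reference, the resolved tcf_block_put_ext() ends up with the shape
below. This is a sketch assembled from the cls_api.c hunk in this merge,
not a verbatim copy of the tree; the surrounding definitions (struct
tcf_block, struct tcf_block_ext_info, tcf_block_offload_unbind(),
tcf_chain_flush(), tcf_block_put_final() and tcf_queue_work()) are
assumed from net-next:

	void tcf_block_put_ext(struct tcf_block *block,
			       struct tcf_proto __rcu **p_filter_chain,
			       struct Qdisc *q, struct tcf_block_ext_info *ei)
	{
		struct tcf_chain *chain, *tmp;

		if (!block)
			return;

		/* net-next side: unbind hardware offloads before teardown. */
		tcf_block_offload_unbind(block, q, ei);

		/* 'net' side (Cong Wang's fix): flush every chain directly
		 * under RTNL; filters are destroyed on the tc filter
		 * workqueue, so nothing races with this list walk.
		 */
		list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
			tcf_chain_flush(chain);

		INIT_WORK(&block->work, tcf_block_put_final);
		/* rcu_barrier() makes sure outstanding RCU callbacks have
		 * queued their works before the final put is queued behind
		 * them; pending works can not be flushed here since RTNL
		 * is held.
		 */
		rcu_barrier();
		tcf_queue_work(&block->work);
	}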
---

ed29668d1aa2c6f01e61dd616df13b5241cee7e0
diff --cc include/net/tcp.h
index a2510cdef4b56,e6d0002a1b0bc..c2bf2a822b109
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@@ -1733,13 -1768,11 +1733,13 @@@ static inline struct sk_buff *tcp_highe
  static inline void tcp_highest_sack_reset(struct sock *sk)
  {
 -	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
 +	struct sk_buff *skb = tcp_rtx_queue_head(sk);
 +
 +	tcp_sk(sk)->highest_sack = skb ?: tcp_send_head(sk);
  }
  
- /* Called when old skb is about to be deleted (to be combined with new skb) */
- static inline void tcp_highest_sack_combine(struct sock *sk,
+ /* Called when old skb is about to be deleted and replaced by new skb */
+ static inline void tcp_highest_sack_replace(struct sock *sk,
  					    struct sk_buff *old,
  					    struct sk_buff *new)
  {
diff --cc net/ipv4/tcp_output.c
index a69a34f57330f,823003eef3a21..a85e8a282d173
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@@ -2735,8 -2666,10 +2736,8 @@@ static bool tcp_collapse_retrans(struc
  		else if (!skb_shift(skb, next_skb, next_skb_size))
  			return false;
  	}
- 	tcp_highest_sack_combine(sk, next_skb, skb);
+ 	tcp_highest_sack_replace(sk, next_skb, skb);
  
 -	tcp_unlink_write_queue(next_skb, sk);
 -
  	if (next_skb->ip_summed == CHECKSUM_PARTIAL)
  		skb->ip_summed = CHECKSUM_PARTIAL;
diff --cc net/sched/cls_api.c
index d9d54b367d232,b2d3107454872..2c03fcbc7188c
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@@ -331,47 -289,22 +331,27 @@@ static void tcf_block_put_final(struct
  }
  
  /* XXX: Standalone actions are not allowed to jump to any chain, and bound
-  * actions should be all removed after flushing. However, filters are destroyed
-  * in RCU callbacks, we have to hold the chains first, otherwise we would
-  * always race with RCU callbacks on this list without proper locking.
+  * actions should be all removed after flushing. However, filters are now
+  * destroyed in tc filter workqueue with RTNL lock, they can not race here.
   */
- static void tcf_block_put_deferred(struct work_struct *work)
- {
- 	struct tcf_block *block = container_of(work, struct tcf_block, work);
- 	struct tcf_chain *chain;
- 
- 	rtnl_lock();
- 	/* Hold a refcnt for all chains, except 0, in case they are gone. */
- 	list_for_each_entry(chain, &block->chain_list, list)
- 		if (chain->index)
- 			tcf_chain_hold(chain);
- 
- 	/* No race on the list, because no chain could be destroyed. */
- 	list_for_each_entry(chain, &block->chain_list, list)
- 		tcf_chain_flush(chain);
- 
- 	INIT_WORK(&block->work, tcf_block_put_final);
- 	/* Wait for RCU callbacks to release the reference count and make
- 	 * sure their works have been queued before this.
- 	 */
- 	rcu_barrier();
- 	tcf_queue_work(&block->work);
- 	rtnl_unlock();
- }
- 
 -void tcf_block_put(struct tcf_block *block)
 +void tcf_block_put_ext(struct tcf_block *block,
 +		       struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
 +		       struct tcf_block_ext_info *ei)
  {
+ 	struct tcf_chain *chain, *tmp;
+ 
  	if (!block)
  		return;
  
 +	tcf_block_offload_unbind(block, q, ei);
 +
- 	INIT_WORK(&block->work, tcf_block_put_deferred);
+ 	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+ 		tcf_chain_flush(chain);
+ 
+ 	INIT_WORK(&block->work, tcf_block_put_final);
- 	/* Wait for RCU callbacks to release the reference count and make
- 	 * sure their works have been queued before this.
+ 	/* Wait for existing RCU callbacks to cool down, make sure their works
+ 	 * have been queued before this. We can not flush pending works here
+ 	 * because we are holding the RTNL lock.
  	 */
  	rcu_barrier();
  	tcf_queue_work(&block->work);
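A side note on the include/net/tcp.h hunk: "skb ?: tcp_send_head(sk)" uses
the GNU C extension that omits the middle operand of ?:, i.e. "a ?: b"
means "a ? a : b" with "a" evaluated only once. A minimal standalone
illustration (hypothetical userspace example, not kernel code; builds with
gcc or clang):

	#include <stdio.h>

	/* "preferred ?: fallback" yields preferred when it is non-NULL,
	 * otherwise fallback -- mirroring how tcp_highest_sack_reset()
	 * falls back to tcp_send_head() when the rtx queue is empty.
	 */
	static const char *pick(const char *preferred, const char *fallback)
	{
		return preferred ?: fallback;
	}

	int main(void)
	{
		printf("%s\n", pick(NULL, "send head"));       /* send head */
		printf("%s\n", pick("rtx head", "send head")); /* rtx head */
		return 0;
	}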