From: David S. Miller
Date: Wed, 17 Feb 2021 01:30:20 +0000 (-0800)
Subject: Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=d489ded1a3690d7eca8633575cba3f7dac8484c7;p=linux.git

Merge git://git./linux/kernel/git/netdev/net
---

d489ded1a3690d7eca8633575cba3f7dac8484c7
diff --cc drivers/net/ethernet/ibm/ibmvnic.c
index 927d5f36d3081,13ae7eee7ef5f..5cf7e5a367f0a
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@@ -2288,7 -2395,10 +2294,8 @@@ static int ibmvnic_reset(struct ibmvnic
  	unsigned long flags;
  	int ret;
  
- 	/* If failover is pending don't schedule any other reset.
- 	spin_lock_irqsave(&adapter->rwi_lock, flags);
- 
+ 	/*
+ 	 * If failover is pending don't schedule any other reset.
  	 * Instead let the failover complete. If there is already a
  	 * failover reset scheduled, we will detect and drop the
  	 * duplicate reset when walking the ->rwi_list below.
diff --cc drivers/net/ethernet/ibm/ibmvnic.h
index 270d1cac86a4d,72fea3b1c87d9..e4dcc63b9710b
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@@ -1081,12 -1080,10 +1081,14 @@@ struct ibmvnic_adapter
  	struct tasklet_struct tasklet;
  	enum vnic_state state;
+ 	/* Used for serialization of state field */
+ 	spinlock_t state_lock;
  	enum ibmvnic_reset_reason reset_reason;
+ 	/* when taking both state and rwi locks, take state lock first */
+ 	spinlock_t rwi_lock;
  	struct list_head rwi_list;
+ 	/* Used for serialization of rwi_list */
+ 	spinlock_t rwi_lock;
  	struct work_struct ibmvnic_reset;
  	struct delayed_work ibmvnic_delayed_reset;
  	unsigned long resetting;
diff --cc drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index aa76a6e0dae85,41474e42a819a..d7d8a68ef23d7
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@@ -129,18 -127,12 +129,23 @@@ static int mlx5_devlink_reload_down(str
  				    struct netlink_ext_ack *extack)
  {
  	struct mlx5_core_dev *dev = devlink_priv(devlink);
+ 	bool sf_dev_allocated;
+ 
+ 	sf_dev_allocated = mlx5_sf_dev_allocated(dev);
+ 	if (sf_dev_allocated) {
+ 		/* Reload results in deleting SF device which further results in
+ 		 * unregistering devlink instance while holding devlink_mutex.
+ 		 * Hence, do not support reload.
+ 		 */
+ 		NL_SET_ERR_MSG_MOD(extack, "reload is unsupported when SFs are allocated\n");
+ 		return -EOPNOTSUPP;
+ 	}
+ 	if (mlx5_lag_is_active(dev)) {
+ 		NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode\n");
+ 		return -EOPNOTSUPP;
+ 	}
+ 
  	switch (action) {
  	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
  		mlx5_unload_one(dev, false);
diff --cc drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 0b503ebe59ecc,24e2c0d955b99..f3f6eb0819489
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@@ -793,20 -884,32 +885,30 @@@ mlx5_tc_ct_shared_counter_get(struct ml
  	}
  
  	/* Use the same counter as the reverse direction */
- 	mutex_lock(&ct_priv->shared_counter_lock);
- 	rev_entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &rev_tuple,
- 					   tuples_ht_params);
- 	if (rev_entry) {
- 		if (refcount_inc_not_zero(&rev_entry->counter->refcount)) {
- 			mutex_unlock(&ct_priv->shared_counter_lock);
- 			return rev_entry->counter;
- 		}
+ 	spin_lock_bh(&ct_priv->ht_lock);
+ 	rev_entry = mlx5_tc_ct_entry_get(ct_priv, &rev_tuple);
+ 
+ 	if (IS_ERR(rev_entry)) {
+ 		spin_unlock_bh(&ct_priv->ht_lock);
+ 		goto create_counter;
  	}
- 	mutex_unlock(&ct_priv->shared_counter_lock);
+ 
+ 	if (rev_entry && refcount_inc_not_zero(&rev_entry->counter->refcount)) {
+ 		ct_dbg("Using shared counter entry=0x%p rev=0x%p\n", entry, rev_entry);
+ 		shared_counter = rev_entry->counter;
+ 		spin_unlock_bh(&ct_priv->ht_lock);
+ 
+ 		mlx5_tc_ct_entry_put(rev_entry);
+ 		return shared_counter;
+ 	}
+ 
+ 	spin_unlock_bh(&ct_priv->ht_lock);
+ 
+ create_counter:
  	shared_counter = mlx5_tc_ct_counter_create(ct_priv);
- 	if (IS_ERR(shared_counter)) {
- 		ret = PTR_ERR(shared_counter);
- 		return ERR_PTR(ret);
- 	}
+ 	if (IS_ERR(shared_counter))
+ 		return shared_counter;
  
  	shared_counter->is_shared = true;
  	refcount_set(&shared_counter->refcount, 1);
diff --cc drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index c8866c14b8a37,a2e0b548bf570..39acbc83682d3
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@@ -65,8 -65,7 +65,9 @@@
  #include "en/devlink.h"
  #include "lib/mlx5.h"
  #include "en/ptp.h"
 +#include "qos.h"
 +#include "en/trap.h"
+ #include "fpga/ipsec.h"
  
  bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
  {
@@@ -2122,8 -2069,10 +2123,13 @@@ static void mlx5e_build_rq_frags_info(s
  	u32 buf_size = 0;
  	int i;
  
++<<<<<<< HEAD
+ 	if (MLX5_IPSEC_DEV(mdev))
++=======
+ #ifdef CONFIG_MLX5_EN_IPSEC
+ 	if (mlx5_fpga_is_ipsec_device(mdev))
++>>>>>>> 3af409ca278d4a8d50e91f9f7c4c33b175645cf3
  		byte_count += MLX5E_METADATA_ETHER_LEN;
 -#endif
  
  	if (mlx5e_rx_is_linear_skb(params, xsk)) {
  		int frag_stride;
diff --cc drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index fac96ea819a1d,4864deed9dc94..1b6ad94ebb103
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@@ -1783,10 -1794,12 +1783,10 @@@ int mlx5e_rq_set_handlers(struct mlx5e_
  		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
  		rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
  
- 		if (MLX5_IPSEC_DEV(mdev)) {
- 			netdev_err(netdev, "MPWQE RQ with IPSec offload not supported\n");
-#ifdef CONFIG_MLX5_EN_IPSEC
+ 		if (mlx5_fpga_is_ipsec_device(mdev)) {
+ 			netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n");
  			return -EINVAL;
  		}
 -#endif
  		if (!rq->handle_rx_cqe) {
  			netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
  			return -EINVAL;
diff --cc drivers/net/ethernet/realtek/r8169_main.c
index cbc30df4e08a4,e7a59dc5fe498..9ce98e3d3f9f7
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@@ -2228,10 -2202,68 +2228,35 @@@ static void rtl_prepare_power_down(stru
  	if (device_may_wakeup(tp_to_dev(tp))) {
  		phy_speed_down(tp->phydev, false);
- 		rtl_wol_suspend_quirk(tp);
- 		return;
+ 		rtl_wol_enable_rx(tp);
  	}
+ 
+ 	switch (tp->mac_version) {
+ 	case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
+ 	case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
+ 	case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
+ 	case RTL_GIGA_MAC_VER_37:
+ 	case RTL_GIGA_MAC_VER_39:
+ 	case RTL_GIGA_MAC_VER_43:
+ 	case RTL_GIGA_MAC_VER_44:
+ 	case RTL_GIGA_MAC_VER_45:
+ 	case RTL_GIGA_MAC_VER_46:
+ 	case RTL_GIGA_MAC_VER_47:
+ 	case RTL_GIGA_MAC_VER_48:
+ 	case RTL_GIGA_MAC_VER_50 ... RTL_GIGA_MAC_VER_63:
+ 		RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
+ 		break;
+ 	case RTL_GIGA_MAC_VER_40:
+ 	case RTL_GIGA_MAC_VER_41:
+ 	case RTL_GIGA_MAC_VER_49:
+ 		rtl_eri_clear_bits(tp, 0x1a8, 0xfc000000);
+ 		RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
+ 		break;
+ 	default:
+ 		break;
+ 	}
  }
  
-static void rtl_pll_power_up(struct rtl8169_private *tp)
-{
- 	switch (tp->mac_version) {
- 	case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
- 	case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
- 	case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
- 	case RTL_GIGA_MAC_VER_37:
- 	case RTL_GIGA_MAC_VER_39:
- 	case RTL_GIGA_MAC_VER_43:
- 		RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80);
- 		break;
- 	case RTL_GIGA_MAC_VER_44:
- 	case RTL_GIGA_MAC_VER_45:
- 	case RTL_GIGA_MAC_VER_46:
- 	case RTL_GIGA_MAC_VER_47:
- 	case RTL_GIGA_MAC_VER_48:
- 	case RTL_GIGA_MAC_VER_50 ... RTL_GIGA_MAC_VER_63:
- 		RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
- 		break;
- 	case RTL_GIGA_MAC_VER_40:
- 	case RTL_GIGA_MAC_VER_41:
- 	case RTL_GIGA_MAC_VER_49:
- 		RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
- 		rtl_eri_set_bits(tp, 0x1a8, 0xfc000000);
- 		break;
- 	default:
- 		break;
- 	}
- 
- 	phy_resume(tp->phydev);
-}
- 
  static void rtl_init_rxcfg(struct rtl8169_private *tp)
  {
  	switch (tp->mac_version) {
diff --cc include/uapi/linux/pkt_cls.h
index afe6836e44b15,88f4bf0047e7a..7ea59cfe1fa72
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@@ -591,8 -591,8 +591,9 @@@ enum
  	TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED = 1 << 1, /* Part of an existing connection. */
  	TCA_FLOWER_KEY_CT_FLAGS_RELATED = 1 << 2, /* Related to an established connection. */
  	TCA_FLOWER_KEY_CT_FLAGS_TRACKED = 1 << 3, /* Conntrack has occurred. */
- 
+ 	TCA_FLOWER_KEY_CT_FLAGS_INVALID = 1 << 4, /* Conntrack is invalid. */
+ 	TCA_FLOWER_KEY_CT_FLAGS_REPLY = 1 << 5, /* Packet is in the reply direction. */
+ 	__TCA_FLOWER_KEY_CT_FLAGS_MAX,
  };
  
  enum {
diff --cc net/mptcp/subflow.c
index ce2dea2a6e0a2,8b2338dfdc807..06e233410e0e5
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@@ -108,23 -100,8 +108,13 @@@ static void subflow_init_req(struct req
  	subflow_req->mp_join = 0;
  	subflow_req->msk = NULL;
  	mptcp_token_init_request(req);
- 
- #ifdef CONFIG_TCP_MD5SIG
- 	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
- 	 * TCP option space.
- 	 */
- 	if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
- 		return -EINVAL;
- #endif
- 
- 	return 0;
  }
  
+static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
+{
+	return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport;
+}
+
  /* Init mptcp request socket.
  *
  * Returns an error code if a JOIN has failed and a TCP reset
@@@ -1118,12 -1032,49 +1110,52 @@@ static void subflow_data_ready(struct s
  
  static void subflow_write_space(struct sock *ssk)
  {
- 	/* we take action in __mptcp_clean_una() */
+ 	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
+ 
+ 	mptcp_propagate_sndbuf(sk, ssk);
+ 	mptcp_write_space(sk);
  }
  
+ void __mptcp_error_report(struct sock *sk)
+ {
+ 	struct mptcp_subflow_context *subflow;
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 
+ 	mptcp_for_each_subflow(msk, subflow) {
+ 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ 		int err = sock_error(ssk);
+ 
+ 		if (!err)
+ 			continue;
+ 
+ 		/* only propagate errors on fallen-back sockets or
+ 		 * on MPC connect
+ 		 */
+ 		if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
+ 			continue;
+ 
+ 		inet_sk_state_store(sk, inet_sk_state_load(ssk));
+ 		sk->sk_err = -err;
+ 
+ 		/* This barrier is coupled with smp_rmb() in mptcp_poll() */
+ 		smp_wmb();
+ 		sk->sk_error_report(sk);
+ 		break;
+ 	}
+ }
+ 
+ static void subflow_error_report(struct sock *ssk)
+ {
+ 	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
+ 
+ 	mptcp_data_lock(sk);
+ 	if (!sock_owned_by_user(sk))
+ 		__mptcp_error_report(sk);
+ 	else
+ 		set_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags);
+ 	mptcp_data_unlock(sk);
+ }
+ 
  static struct inet_connection_sock_af_ops *
  subflow_default_af_ops(struct sock *sk)
  {
diff --cc tools/testing/selftests/net/forwarding/tc_flower.sh
index a554838666c42,b11d8e6b5bc14..4b58ccae34290
--- a/tools/testing/selftests/net/forwarding/tc_flower.sh
+++ b/tools/testing/selftests/net/forwarding/tc_flower.sh
@@@ -3,9 -3,7 +3,9 @@@
  ALL_TESTS="match_dst_mac_test match_src_mac_test match_dst_ip_test \
  	match_src_ip_test match_ip_flags_test match_pcp_test match_vlan_test \
- 	match_ip_tos_test match_indev_test match_mpls_label_test \
- 	match_ip_tos_test match_indev_test match_ip_ttl_test"
++	match_ip_tos_test match_indev_test match_ip_ttl_test match_mpls_label_test \
+ 	match_mpls_tc_test match_mpls_bos_test match_mpls_ttl_test \
+ 	match_mpls_lse_test"
  NUM_NETIFS=2
  source tc_common.sh
  source lib.sh
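
A note on the ibmvnic.h hunk above: the new comments encode a lock hierarchy. Any path that needs both state_lock and rwi_lock must take state_lock first; a single global acquisition order is what makes an AB/BA deadlock between two CPUs impossible. Below is a minimal userspace sketch of that discipline, using POSIX mutexes in place of the kernel's spinlock API. struct adapter, its fields, and schedule_reset() are illustrative stand-ins, not the driver's real types or helpers.

	#include <pthread.h>
	#include <stdio.h>

	/* Illustrative stand-in for struct ibmvnic_adapter: two locks with a
	 * documented ordering rule, mirroring state_lock and rwi_lock above. */
	struct adapter {
		pthread_mutex_t state_lock; /* serializes the state field   */
		pthread_mutex_t rwi_lock;   /* serializes the rwi work list */
		int state;
		int pending_resets;
	};

	/* Correct: every path that needs both locks takes state_lock first,
	 * so two threads can never each hold the other's second lock. */
	static void schedule_reset(struct adapter *a)
	{
		pthread_mutex_lock(&a->state_lock); /* 1st: state lock     */
		pthread_mutex_lock(&a->rwi_lock);   /* 2nd: work-list lock */
		if (a->state == 1 /* "up" */)
			a->pending_resets++;
		pthread_mutex_unlock(&a->rwi_lock);
		pthread_mutex_unlock(&a->state_lock);
	}

	int main(void)
	{
		struct adapter a = {
			.state_lock = PTHREAD_MUTEX_INITIALIZER,
			.rwi_lock = PTHREAD_MUTEX_INITIALIZER,
			.state = 1,
		};

		schedule_reset(&a);
		printf("pending resets: %d\n", a.pending_resets);
		return 0;
	}

The interleaving this forbids is thread A holding rwi_lock while waiting for state_lock and thread B doing the reverse; with one agreed order, that cycle cannot form.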
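On the tc_ct.c hunk: the rewrite replaces a mutex-protected lookup with a spinlock plus refcount_inc_not_zero(). An entry found in the hash table may already be dying (its refcount has reached zero), so the reverse-direction counter is shared only when a reference can still be taken; otherwise the code falls through to the create_counter label and allocates a fresh one. A rough userspace analog of that "take a reference only if still alive" test, sketched with C11 atomics; counter_get() is an illustrative name, not the mlx5 helper.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct counter {
		atomic_int refcount;
		long packets;
	};

	/* Try to take a reference; fail once the count has hit zero,
	 * i.e. once some other thread has begun tearing the object down. */
	static bool counter_get(struct counter *c)
	{
		int old = atomic_load(&c->refcount);

		while (old != 0) {
			/* on failure, old is reloaded and the loop retries */
			if (atomic_compare_exchange_weak(&c->refcount, &old, old + 1))
				return true; /* reference taken */
		}
		return false; /* dying object: do not reuse it */
	}

	int main(void)
	{
		struct counter shared = { .refcount = 1 };

		if (counter_get(&shared))
			printf("sharing counter, refcount=%d\n",
			       atomic_load(&shared.refcount));
		return 0;
	}

The kernel's refcount_inc_not_zero() is the same increment-unless-zero idea, with saturation semantics and debug checks layered on top.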
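On the subflow.c hunk: the comment in __mptcp_error_report() ties the smp_wmb() after sk->sk_err is assigned to an smp_rmb() in mptcp_poll(), so the error value is guaranteed visible before the wakeup that makes the poller go looking for it. A small userspace sketch of that publish/observe pairing, using C11 fences where the kernel uses smp_wmb()/smp_rmb(); report_error(), poll_error(), and the error_reported flag are illustrative names, not MPTCP's actual fields.

	#include <stdatomic.h>
	#include <stdio.h>

	static int sk_err;                 /* plain field, like sk->sk_err      */
	static atomic_bool error_reported; /* stands in for the poll/wake event */

	static void report_error(int err)
	{
		sk_err = err;                               /* 1: store the error */
		atomic_thread_fence(memory_order_release);  /* 2: like smp_wmb()  */
		atomic_store_explicit(&error_reported, true,
				      memory_order_relaxed); /* 3: raise event   */
	}

	static int poll_error(void)
	{
		if (!atomic_load_explicit(&error_reported, memory_order_relaxed))
			return 0;
		atomic_thread_fence(memory_order_acquire); /* pairs with step 2 */
		return sk_err;                             /* now visible       */
	}

	int main(void)
	{
		report_error(104 /* ECONNRESET */);
		printf("polled error: %d\n", poll_error());
		return 0;
	}

Run single-threaded this is trivially ordered; the fences matter once the reporter and the poller run on different CPUs, which is exactly the situation the kernel code is in.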