                         nla_total_size(sizeof(struct ifla_vf_rate)) +
                         nla_total_size(sizeof(struct ifla_vf_link_state)) +
                         nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
-                        nla_total_size(0) + /* nest IFLA_VF_STATS */
-                        /* IFLA_VF_STATS_RX_PACKETS */
-                        nla_total_size_64bit(sizeof(__u64)) +
-                        /* IFLA_VF_STATS_TX_PACKETS */
-                        nla_total_size_64bit(sizeof(__u64)) +
-                        /* IFLA_VF_STATS_RX_BYTES */
-                        nla_total_size_64bit(sizeof(__u64)) +
-                        /* IFLA_VF_STATS_TX_BYTES */
-                        nla_total_size_64bit(sizeof(__u64)) +
-                        /* IFLA_VF_STATS_BROADCAST */
-                        nla_total_size_64bit(sizeof(__u64)) +
-                        /* IFLA_VF_STATS_MULTICAST */
-                        nla_total_size_64bit(sizeof(__u64)) +
-                        /* IFLA_VF_STATS_RX_DROPPED */
-                        nla_total_size_64bit(sizeof(__u64)) +
-                        /* IFLA_VF_STATS_TX_DROPPED */
-                        nla_total_size_64bit(sizeof(__u64)) +
                         nla_total_size(sizeof(struct ifla_vf_trust)));
+               if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
+                       size += num_vfs *
+                               (nla_total_size(0) + /* nest IFLA_VF_STATS */
+                                /* IFLA_VF_STATS_RX_PACKETS */
+                                nla_total_size_64bit(sizeof(__u64)) +
+                                /* IFLA_VF_STATS_TX_PACKETS */
+                                nla_total_size_64bit(sizeof(__u64)) +
+                                /* IFLA_VF_STATS_RX_BYTES */
+                                nla_total_size_64bit(sizeof(__u64)) +
+                                /* IFLA_VF_STATS_TX_BYTES */
+                                nla_total_size_64bit(sizeof(__u64)) +
+                                /* IFLA_VF_STATS_BROADCAST */
+                                nla_total_size_64bit(sizeof(__u64)) +
+                                /* IFLA_VF_STATS_MULTICAST */
+                                nla_total_size_64bit(sizeof(__u64)) +
+                                /* IFLA_VF_STATS_RX_DROPPED */
+                                nla_total_size_64bit(sizeof(__u64)) +
+                                /* IFLA_VF_STATS_TX_DROPPED */
+                                nla_total_size_64bit(sizeof(__u64)));
+               }
                return size;
        } else
                return 0;
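
For reference, the "~ext_filter_mask & RTEXT_FILTER_SKIP_STATS" guard introduced
above is the usual bitwise spelling of "this flag is not set". A tiny userspace
sketch of the equivalence, using the flag values from uapi/linux/rtnetlink.h:

#include <assert.h>
#include <linux/types.h>
#include <linux/rtnetlink.h>	/* RTEXT_FILTER_VF, RTEXT_FILTER_SKIP_STATS */

int main(void)
{
	__u32 mask = RTEXT_FILTER_VF;		/* caller still wants stats */

	/* "~mask & FLAG" is non-zero exactly when FLAG is clear in mask,
	 * i.e. it behaves like "!(mask & FLAG)" for a single-bit FLAG. */
	assert(~mask & RTEXT_FILTER_SKIP_STATS);

	mask |= RTEXT_FILTER_SKIP_STATS;	/* caller opted out of stats */
	assert(!(~mask & RTEXT_FILTER_SKIP_STATS));
	return 0;
}
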
 static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
                                               struct net_device *dev,
                                               int vfs_num,
-                                              struct nlattr *vfinfo)
+                                              struct nlattr *vfinfo,
+                                              u32 ext_filter_mask)
 {
        struct ifla_vf_rss_query_en vf_rss_query_en;
        struct nlattr *vf, *vfstats, *vfvlanlist;
                goto nla_put_vf_failure;
        }
        nla_nest_end(skb, vfvlanlist);
-       memset(&vf_stats, 0, sizeof(vf_stats));
-       if (dev->netdev_ops->ndo_get_vf_stats)
-               dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
-                                               &vf_stats);
-       vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
-       if (!vfstats)
-               goto nla_put_vf_failure;
-       if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
-                             vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
-           nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
-                             vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
-           nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
-                             vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
-           nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
-                             vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
-           nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
-                             vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
-           nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
-                             vf_stats.multicast, IFLA_VF_STATS_PAD) ||
-           nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
-                             vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
-           nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
-                             vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
-               nla_nest_cancel(skb, vfstats);
-               goto nla_put_vf_failure;
+       if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) {
+               memset(&vf_stats, 0, sizeof(vf_stats));
+               if (dev->netdev_ops->ndo_get_vf_stats)
+                       dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
+                                                         &vf_stats);
+               vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS);
+               if (!vfstats)
+                       goto nla_put_vf_failure;
+               if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
+                                     vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
+                   nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
+                                     vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
+                   nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
+                                     vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
+                   nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
+                                     vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
+                   nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
+                                     vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
+                   nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
+                                     vf_stats.multicast, IFLA_VF_STATS_PAD) ||
+                   nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
+                                     vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
+                   nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
+                                     vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
+                       nla_nest_cancel(skb, vfstats);
+                       goto nla_put_vf_failure;
+               }
+               nla_nest_end(skb, vfstats);
        }
-       nla_nest_end(skb, vfstats);
        nla_nest_end(skb, vf);
        return 0;
 
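On the driver side nothing needs to change: when the requester sets
RTEXT_FILTER_SKIP_STATS, rtnl_fill_vfinfo() above simply never calls
ndo_get_vf_stats, so any slow per-VF firmware query behind it is skipped. A
hypothetical stub for illustration only (the callback prototype is the one from
include/linux/netdevice.h; the foo_* names are invented):

#include <linux/netdevice.h>
#include <linux/if_link.h>

/* Hypothetical example: vf_stats arrives pre-zeroed by rtnl_fill_vfinfo(),
 * so only the counters the device actually tracks need filling in. */
static int foo_get_vf_stats(struct net_device *dev, int vf,
			    struct ifla_vf_stats *vf_stats)
{
	vf_stats->rx_packets = 0;	/* placeholder counters */
	vf_stats->tx_packets = 0;
	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_get_vf_stats	= foo_get_vf_stats,
};
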
                return -EMSGSIZE;
 
        for (i = 0; i < num_vfs; i++) {
-               if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
+               if (rtnl_fill_vfinfo(skb, dev, i, vfinfo, ext_filter_mask))
                        return -EMSGSIZE;
        }
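
Finally, a minimal userspace sketch of a link dump that opts out of the per-VF
stats, assuming a kernel carrying this change and uapi headers that define
RTEXT_FILTER_SKIP_STATS; error handling and reply parsing are trimmed:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>

int main(void)
{
	struct {
		struct nlmsghdr  nlh;
		struct ifinfomsg ifm;
		char             buf[64];
	} req;
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	__u32 mask = RTEXT_FILTER_VF | RTEXT_FILTER_SKIP_STATS;
	struct rtattr *rta;
	int fd;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	req.nlh.nlmsg_type  = RTM_GETLINK;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.ifm.ifi_family  = AF_UNSPEC;

	/* Ask for IFLA_VFINFO_LIST but without the IFLA_VF_STATS nests. */
	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	rta->rta_type = IFLA_EXT_MASK;
	rta->rta_len  = RTA_LENGTH(sizeof(mask));
	memcpy(RTA_DATA(rta), &mask, sizeof(mask));
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + RTA_ALIGN(rta->rta_len);

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0 || sendto(fd, &req, req.nlh.nlmsg_len, 0,
			     (struct sockaddr *)&kernel, sizeof(kernel)) < 0) {
		perror("RTM_GETLINK dump");
		return 1;
	}
	/* ... recv() the multipart reply and walk IFLA_VFINFO_LIST here ... */
	close(fd);
	return 0;
}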