net: page_pool: implement GET in the netlink API
author:    Jakub Kicinski <kuba@kernel.org>
           Sun, 26 Nov 2023 23:07:34 +0000 (15:07 -0800)
committer: Paolo Abeni <pabeni@redhat.com>
           Tue, 28 Nov 2023 14:48:39 +0000 (15:48 +0100)
Expose the very basic page pool information via netlink.

Example using ynl-py for a system with 9 queues:

$ ./cli.py --no-schema --spec netlink/specs/netdev.yaml \
           --dump page-pool-get
[{'id': 19, 'ifindex': 2, 'napi-id': 147},
 {'id': 18, 'ifindex': 2, 'napi-id': 146},
 {'id': 17, 'ifindex': 2, 'napi-id': 145},
 {'id': 16, 'ifindex': 2, 'napi-id': 144},
 {'id': 15, 'ifindex': 2, 'napi-id': 143},
 {'id': 14, 'ifindex': 2, 'napi-id': 142},
 {'id': 13, 'ifindex': 2, 'napi-id': 141},
 {'id': 12, 'ifindex': 2, 'napi-id': 140},
 {'id': 11, 'ifindex': 2, 'napi-id': 139},
 {'id': 10, 'ifindex': 2, 'napi-id': 138}]

Reviewed-by: Eric Dumazet <edumazet@google.com>
Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
include/uapi/linux/netdev.h
net/core/netdev-genl-gen.c
net/core/netdev-genl-gen.h
net/core/page_pool_user.c

index 2943a151d4f128e1a0cf351a7870bb53bbaebe1c..176665bcf0da105847c709875f9e57d73d6268e9 100644 (file)
@@ -64,11 +64,21 @@ enum {
        NETDEV_A_DEV_MAX = (__NETDEV_A_DEV_MAX - 1)
 };
 
+/* Netlink attributes of the page-pool object, used by
+ * NETDEV_CMD_PAGE_POOL_GET. Numbering starts at 1; attribute 0 is
+ * reserved, per genetlink convention.
+ */
+enum {
+       NETDEV_A_PAGE_POOL_ID = 1,      /* uint: pool id, 1..U32_MAX (see policy) */
+       NETDEV_A_PAGE_POOL_IFINDEX,     /* u32: owning netdev; omitted for loopback */
+       NETDEV_A_PAGE_POOL_NAPI_ID,     /* uint: NAPI instance; omitted when 0 */
+
+       __NETDEV_A_PAGE_POOL_MAX,
+       NETDEV_A_PAGE_POOL_MAX = (__NETDEV_A_PAGE_POOL_MAX - 1)
+};
+
 enum {
        NETDEV_CMD_DEV_GET = 1,
        NETDEV_CMD_DEV_ADD_NTF,
        NETDEV_CMD_DEV_DEL_NTF,
        NETDEV_CMD_DEV_CHANGE_NTF,
+       NETDEV_CMD_PAGE_POOL_GET,       /* do: one pool by id; dump: all visible pools */
 
        __NETDEV_CMD_MAX,
        NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
index ea9231378aa6ffccfb31067080b573624151773c..bfde13981c770108e4f05e12edd4a0f02029075b 100644 (file)
 
 #include <uapi/linux/netdev.h>
 
+/* Integer value ranges */
+static const struct netlink_range_validation netdev_a_page_pool_id_range = {
+       .min    = 1ULL,                 /* 0 is never a valid pool id */
+       .max    = 4294967295ULL,        /* ids fit in 32 bits (U32_MAX) */
+};
+
 /* NETDEV_CMD_DEV_GET - do */
 static const struct nla_policy netdev_dev_get_nl_policy[NETDEV_A_DEV_IFINDEX + 1] = {
        [NETDEV_A_DEV_IFINDEX] = NLA_POLICY_MIN(NLA_U32, 1),
 };
 
+/* NETDEV_CMD_PAGE_POOL_GET - do
+ * The attribute is NLA_UINT (variable width), so range-check it against
+ * the full 32-bit id space rather than relying on the attribute size.
+ */
+#ifdef CONFIG_PAGE_POOL
+static const struct nla_policy netdev_page_pool_get_nl_policy[NETDEV_A_PAGE_POOL_ID + 1] = {
+       [NETDEV_A_PAGE_POOL_ID] = NLA_POLICY_FULL_RANGE(NLA_UINT, &netdev_a_page_pool_id_range),
+};
+#endif /* CONFIG_PAGE_POOL */
+
 /* Ops table for netdev */
 static const struct genl_split_ops netdev_nl_ops[] = {
        {
@@ -29,6 +42,20 @@ static const struct genl_split_ops netdev_nl_ops[] = {
                .dumpit = netdev_nl_dev_get_dumpit,
                .flags  = GENL_CMD_CAP_DUMP,
        },
+#ifdef CONFIG_PAGE_POOL
+       /* Split ops for PAGE_POOL_GET: the do op carries a policy and
+        * maxattr so the id attribute is validated by the genetlink core;
+        * the dump op takes no attributes, hence no policy.
+        */
+       {
+               .cmd            = NETDEV_CMD_PAGE_POOL_GET,
+               .doit           = netdev_nl_page_pool_get_doit,
+               .policy         = netdev_page_pool_get_nl_policy,
+               .maxattr        = NETDEV_A_PAGE_POOL_ID,
+               .flags          = GENL_CMD_CAP_DO,
+       },
+       {
+               .cmd    = NETDEV_CMD_PAGE_POOL_GET,
+               .dumpit = netdev_nl_page_pool_get_dumpit,
+               .flags  = GENL_CMD_CAP_DUMP,
+       },
+#endif /* CONFIG_PAGE_POOL */
 };
 
 static const struct genl_multicast_group netdev_nl_mcgrps[] = {
index 7b370c073e7dd18f340e013b3be6a5ef6e7455b1..a011d12abff43f2cb8c269b73a65895875eac1f4 100644 (file)
@@ -13,6 +13,9 @@
 
 int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info);
 int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+int netdev_nl_page_pool_get_doit(struct sk_buff *skb, struct genl_info *info);
+int netdev_nl_page_pool_get_dumpit(struct sk_buff *skb,
+                                  struct netlink_callback *cb);
 
 enum {
        NETDEV_NLGRP_MGMT,
index 2888aa8dd3e4f81802061ac8b1d4c857ab317b7c..7eb37c31fce94db6497f683d4ae58016a6053c09 100644 (file)
@@ -5,8 +5,10 @@
 #include <linux/xarray.h>
 #include <net/net_debug.h>
 #include <net/page_pool/types.h>
+#include <net/sock.h>
 
 #include "page_pool_priv.h"
+#include "netdev-genl-gen.h"
 
 static DEFINE_XARRAY_FLAGS(page_pools, XA_FLAGS_ALLOC1);
 /* Protects: page_pools, netdevice->page_pools, pool->slow.netdev, pool->user.
@@ -26,6 +28,131 @@ static DEFINE_MUTEX(page_pools_lock);
  *    - user.list: unhashed, netdev: unknown
  */
 
+/* Fill callback: append one pool's attributes to @rsp.
+ * Shared by the do and dump paths so both emit identical objects.
+ * Returns 0 or a negative error (-EMSGSIZE when @rsp is full).
+ */
+typedef int (*pp_nl_fill_cb)(struct sk_buff *rsp, const struct page_pool *pool,
+                            const struct genl_info *info);
+
+/* Handle a single-object GET (do) for pool @id.
+ * The pool must exist, be hashed on its netdev's user list (i.e. still
+ * visible to user space), and belong to the requester's netns; otherwise
+ * -ENOENT. The lookup and fill both run under page_pools_lock; the reply
+ * is sent only after the lock is dropped. genlmsg_reply() consumes @rsp,
+ * so no free is needed on that path.
+ */
+static int
+netdev_nl_page_pool_get_do(struct genl_info *info, u32 id, pp_nl_fill_cb fill)
+{
+       struct page_pool *pool;
+       struct sk_buff *rsp;
+       int err;
+
+       mutex_lock(&page_pools_lock);
+       pool = xa_load(&page_pools, id);
+       if (!pool || hlist_unhashed(&pool->user.list) ||
+           !net_eq(dev_net(pool->slow.netdev), genl_info_net(info))) {
+               err = -ENOENT;
+               goto err_unlock;
+       }
+
+       rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!rsp) {
+               err = -ENOMEM;
+               goto err_unlock;
+       }
+
+       err = fill(rsp, pool, info);
+       if (err)
+               goto err_free_msg;
+
+       mutex_unlock(&page_pools_lock);
+
+       return genlmsg_reply(rsp, info);
+
+err_free_msg:
+       nlmsg_free(rsp);
+err_unlock:
+       mutex_unlock(&page_pools_lock);
+       return err;
+}
+
+/* Dump resume state, stored in cb->ctx so it survives across the
+ * multiple invocations of an interrupted dump.
+ */
+struct page_pool_dump_cb {
+       unsigned long ifindex;  /* cursor for for_each_netdev_dump() */
+       u32 pp_id;              /* id of the pool the last pass stopped at, 0 if none */
+};
+
+/* Walk every netdev in the requester's netns and emit all of its pools.
+ * Holds rtnl_lock (for the netdev walk) and page_pools_lock (for the
+ * per-netdev pool lists). On resume, pools with an id greater than the
+ * saved pp_id are skipped as already dumped.
+ * NOTE(review): that skip rule implies each netdev->page_pools list is
+ * ordered newest-first (descending id) — confirm against page_pool_list().
+ * A partially filled skb (-EMSGSIZE with data present) returns skb->len
+ * so netlink asks for another pass.
+ */
+static int
+netdev_nl_page_pool_get_dump(struct sk_buff *skb, struct netlink_callback *cb,
+                            pp_nl_fill_cb fill)
+{
+       struct page_pool_dump_cb *state = (void *)cb->ctx;
+       const struct genl_info *info = genl_info_dump(cb);
+       struct net *net = sock_net(skb->sk);
+       struct net_device *netdev;
+       struct page_pool *pool;
+       int err = 0;
+
+       rtnl_lock();
+       mutex_lock(&page_pools_lock);
+       for_each_netdev_dump(net, netdev, state->ifindex) {
+               hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
+                       if (state->pp_id && state->pp_id < pool->user.id)
+                               continue;
+
+                       /* Remember where we are in case fill() runs out of room */
+                       state->pp_id = pool->user.id;
+                       err = fill(skb, pool, info);
+                       if (err)
+                               break;
+               }
+
+               /* Finished this netdev: clear the intra-device cursor */
+               state->pp_id = 0;
+       }
+       mutex_unlock(&page_pools_lock);
+       rtnl_unlock();
+
+       if (skb->len && err == -EMSGSIZE)
+               return skb->len;
+       return err;
+}
+
+/* Emit one page-pool object (id, optional ifindex, optional napi-id)
+ * into @rsp. Returns 0, or -EMSGSIZE after cancelling the partial
+ * message so the caller can retry with a fresh skb.
+ * The ifindex is suppressed when the owning device is loopback —
+ * presumably the parking place for pools whose netdev is gone; verify
+ * against the unregistration path before relying on that.
+ */
+static int
+page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
+                 const struct genl_info *info)
+{
+       void *hdr;
+
+       hdr = genlmsg_iput(rsp, info);
+       if (!hdr)
+               return -EMSGSIZE;
+
+       if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id))
+               goto err_cancel;
+
+       if (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX &&
+           nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX,
+                       pool->slow.netdev->ifindex))
+               goto err_cancel;
+       /* napi_id of 0 means "no NAPI instance" — omit rather than emit 0 */
+       if (pool->user.napi_id &&
+           nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, pool->user.napi_id))
+               goto err_cancel;
+
+       genlmsg_end(rsp, hdr);
+
+       return 0;
+err_cancel:
+       genlmsg_cancel(rsp, hdr);
+       return -EMSGSIZE;
+}
+
+/* do handler: require the ID attribute (GENL_REQ_ATTR_CHECK sets extack
+ * and we return -EINVAL when it is missing), then delegate the lookup
+ * and reply to netdev_nl_page_pool_get_do(). The policy above caps the
+ * value at U32_MAX, so narrowing the uint to u32 here cannot truncate.
+ */
+int netdev_nl_page_pool_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+       u32 id;
+
+       if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_PAGE_POOL_ID))
+               return -EINVAL;
+
+       id = nla_get_uint(info->attrs[NETDEV_A_PAGE_POOL_ID]);
+
+       return netdev_nl_page_pool_get_do(info, id, page_pool_nl_fill);
+}
+
+/* dump handler: thin wrapper binding the generic dump walk to the
+ * basic-info fill callback (stats variants can reuse the walk later).
+ */
+int netdev_nl_page_pool_get_dumpit(struct sk_buff *skb,
+                                  struct netlink_callback *cb)
+{
+       return netdev_nl_page_pool_get_dump(skb, cb, page_pool_nl_fill);
+}
+
 int page_pool_list(struct page_pool *pool)
 {
        static u32 id_alloc_next;