sock: Code cleanup on __sk_mem_raise_allocated()
author     Abel Wu <wuyun.abel@bytedance.com>
           Thu, 19 Oct 2023 12:00:24 +0000 (20:00 +0800)
committer  Paolo Abeni <pabeni@redhat.com>
           Tue, 24 Oct 2023 08:38:30 +0000 (10:38 +0200)
Clean up __sk_mem_raise_allocated() for simplicity and readability: cache the
mem_cgroup pointer instead of carrying a memcg_charge boolean, initialize
'charged' to false and set it only after a successful charge, and split the
combined test-and-assign into separate statements.

No functional change intended.
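
For illustration only, the reworked charge/uncharge bookkeeping boils down to
the pattern sketched below. The types and helpers here (struct acct,
toy_charge(), toy_uncharge(), toy_raise()) are stand-ins invented for this
sketch, not kernel interfaces; the real code uses struct mem_cgroup,
mem_cgroup_charge_skmem() and mem_cgroup_uncharge_skmem() as shown in the
hunks that follow.

    /* Illustrative sketch only: stand-in types and helpers, not kernel code. */
    #include <stdbool.h>
    #include <stdio.h>

    struct acct { long used, limit; };   /* stands in for struct mem_cgroup */

    /* Like mem_cgroup_charge_skmem(): try to account amt, may fail. */
    static bool toy_charge(struct acct *a, int amt)
    {
            if (a->used + amt > a->limit)
                    return false;
            a->used += amt;
            return true;
    }

    /* Like mem_cgroup_uncharge_skmem(): give the accounted amount back. */
    static void toy_uncharge(struct acct *a, int amt)
    {
            a->used -= amt;
    }

    /* Mirrors the reworked flow: a NULL pointer means "no accounting",
     * and 'charged' becomes true only after a successful charge, so the
     * error path uncharges exactly what was charged. */
    static int toy_raise(struct acct *acct, int amt)
    {
            bool charged = false;

            if (acct) {
                    if (!toy_charge(acct, amt))
                            goto suppress;
                    charged = true;
            }

            /* ... limit checks and memory-pressure handling elided ... */
            return 1;

    suppress:
            if (charged)
                    toy_uncharge(acct, amt);
            return 0;
    }

    int main(void)
    {
            struct acct a = { .used = 0, .limit = 100 };

            printf("%d\n", toy_raise(&a, 50));   /* 1: charge succeeds   */
            printf("%d\n", toy_raise(&a, 80));   /* 0: over the limit    */
            printf("%d\n", toy_raise(NULL, 80)); /* 1: accounting absent */
            return 0;
    }

The point of the pattern is that the failure path only has to test 'charged';
it never needs to re-check whether accounting was enabled in the first place.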

Signed-off-by: Abel Wu <wuyun.abel@bytedance.com>
Acked-by: Shakeel Butt <shakeelb@google.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://lore.kernel.org/r/20231019120026.42215-1-wuyun.abel@bytedance.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
net/core/sock.c

index 290165954379292782a484d378a865cc52ca6753..43842520db865d6baf6f578c5145480707ad65a6 100644
@@ -3039,17 +3039,19 @@ EXPORT_SYMBOL(sk_wait_data);
  */
 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
 {
-       bool memcg_charge = mem_cgroup_sockets_enabled && sk->sk_memcg;
+       struct mem_cgroup *memcg = mem_cgroup_sockets_enabled ? sk->sk_memcg : NULL;
        struct proto *prot = sk->sk_prot;
-       bool charged = true;
+       bool charged = false;
        long allocated;
 
        sk_memory_allocated_add(sk, amt);
        allocated = sk_memory_allocated(sk);
-       if (memcg_charge &&
-           !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt,
-                                               gfp_memcg_charge())))
-               goto suppress_allocation;
+
+       if (memcg) {
+               if (!mem_cgroup_charge_skmem(memcg, amt, gfp_memcg_charge()))
+                       goto suppress_allocation;
+               charged = true;
+       }
 
        /* Under limit. */
        if (allocated <= sk_prot_mem_limits(sk, 0)) {
@@ -3104,8 +3106,8 @@ suppress_allocation:
                 */
                if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) {
                        /* Force charge with __GFP_NOFAIL */
-                       if (memcg_charge && !charged) {
-                               mem_cgroup_charge_skmem(sk->sk_memcg, amt,
+                       if (memcg && !charged) {
+                               mem_cgroup_charge_skmem(memcg, amt,
                                        gfp_memcg_charge() | __GFP_NOFAIL);
                        }
                        return 1;
@@ -3117,8 +3119,8 @@ suppress_allocation:
 
        sk_memory_allocated_sub(sk, amt);
 
-       if (memcg_charge && charged)
-               mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
+       if (charged)
+               mem_cgroup_uncharge_skmem(memcg, amt);
 
        return 0;
 }