unsigned int sysctl_tcp_notsent_lowat;
        int sysctl_tcp_tw_reuse;
        struct inet_timewait_death_row tcp_death_row;
+       int sysctl_max_syn_backlog;
 
        int sysctl_igmp_max_memberships;
        int sysctl_igmp_max_msf;
 
 /*
  * NET         Generic infrastructure for Network protocols.
  *
- *             Definitions for request_sock 
+ *             Definitions for request_sock
  *
  * Authors:    Arnaldo Carvalho de Melo <acme@conectiva.com.br>
  *
                reqsk_free(req);
 }
 
-extern int sysctl_max_syn_backlog;
-
 /*
  * For a TCP Fast Open listener -
  *     lock - protects the access to all the reqsk, which is co-owned by
 
  * and it will increase in proportion to the memory of machine.
  * Note: Don't forget somaxconn that may limit backlog too.
  */
-int sysctl_max_syn_backlog = 256;
-EXPORT_SYMBOL(sysctl_max_syn_backlog);
 
 void reqsk_queue_alloc(struct request_sock_queue *queue)
 {
 
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
-       {
-               .procname       = "tcp_max_syn_backlog",
-               .data           = &sysctl_max_syn_backlog,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec
-       },
        {
                .procname       = "inet_peer_threshold",
                .data           = &inet_peer_threshold,
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "tcp_max_syn_backlog",
+               .data           = &init_net.ipv4.sysctl_max_syn_backlog,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
        {
                .procname       = "fib_multipath_use_neigh",
 
 
 
        cnt = tcp_hashinfo.ehash_mask + 1;
-
        sysctl_tcp_max_orphans = cnt / 2;
-       sysctl_max_syn_backlog = max(128, cnt / 256);
 
        tcp_init_mem();
        /* Set per-socket limits to no more than 1/128 the pressure threshold */
 
                }
                /* Kill the following clause, if you dislike this way. */
                else if (!net->ipv4.sysctl_tcp_syncookies &&
-                        (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
-                         (sysctl_max_syn_backlog >> 2)) &&
+                        (net->ipv4.sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
+                         (net->ipv4.sysctl_max_syn_backlog >> 2)) &&
                         !tcp_peer_is_proven(req, dst, false,
                                             tmp_opt.saw_tstamp)) {
                        /* Without syncookies last quarter of
 
 
 static int __net_init tcp_sk_init(struct net *net)
 {
-       int res, cpu;
+       int res, cpu, cnt;
 
        net->ipv4.tcp_sk = alloc_percpu(struct sock *);
        if (!net->ipv4.tcp_sk)
        net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
        net->ipv4.sysctl_tcp_tw_reuse = 0;
 
+       cnt = tcp_hashinfo.ehash_mask + 1;
        net->ipv4.tcp_death_row.sysctl_tw_recycle = 0;
-       net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (tcp_hashinfo.ehash_mask + 1) / 2;
+       net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
        net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
 
+       net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
+
        return 0;
 fail:
        tcp_sk_exit(net);