        dev->divert = NULL;
        if (dev->type == ARPHRD_ETHER) {
-               dev->divert = (struct divert_blk *)
-                       kmalloc(alloc_size, GFP_KERNEL);
+               dev->divert = kzalloc(alloc_size, GFP_KERNEL);
                if (dev->divert == NULL) {
                        printk(KERN_INFO "divert: unable to allocate divert_blk for %s\n",
                               dev->name);
                        return -ENOMEM;
                }
-
-               memset(dev->divert, 0, sizeof(struct divert_blk));
                dev_hold(dev);
        }
 
 
                /* NOTHING */;
 
        flow_table(cpu) = (struct flow_cache_entry **)
-               __get_free_pages(GFP_KERNEL, order);
+               __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
        if (!flow_table(cpu))
                panic("NET: failed to allocate flow cache order %lu\n", order);
 
-       memset(flow_table(cpu), 0, PAGE_SIZE << order);
-
        flow_hash_rnd_recalc(cpu) = 1;
        flow_count(cpu) = 0;
 
 
        if (parm->interval < -2 || parm->interval > 3)
                return -EINVAL;
 
-       est = kmalloc(sizeof(*est), GFP_KERNEL);
+       est = kzalloc(sizeof(*est), GFP_KERNEL);
        if (est == NULL)
                return -ENOBUFS;
 
-       memset(est, 0, sizeof(*est));
        est->interval = parm->interval + 2;
        est->bstats = bstats;
        est->rate_est = rate_est;
 
        struct neighbour **ret;
 
        if (size <= PAGE_SIZE) {
-               ret = kmalloc(size, GFP_ATOMIC);
+               ret = kzalloc(size, GFP_ATOMIC);
        } else {
                ret = (struct neighbour **)
-                       __get_free_pages(GFP_ATOMIC, get_order(size));
+                     __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
        }
-       if (ret)
-               memset(ret, 0, size);
-
        return ret;
 }
 
                if (hh->hh_type == protocol)
                        break;
 
-       if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
-               memset(hh, 0, sizeof(struct hh_cache));
+       if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
                rwlock_init(&hh->hh_lock);
                hh->hh_type = protocol;
                atomic_set(&hh->hh_refcnt, 0);

        tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
 
        phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
-       tbl->phash_buckets = kmalloc(phsize, GFP_KERNEL);
+       tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
 
        if (!tbl->hash_buckets || !tbl->phash_buckets)
                panic("cannot allocate neighbour cache hashes");
 
-       memset(tbl->phash_buckets, 0, phsize);
-
        get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
 
        rwlock_init(&tbl->lock);
 
 {
        const int lopt_size = sizeof(struct listen_sock) +
                              nr_table_entries * sizeof(struct request_sock *);
-       struct listen_sock *lopt = kmalloc(lopt_size, GFP_KERNEL);
+       struct listen_sock *lopt = kzalloc(lopt_size, GFP_KERNEL);
 
        if (lopt == NULL)
                return -ENOMEM;
 
-       memset(lopt, 0, lopt_size);
-
        for (lopt->max_qlen_log = 6;
             (1 << lopt->max_qlen_log) < sysctl_max_syn_backlog;
             lopt->max_qlen_log++);