static inline int lnet_md_exhausted(lnet_libmd_t *md)
 {
-       return (md->md_threshold == 0 ||
-               ((md->md_options & LNET_MD_MAX_SIZE) != 0 &&
+       return (!md->md_threshold ||
+               ((md->md_options & LNET_MD_MAX_SIZE) &&
                 md->md_offset + md->md_max_size > md->md_length));
 }
 
         *    LNetM[DE]Unlink, in the latter case md may not be exhausted).
         *  - auto unlink is on and md is exhausted.
         */
-       if (md->md_refcount != 0)
+       if (md->md_refcount)
                return 0;
 
-       if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) != 0)
+       if (md->md_flags & LNET_MD_FLAG_ZOMBIE)
                return 1;
 
-       return ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0 &&
+       return ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) &&
                lnet_md_exhausted(md));
 }
 
        unsigned int size;
        unsigned int niov;
 
-       if ((umd->options & LNET_MD_KIOV) != 0) {
+       if (umd->options & LNET_MD_KIOV) {
                niov = umd->length;
                size = offsetof(lnet_libmd_t, md_iov.kiov[niov]);
        } else {
-               niov = ((umd->options & LNET_MD_IOVEC) != 0) ?
-                      umd->length : 1;
+               niov = umd->options & LNET_MD_IOVEC ? umd->length : 1;
                size = offsetof(lnet_libmd_t, md_iov.iov[niov]);
        }
 
 {
        unsigned int size;
 
-       if ((md->md_options & LNET_MD_KIOV) != 0)
+       if (md->md_options & LNET_MD_KIOV)
                size = offsetof(lnet_libmd_t, md_iov.kiov[md->md_niov]);
        else
                size = offsetof(lnet_libmd_t, md_iov.iov[md->md_niov]);
 {
        LASSERT(lp->lp_refcount > 0);
        lp->lp_refcount--;
-       if (lp->lp_refcount == 0)
+       if (!lp->lp_refcount)
                lnet_destroy_peer_locked(lp);
 }
 
 static inline int
 lnet_isrouter(lnet_peer_t *lp)
 {
-       return lp->lp_rtr_refcount != 0;
+       return lp->lp_rtr_refcount ? 1 : 0;
 }
 
 static inline void
 
  * peer aliveness is enabled only on routers for peers in a network where the
  * lnet_ni_t::ni_peertimeout has been set to a positive value
  */
-#define lnet_peer_aliveness_enabled(lp) (the_lnet.ln_routing != 0 && \
+#define lnet_peer_aliveness_enabled(lp) (the_lnet.ln_routing && \
                                         (lp)->lp_ni->ni_peertimeout > 0)
 
 typedef struct {
 
                sum = ((sum << 1) | (sum >> 31)) + *c++;
 
        /* ensure I don't return 0 (== no checksum) */
-       return (sum == 0) ? 1 : sum;
+       return !sum ? 1 : sum;
 }
 
 static char *kiblnd_msgtype2str(int type)
         */
        msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum;
        msg->ibm_cksum = 0;
-       if (msg_cksum != 0 &&
+       if (msg_cksum &&
            msg_cksum != kiblnd_cksum(msg, msg_nob)) {
                CERROR("Bad checksum\n");
                return -EPROTO;
        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
        /* always called with a ref on ni, which prevents ni being shutdown */
-       LASSERT(net->ibn_shutdown == 0);
+       LASSERT(!net->ibn_shutdown);
 
        /* npeers only grows with the global lock held */
        atomic_inc(&net->ibn_npeers);
        kib_net_t *net = peer->ibp_ni->ni_data;
 
        LASSERT(net);
-       LASSERT(atomic_read(&peer->ibp_refcount) == 0);
+       LASSERT(!atomic_read(&peer->ibp_refcount));
        LASSERT(!kiblnd_peer_active(peer));
-       LASSERT(peer->ibp_connecting == 0);
-       LASSERT(peer->ibp_accepting == 0);
+       LASSERT(!peer->ibp_connecting);
+       LASSERT(!peer->ibp_accepting);
        LASSERT(list_empty(&peer->ibp_conns));
        LASSERT(list_empty(&peer->ibp_tx_queue));
 
 
        mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu);
        LASSERT(mtu >= 0);
-       if (mtu != 0)
+       if (mtu)
                cmid->route.path_rec->mtu = mtu;
 }
 
        /* hash NID to CPU id in this partition... */
        off = do_div(nid, cpumask_weight(mask));
        for_each_cpu(i, mask) {
-               if (off-- == 0)
+               if (!off--)
                        return i % vectors;
        }
 
 
        rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt,
                                IBLND_RX_MSG_PAGES(version));
-       if (rc != 0)
+       if (rc)
                goto failed_2;
 
        kiblnd_map_rx_descs(conn);
        conn->ibc_cq = cq;
 
        rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't request completion notificiation: %d\n", rc);
                goto failed_2;
        }
        conn->ibc_sched = sched;
 
        rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n",
                       rc, init_qp_attr->cap.max_send_wr,
                       init_qp_attr->cap.max_recv_wr);
        for (i = 0; i < IBLND_RX_MSGS(version); i++) {
                rc = kiblnd_post_rx(&conn->ibc_rxs[i],
                                    IBLND_POSTRX_NO_CREDIT);
-               if (rc != 0) {
+               if (rc) {
                        CERROR("Can't post rxmsg: %d\n", rc);
 
                        /* Make posted receives complete */
        int rc;
 
        LASSERT(!in_interrupt());
-       LASSERT(atomic_read(&conn->ibc_refcount) == 0);
+       LASSERT(!atomic_read(&conn->ibc_refcount));
        LASSERT(list_empty(&conn->ibc_early_rxs));
        LASSERT(list_empty(&conn->ibc_tx_noops));
        LASSERT(list_empty(&conn->ibc_tx_queue));
        LASSERT(list_empty(&conn->ibc_tx_queue_rsrvd));
        LASSERT(list_empty(&conn->ibc_tx_queue_nocred));
        LASSERT(list_empty(&conn->ibc_active_txs));
-       LASSERT(conn->ibc_noops_posted == 0);
-       LASSERT(conn->ibc_nsends_posted == 0);
+       LASSERT(!conn->ibc_noops_posted);
+       LASSERT(!conn->ibc_nsends_posted);
 
        switch (conn->ibc_state) {
        default:
 
        if (conn->ibc_cq) {
                rc = ib_destroy_cq(conn->ibc_cq);
-               if (rc != 0)
+               if (rc)
                        CWARN("Error destroying CQ: %d\n", rc);
        }
 
        if (nid == LNET_NID_ANY)
                return 0;
 
-       return (count == 0) ? -ENOENT : 0;
+       return !count ? -ENOENT : 0;
 }
 
 int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
 
        read_unlock_irqrestore(glock, flags);
 
-       if (last_alive != 0)
+       if (last_alive)
                *when = last_alive;
 
        /*
        kib_tx_t *tx;
        int i;
 
-       LASSERT(tpo->tpo_pool.po_allocated == 0);
+       LASSERT(!tpo->tpo_pool.po_allocated);
 
        if (!hdev)
                return;
        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
        while (dev->ibd_failover) {
                read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-               if (i++ % 50 == 0)
+               if (!(i++ % 50))
                        CDEBUG(D_NET, "%s: Wait for failover\n",
                               dev->ibd_ifname);
                schedule_timeout(cfs_time_seconds(1) / 100);
        CLASSERT(IBLND_MSG_SIZE <= PAGE_SIZE);
 
        /* No fancy arithmetic when we do the buffer calculations */
-       CLASSERT(PAGE_SIZE % IBLND_MSG_SIZE == 0);
+       CLASSERT(!(PAGE_SIZE % IBLND_MSG_SIZE));
 
        tpo->tpo_hdev = kiblnd_current_hdev(dev);
 
 
 static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool)
 {
-       LASSERT(pool->fpo_map_count == 0);
+       LASSERT(!pool->fpo_map_count);
 
        if (pool->fpo_fmr_pool)
                ib_destroy_fmr_pool(pool->fpo_fmr_pool);
                                                 kib_fmr_pool_t, fpo_list);
                fpo->fpo_failed = 1;
                list_del(&fpo->fpo_list);
-               if (fpo->fpo_map_count == 0)
+               if (!fpo->fpo_map_count)
                        list_add(&fpo->fpo_list, zombies);
                else
                        list_add(&fpo->fpo_list, &fps->fps_failed_pool_list);
        INIT_LIST_HEAD(&fps->fps_failed_pool_list);
 
        rc = kiblnd_create_fmr_pool(fps, &fpo);
-       if (rc == 0)
+       if (!rc)
                list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
 
        return rc;
 
 static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now)
 {
-       if (fpo->fpo_map_count != 0) /* still in use */
+       if (fpo->fpo_map_count) /* still in use */
                return 0;
        if (fpo->fpo_failed)
                return 1;
        int rc;
 
        rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
-       LASSERT(rc == 0);
+       LASSERT(!rc);
 
-       if (status != 0) {
+       if (status) {
                rc = ib_flush_fmr_pool(fpo->fpo_fmr_pool);
-               LASSERT(rc == 0);
+               LASSERT(!rc);
        }
 
        fmr->fmr_pool = NULL;
        rc = kiblnd_create_fmr_pool(fps, &fpo);
        spin_lock(&fps->fps_lock);
        fps->fps_increasing = 0;
-       if (rc == 0) {
+       if (!rc) {
                fps->fps_version++;
                list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
        } else {
 static void kiblnd_fini_pool(kib_pool_t *pool)
 {
        LASSERT(list_empty(&pool->po_free_list));
-       LASSERT(pool->po_allocated == 0);
+       LASSERT(!pool->po_allocated);
 
        CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name);
 }
                                            kib_pool_t, po_list);
                po->po_failed = 1;
                list_del(&po->po_list);
-               if (po->po_allocated == 0)
+               if (!po->po_allocated)
                        list_add(&po->po_list, zombies);
                else
                        list_add(&po->po_list, &ps->ps_failed_pool_list);
        INIT_LIST_HEAD(&ps->ps_failed_pool_list);
 
        rc = ps->ps_pool_create(ps, size, &pool);
-       if (rc == 0)
+       if (!rc)
                list_add(&pool->po_list, &ps->ps_pool_list);
        else
                CERROR("Failed to create the first pool for %s\n", ps->ps_name);
 
 static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now)
 {
-       if (pool->po_allocated != 0) /* still in use */
+       if (pool->po_allocated) /* still in use */
                return 0;
        if (pool->po_failed)
                return 1;
 
        spin_lock(&ps->ps_lock);
        ps->ps_increasing = 0;
-       if (rc == 0) {
+       if (!rc) {
                list_add_tail(&pool->po_list, &ps->ps_pool_list);
        } else {
                ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
        kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool);
        int i;
 
-       LASSERT(pool->po_allocated == 0);
+       LASSERT(!pool->po_allocated);
 
        if (tpo->tpo_tx_pages) {
                kiblnd_unmap_tx_pool(tpo);
        tpo->tpo_tx_pages = NULL;
 
        npg = (size * IBLND_MSG_SIZE + PAGE_SIZE - 1) / PAGE_SIZE;
-       if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg) != 0) {
+       if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg)) {
                CERROR("Can't allocate tx pages: %d\n", npg);
                LIBCFS_FREE(tpo, sizeof(*tpo));
                return -ENOMEM;
        int i;
 
        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-       if (*kiblnd_tunables.kib_map_on_demand == 0 &&
+       if (!*kiblnd_tunables.kib_map_on_demand &&
            net->ibn_dev->ibd_hdev->ibh_nmrs == 1) {
                read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
                goto create_tx_pool;
                rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, net,
                                             kiblnd_fmr_pool_size(ncpts),
                                             kiblnd_fmr_flush_trigger(ncpts));
-               if (rc == -ENOSYS && i == 0) /* no FMR */
+               if (rc == -ENOSYS && !i) /* no FMR */
                        break;
 
-               if (rc != 0) { /* a real error */
+               if (rc) { /* a real error */
                        CERROR("Can't initialize FMR pool for CPT %d: %d\n",
                               cpt, rc);
                        goto failed;
                                         kiblnd_create_tx_pool,
                                         kiblnd_destroy_tx_pool,
                                         kiblnd_tx_init, NULL);
-               if (rc != 0) {
+               if (rc) {
                        CERROR("Can't initialize TX pool for CPT %d: %d\n",
                               cpt, rc);
                        goto failed;
        return 0;
  failed:
        kiblnd_net_fini_pools(net);
-       LASSERT(rc != 0);
+       LASSERT(rc);
        return rc;
 }
 
 {
        int i;
 
-       if (hdev->ibh_nmrs == 0 || !hdev->ibh_mrs)
+       if (!hdev->ibh_nmrs || !hdev->ibh_mrs)
                return;
 
        for (i = 0; i < hdev->ibh_nmrs; i++) {
        int acflags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
 
        rc = kiblnd_hdev_get_attr(hdev);
-       if (rc != 0)
+       if (rc)
                return rc;
 
        LIBCFS_ALLOC(hdev->ibh_mrs, 1 * sizeof(*hdev->ibh_mrs));
        dstaddr.sin_family = AF_INET;
        rc = rdma_resolve_addr(cmid, (struct sockaddr *)&srcaddr,
                               (struct sockaddr *)&dstaddr, 1);
-       if (rc != 0 || !cmid->device) {
+       if (rc || !cmid->device) {
                CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
                       dev->ibd_ifname, &dev->ibd_ifip,
                       cmid->device, rc);
 
        /* Bind to failover device or port */
        rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr);
-       if (rc != 0 || !cmid->device) {
+       if (rc || !cmid->device) {
                CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
                       dev->ibd_ifname, &dev->ibd_ifip,
                       cmid->device, rc);
        hdev->ibh_pd = pd;
 
        rc = rdma_listen(cmid, 0);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't start new listener: %d\n", rc);
                goto out;
        }
 
        rc = kiblnd_hdev_setup_mrs(hdev);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't setup device: %d\n", rc);
                goto out;
        }
        if (hdev)
                kiblnd_hdev_decref(hdev);
 
-       if (rc != 0)
+       if (rc)
                dev->ibd_failed_failover++;
        else
                dev->ibd_failed_failover = 0;
 
 void kiblnd_destroy_dev(kib_dev_t *dev)
 {
-       LASSERT(dev->ibd_nnets == 0);
+       LASSERT(!dev->ibd_nnets);
        LASSERT(list_empty(&dev->ibd_nets));
 
        list_del(&dev->ibd_fail_list);
        int rc;
 
        rc = lnet_ipif_query(ifname, &up, &ip, &netmask);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't query IPoIB interface %s: %d\n",
                       ifname, rc);
                return NULL;
 
        /* initialize the device */
        rc = kiblnd_dev_failover(dev);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't initialize device: %d\n", rc);
                LIBCFS_FREE(dev, sizeof(*dev));
                return NULL;
                wake_up_all(&kiblnd_data.kib_failover_waitq);
 
                i = 2;
-               while (atomic_read(&kiblnd_data.kib_nthreads) != 0) {
+               while (atomic_read(&kiblnd_data.kib_nthreads)) {
                        i++;
                        /* power of 2 ? */
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
 
                /* Wait for all peer state to clean up */
                i = 2;
-               while (atomic_read(&net->ibn_npeers) != 0) {
+               while (atomic_read(&net->ibn_npeers)) {
                        i++;
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? */
                               "%s: waiting for %d peers to disconnect\n",
                /* fall through */
 
        case IBLND_INIT_NOTHING:
-               LASSERT(atomic_read(&net->ibn_nconns) == 0);
+               LASSERT(!atomic_read(&net->ibn_nconns));
 
-               if (net->ibn_dev &&
-                   net->ibn_dev->ibd_nnets == 0)
+               if (net->ibn_dev && !net->ibn_dev->ibd_nnets)
                        kiblnd_destroy_dev(net->ibn_dev);
 
                break;
        /*****************************************************/
 
        rc = kiblnd_thread_start(kiblnd_connd, NULL, "kiblnd_connd");
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't spawn o2iblnd connd: %d\n", rc);
                goto failed;
        }
 
-       if (*kiblnd_tunables.kib_dev_failover != 0)
+       if (*kiblnd_tunables.kib_dev_failover)
                rc = kiblnd_thread_start(kiblnd_failover_thread, NULL,
                                         "kiblnd_failover");
 
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't spawn o2iblnd failover thread: %d\n", rc);
                goto failed;
        }
        int nthrs;
        int i;
 
-       if (sched->ibs_nthreads == 0) {
+       if (!sched->ibs_nthreads) {
                if (*kiblnd_tunables.kib_nscheds > 0) {
                        nthrs = sched->ibs_nthreads_max;
                } else {
                snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld",
                         KIB_THREAD_CPT(id), KIB_THREAD_TID(id));
                rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id, name);
-               if (rc == 0)
+               if (!rc)
                        continue;
 
                CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
                        continue;
 
                rc = kiblnd_start_schedulers(kiblnd_data.kib_scheds[cpt]);
-               if (rc != 0) {
+               if (rc) {
                        CERROR("Failed to start scheduler threads for %s\n",
                               dev->ibd_ifname);
                        return rc;
 
        colon = strchr(ifname, ':');
        list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
-               if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
+               if (!strcmp(&dev->ibd_ifname[0], ifname))
                        return dev;
 
                if (alias)
                if (colon2)
                        *colon2 = 0;
 
-               if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
+               if (!strcmp(&dev->ibd_ifname[0], ifname))
                        alias = dev;
 
                if (colon)
 
        if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
                rc = kiblnd_base_startup();
-               if (rc != 0)
+               if (rc)
                        return rc;
        }
 
 
        newdev = !ibdev;
        /* hmm...create kib_dev even for alias */
-       if (!ibdev || strcmp(&ibdev->ibd_ifname[0], ifname) != 0)
+       if (!ibdev || strcmp(&ibdev->ibd_ifname[0], ifname))
                ibdev = kiblnd_create_dev(ifname);
 
        if (!ibdev)
 
        rc = kiblnd_dev_start_threads(ibdev, newdev,
                                      ni->ni_cpts, ni->ni_ncpts);
-       if (rc != 0)
+       if (rc)
                goto failed;
 
        rc = kiblnd_net_init_pools(net, ni->ni_cpts, ni->ni_ncpts);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Failed to initialize NI pools: %d\n", rc);
                goto failed;
        }
                          <= IBLND_MSG_SIZE);
 
        rc = kiblnd_tunables_init();
-       if (rc != 0)
+       if (rc)
                return rc;
 
        lnet_register_lnd(&the_o2iblnd);
 
 
 #define IBLND_MSG_SIZE         (4 << 10)        /* max size of queued messages (inc hdr) */
 #define IBLND_MAX_RDMA_FRAGS    LNET_MAX_IOV      /* max # of fragments supported */
-#define IBLND_CFG_RDMA_FRAGS       (*kiblnd_tunables.kib_map_on_demand != 0 ? \
+#define IBLND_CFG_RDMA_FRAGS       (*kiblnd_tunables.kib_map_on_demand ? \
                                    *kiblnd_tunables.kib_map_on_demand :      \
                                     IBLND_MAX_RDMA_FRAGS)  /* max # of fragments configured by user */
 #define IBLND_RDMA_FRAGS(v)    ((v) == IBLND_MSG_VERSION_1 ? \
        if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */
                return 0;
 
-       if (*kiblnd_tunables.kib_dev_failover == 0) /* disabled */
+       if (!*kiblnd_tunables.kib_dev_failover) /* disabled */
                return 0;
 
        if (*kiblnd_tunables.kib_dev_failover > 1) /* force failover */
 
                /* No tx to piggyback NOOP onto or no credit to send a tx */
                return (list_empty(&conn->ibc_tx_queue) ||
-                       conn->ibc_credits == 0);
+                       !conn->ibc_credits);
        }
 
        if (!list_empty(&conn->ibc_tx_noops) || /* NOOP already queued */
            !list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */
-           conn->ibc_credits == 0)                 /* no credit */
+           !conn->ibc_credits)             /* no credit */
                return 0;
 
        if (conn->ibc_credits == 1 &&      /* last credit reserved for */
-           conn->ibc_outstanding_credits == 0) /* giving back credits */
+           !conn->ibc_outstanding_credits) /* giving back credits */
                return 0;
 
        /* No tx to piggyback NOOP onto or no credit to send a tx */
 {
        unsigned long lptr = (unsigned long)ptr;
 
-       LASSERT((lptr & IBLND_WID_MASK) == 0);
-       LASSERT((type & ~IBLND_WID_MASK) == 0);
+       LASSERT(!(lptr & IBLND_WID_MASK));
+       LASSERT(!(type & ~IBLND_WID_MASK));
        return (__u64)(lptr | type);
 }
 
 
        LASSERT(net);
        LASSERT(!in_interrupt());
        LASSERT(!tx->tx_queued);               /* mustn't be queued for sending */
-       LASSERT(tx->tx_sending == 0);     /* mustn't be awaiting sent callback */
+       LASSERT(!tx->tx_sending);         /* mustn't be awaiting sent callback */
        LASSERT(!tx->tx_waiting);             /* mustn't be awaiting peer response */
        LASSERT(tx->tx_pool);
 
                return NULL;
        tx = container_of(node, kib_tx_t, tx_list);
 
-       LASSERT(tx->tx_nwrq == 0);
+       LASSERT(!tx->tx_nwrq);
        LASSERT(!tx->tx_queued);
-       LASSERT(tx->tx_sending == 0);
+       LASSERT(!tx->tx_sending);
        LASSERT(!tx->tx_waiting);
-       LASSERT(tx->tx_status == 0);
+       LASSERT(!tx->tx_status);
        LASSERT(!tx->tx_conn);
        LASSERT(!tx->tx_lntmsg[0]);
        LASSERT(!tx->tx_lntmsg[1]);
-       LASSERT(tx->tx_nfrags == 0);
+       LASSERT(!tx->tx_nfrags);
 
        return tx;
 }
         */
        kiblnd_conn_addref(conn);
        rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
-       if (unlikely(rc != 0)) {
+       if (unlikely(rc)) {
                CERROR("Can't post rx for %s: %d, bad_wrq: %p\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq);
                rx->rx_nob = 0;
        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */
                goto out;
 
-       if (unlikely(rc != 0)) {
+       if (unlikely(rc)) {
                kiblnd_close_conn(conn, rc);
                kiblnd_drop_rx(rx);          /* No more posts for this rx */
                goto out;
                kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);
 
                LASSERT(!tx->tx_queued);
-               LASSERT(tx->tx_sending != 0 || tx->tx_waiting);
+               LASSERT(tx->tx_sending || tx->tx_waiting);
 
                if (tx->tx_cookie != cookie)
                        continue;
                return;
        }
 
-       if (tx->tx_status == 0) {              /* success so far */
+       if (!tx->tx_status) {          /* success so far */
                if (status < 0) /* failed? */
                        tx->tx_status = status;
                else if (txtype == IBLND_MSG_GET_REQ)
 
        tx->tx_waiting = 0;
 
-       idle = !tx->tx_queued && (tx->tx_sending == 0);
+       idle = !tx->tx_queued && !tx->tx_sending;
        if (idle)
                list_del(&tx->tx_list);
 
               msg->ibm_type, credits,
               libcfs_nid2str(conn->ibc_peer->ibp_nid));
 
-       if (credits != 0) {
+       if (credits) {
                /* Have I received credits that will let me send? */
                spin_lock(&conn->ibc_lock);
 
                        break;
                }
 
-               if (credits != 0) /* credit already posted */
+               if (credits) /* credit already posted */
                        post_credit = IBLND_POSTRX_NO_CREDIT;
                else          /* a keepalive NOOP */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
        rx->rx_nob = nob;
 
        rc = kiblnd_unpack_msg(msg, rx->rx_nob);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Error %d unpacking rx from %s\n",
                       rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                goto failed;
 
        fps = net->ibn_fmr_ps[cpt];
        rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->fmr);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't map %d pages: %d\n", npages, rc);
                return rc;
        }
                tx->fmr.fmr_pfmr = NULL;
        }
 
-       if (tx->tx_nfrags != 0) {
+       if (tx->tx_nfrags) {
                kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
                                    tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
                tx->tx_nfrags = 0;
        LASSERT(tx->tx_nwrq > 0);
        LASSERT(tx->tx_nwrq <= 1 + IBLND_RDMA_FRAGS(ver));
 
-       LASSERT(credit == 0 || credit == 1);
+       LASSERT(!credit || credit == 1);
        LASSERT(conn->ibc_outstanding_credits >= 0);
        LASSERT(conn->ibc_outstanding_credits <= IBLND_MSG_QUEUE_SIZE(ver));
        LASSERT(conn->ibc_credits >= 0);
                return -EAGAIN;
        }
 
-       if (credit != 0 && conn->ibc_credits == 0) {   /* no credits */
+       if (credit && !conn->ibc_credits) {   /* no credits */
                CDEBUG(D_NET, "%s: no credits\n",
                       libcfs_nid2str(peer->ibp_nid));
                return -EAGAIN;
        }
 
-       if (credit != 0 && !IBLND_OOB_CAPABLE(ver) &&
+       if (credit && !IBLND_OOB_CAPABLE(ver) &&
            conn->ibc_credits == 1 &&   /* last credit reserved */
            msg->ibm_type != IBLND_MSG_NOOP) {      /* for NOOP */
                CDEBUG(D_NET, "%s: not using last credit\n",
 
        conn->ibc_last_send = jiffies;
 
-       if (rc == 0)
+       if (!rc)
                return 0;
 
        /*
        tx->tx_waiting = 0;
        tx->tx_sending--;
 
-       done = (tx->tx_sending == 0);
+       done = !tx->tx_sending;
        if (done)
                list_del(&tx->tx_list);
 
                        break;
                }
 
-               if (kiblnd_post_tx_locked(conn, tx, credit) != 0)
+               if (kiblnd_post_tx_locked(conn, tx, credit))
                        break;
        }
 
                tx->tx_status = -EIO;
        }
 
-       idle = (tx->tx_sending == 0) &&  /* This is the final callback */
+       idle = !tx->tx_sending &&        /* This is the final callback */
               !tx->tx_waiting &&              /* Not waiting for peer */
               !tx->tx_queued;            /* Not re-queued (PUT_DONE) */
        if (idle)
        int wrknob;
 
        LASSERT(!in_interrupt());
-       LASSERT(tx->tx_nwrq == 0);
+       LASSERT(!tx->tx_nwrq);
        LASSERT(type == IBLND_MSG_GET_DONE ||
                type == IBLND_MSG_PUT_DONE);
 
 
        /* allow the port to be reused */
        rc = rdma_set_reuseaddr(cmid, 1);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Unable to set reuse on cmid: %d\n", rc);
                return rc;
        }
                                       (struct sockaddr *)srcaddr,
                                       (struct sockaddr *)dstaddr,
                                       timeout_ms);
-               if (rc == 0) {
+               if (!rc) {
                        CDEBUG(D_NET, "bound to port %hu\n", port);
                        return 0;
                } else if (rc == -EADDRINUSE || rc == -EADDRNOTAVAIL) {
                                       (struct sockaddr *)&dstaddr,
                                       *kiblnd_tunables.kib_timeout * 1000);
        }
-       if (rc != 0) {
+       if (rc) {
                /* Can't initiate address resolution:  */
                CERROR("Can't resolve addr for %s: %d\n",
                       libcfs_nid2str(peer->ibp_nid), rc);
        if (peer) {
                if (list_empty(&peer->ibp_conns)) {
                        /* found a peer, but it's still connecting... */
-                       LASSERT(peer->ibp_connecting != 0 ||
-                               peer->ibp_accepting != 0);
+                       LASSERT(peer->ibp_connecting ||
+                               peer->ibp_accepting);
                        if (tx)
                                list_add_tail(&tx->tx_list,
                                              &peer->ibp_tx_queue);
 
        /* Allocate a peer ready to add to the peer table and retry */
        rc = kiblnd_create_peer(ni, &peer, nid);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't create peer %s\n", libcfs_nid2str(nid));
                if (tx) {
                        tx->tx_status = -EHOSTUNREACH;
        if (peer2) {
                if (list_empty(&peer2->ibp_conns)) {
                        /* found a peer, but it's still connecting... */
-                       LASSERT(peer2->ibp_connecting != 0 ||
-                               peer2->ibp_accepting != 0);
+                       LASSERT(peer2->ibp_connecting ||
+                               peer2->ibp_accepting);
                        if (tx)
                                list_add_tail(&tx->tx_list,
                                              &peer2->ibp_tx_queue);
        }
 
        /* Brand new peer */
-       LASSERT(peer->ibp_connecting == 0);
+       LASSERT(!peer->ibp_connecting);
        peer->ibp_connecting = 1;
 
        /* always called with a ref on ni, which prevents ni being shutdown */
-       LASSERT(((kib_net_t *)ni->ni_data)->ibn_shutdown == 0);
+       LASSERT(!((kib_net_t *)ni->ni_data)->ibn_shutdown);
 
        if (tx)
                list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);
        CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
               payload_nob, payload_niov, libcfs_id2str(target));
 
-       LASSERT(payload_nob == 0 || payload_niov > 0);
+       LASSERT(!payload_nob || payload_niov > 0);
        LASSERT(payload_niov <= LNET_MAX_IOV);
 
        /* Thread context */
                return -EIO;
 
        case LNET_MSG_ACK:
-               LASSERT(payload_nob == 0);
+               LASSERT(!payload_nob);
                break;
 
        case LNET_MSG_GET:
 
                ibmsg = tx->tx_msg;
                rd = &ibmsg->ibm_u.get.ibgm_rd;
-               if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
+               if (!(lntmsg->msg_md->md_options & LNET_MD_KIOV))
                        rc = kiblnd_setup_rd_iov(ni, tx, rd,
                                                 lntmsg->msg_md->md_niov,
                                                 lntmsg->msg_md->md_iov.iov,
                                                  lntmsg->msg_md->md_niov,
                                                  lntmsg->msg_md->md_iov.kiov,
                                                  0, lntmsg->msg_md->md_length);
-               if (rc != 0) {
+               if (rc) {
                        CERROR("Can't setup GET sink for %s: %d\n",
                               libcfs_nid2str(target.nid), rc);
                        kiblnd_tx_done(ni, tx);
                        rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
                                                  payload_niov, payload_kiov,
                                                  payload_offset, payload_nob);
-               if (rc != 0) {
+               if (rc) {
                        CERROR("Can't setup PUT src for %s: %d\n",
                               libcfs_nid2str(target.nid), rc);
                        kiblnd_tx_done(ni, tx);
                goto failed_0;
        }
 
-       if (nob == 0)
+       if (!nob)
                rc = 0;
        else if (!kiov)
                rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
                rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
                                          niov, kiov, offset, nob);
 
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't setup GET src for %s: %d\n",
                       libcfs_nid2str(target.nid), rc);
                goto failed_1;
                goto failed_1;
        }
 
-       if (nob == 0) {
+       if (!nob) {
                /* No RDMA: local completion may happen now! */
                lnet_finalize(ni, lntmsg, 0);
        } else {
                kib_msg_t       *txmsg;
                kib_rdma_desc_t *rd;
 
-               if (mlen == 0) {
+               if (!mlen) {
                        lnet_finalize(ni, lntmsg, 0);
                        kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, 0,
                                               rxmsg->ibm_u.putreq.ibprm_cookie);
                else
                        rc = kiblnd_setup_rd_kiov(ni, tx, rd,
                                                  niov, kiov, offset, mlen);
-               if (rc != 0) {
+               if (rc) {
                        CERROR("Can't setup PUT sink for %s: %d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
                        kiblnd_tx_done(ni, tx);
        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
        if (list_empty(&peer->ibp_conns) &&
-           peer->ibp_accepting == 0 &&
-           peer->ibp_connecting == 0 &&
-           peer->ibp_error != 0) {
+           !peer->ibp_accepting &&
+           !peer->ibp_connecting &&
+           peer->ibp_error) {
                error = peer->ibp_error;
                peer->ibp_error = 0;
 
 
        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
-       if (error != 0)
+       if (error)
                lnet_notify(peer->ibp_ni,
                            peer->ibp_nid, 0, last_alive);
 }
        kib_dev_t *dev;
        unsigned long flags;
 
-       LASSERT(error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+       LASSERT(error || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
 
-       if (error != 0 && conn->ibc_comms_error == 0)
+       if (error && !conn->ibc_comms_error)
                conn->ibc_comms_error = error;
 
        if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
                return; /* already being handled  */
 
-       if (error == 0 &&
+       if (!error &&
            list_empty(&conn->ibc_tx_noops) &&
            list_empty(&conn->ibc_tx_queue) &&
            list_empty(&conn->ibc_tx_queue_rsrvd) &&
 
        kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
 
-       if (error != 0 &&
+       if (error &&
            kiblnd_dev_can_failover(dev)) {
                list_add_tail(&dev->ibd_fail_list,
                              &kiblnd_data.kib_failed_devs);
 
                if (txs == &conn->ibc_active_txs) {
                        LASSERT(!tx->tx_queued);
-                       LASSERT(tx->tx_waiting || tx->tx_sending != 0);
+                       LASSERT(tx->tx_waiting || tx->tx_sending);
                } else {
                        LASSERT(tx->tx_queued);
                }
                tx->tx_status = -ECONNABORTED;
                tx->tx_waiting = 0;
 
-               if (tx->tx_sending == 0) {
+               if (!tx->tx_sending) {
                        tx->tx_queued = 0;
                        list_del(&tx->tx_list);
                        list_add(&tx->tx_list, &zombies);
        LIST_HEAD(zombies);
        unsigned long flags;
 
-       LASSERT(error != 0);
+       LASSERT(error);
        LASSERT(!in_interrupt());
 
        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
                peer->ibp_accepting--;
        }
 
-       if (peer->ibp_connecting != 0 ||
-           peer->ibp_accepting != 0) {
+       if (peer->ibp_connecting ||
+           peer->ibp_accepting) {
                /* another connection attempt under way... */
                write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
                                        flags);
        LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
        conn->ibc_connvars = NULL;
 
-       if (status != 0) {
+       if (status) {
                /* failed to establish connection */
                kiblnd_peer_connect_failed(peer, active, status);
                kiblnd_finalise_conn(conn);
        else
                peer->ibp_accepting--;
 
-       if (peer->ibp_version == 0) {
+       if (!peer->ibp_version) {
                peer->ibp_version     = conn->ibc_version;
                peer->ibp_incarnation = conn->ibc_incarnation;
        }
        list_del_init(&peer->ibp_tx_queue);
 
        if (!kiblnd_peer_active(peer) ||        /* peer has been deleted */
-           conn->ibc_comms_error != 0) {       /* error has happened already */
+           conn->ibc_comms_error) {       /* error has happened already */
                lnet_ni_t *ni = peer->ibp_ni;
 
                /* start to shut down connection */
 
        rc = rdma_reject(cmid, rej, sizeof(*rej));
 
-       if (rc != 0)
+       if (rc)
                CWARN("Error %d sending reject\n", rc);
 }
 
                goto failed;
 
        rc = kiblnd_unpack_msg(reqmsg, priv_nob);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't parse connection request: %d\n", rc);
                goto failed;
        }
        }
 
        /* check time stamp as soon as possible */
-       if (reqmsg->ibm_dststamp != 0 &&
+       if (reqmsg->ibm_dststamp &&
            reqmsg->ibm_dststamp != net->ibn_incarnation) {
                CWARN("Stale connection request\n");
                rej.ibr_why = IBLND_REJECT_CONN_STALE;
 
        /* assume 'nid' is a new peer; create  */
        rc = kiblnd_create_peer(ni, &peer, nid);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't create peer for %s\n", libcfs_nid2str(nid));
                rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
                goto failed;
 
        peer2 = kiblnd_find_peer_locked(nid);
        if (peer2) {
-               if (peer2->ibp_version == 0) {
+               if (!peer2->ibp_version) {
                        peer2->ibp_version     = version;
                        peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
                }
                }
 
                /* tie-break connection race in favour of the higher NID */
-               if (peer2->ibp_connecting != 0 &&
+               if (peer2->ibp_connecting &&
                    nid < ni->ni_nid) {
                        write_unlock_irqrestore(g_lock, flags);
 
                peer = peer2;
        } else {
                /* Brand new peer */
-               LASSERT(peer->ibp_accepting == 0);
-               LASSERT(peer->ibp_version == 0 &&
-                       peer->ibp_incarnation == 0);
+               LASSERT(!peer->ibp_accepting);
+               LASSERT(!peer->ibp_version &&
+                       !peer->ibp_incarnation);
 
                peer->ibp_accepting   = 1;
                peer->ibp_version     = version;
                peer->ibp_incarnation = reqmsg->ibm_srcstamp;
 
                /* I have a ref on ni that prevents it being shutdown */
-               LASSERT(net->ibn_shutdown == 0);
+               LASSERT(!net->ibn_shutdown);
 
                kiblnd_peer_addref(peer);
                list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
        CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid));
 
        rc = rdma_accept(cmid, &cp);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't accept %s: %d\n", libcfs_nid2str(nid), rc);
                rej.ibr_version = version;
                rej.ibr_why     = IBLND_REJECT_FATAL;
        if ((!list_empty(&peer->ibp_tx_queue) ||
             peer->ibp_version != version) &&
            peer->ibp_connecting == 1 &&
-           peer->ibp_accepting == 0) {
+           !peer->ibp_accepting) {
                retry = 1;
                peer->ibp_connecting++;
 
 
        LASSERT(net);
 
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't unpack connack from %s: %d\n",
                       libcfs_nid2str(peer->ibp_nid), rc);
                goto failed;
                rc = -ESTALE;
        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
-       if (rc != 0) {
+       if (rc) {
                CERROR("Bad connection reply from %s, rc = %d, version: %x max_frags: %d\n",
                       libcfs_nid2str(peer->ibp_nid), rc,
                       msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags);
         * kiblnd_connreq_done(0) moves the conn state to ESTABLISHED, but then
         * immediately tears it down.
         */
-       LASSERT(rc != 0);
+       LASSERT(rc);
        conn->ibc_comms_error = rc;
        kiblnd_connreq_done(conn, 0);
 }
        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
        incarnation = peer->ibp_incarnation;
-       version = (peer->ibp_version == 0) ? IBLND_MSG_VERSION :
-                                            peer->ibp_version;
+       version = !peer->ibp_version ? IBLND_MSG_VERSION :
+                                      peer->ibp_version;
 
        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
        LASSERT(conn->ibc_cmid == cmid);
 
        rc = rdma_connect(cmid, &cp);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't connect to %s: %d\n",
                       libcfs_nid2str(peer->ibp_nid), rc);
                kiblnd_connreq_done(conn, rc);
                        libcfs_nid2str(peer->ibp_nid), event->status);
                kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
                kiblnd_peer_decref(peer);
-               return -EHOSTUNREACH;      /* rc != 0 destroys cmid */
+               return -EHOSTUNREACH;      /* non-zero rc destroys cmid */
 
        case RDMA_CM_EVENT_ADDR_RESOLVED:
                peer = (kib_peer_t *)cmid->context;
                CDEBUG(D_NET, "%s Addr resolved: %d\n",
                       libcfs_nid2str(peer->ibp_nid), event->status);
 
-               if (event->status != 0) {
+               if (event->status) {
                        CNETERR("Can't resolve address for %s: %d\n",
                                libcfs_nid2str(peer->ibp_nid), event->status);
                        rc = event->status;
                } else {
                        rc = rdma_resolve_route(
                                cmid, *kiblnd_tunables.kib_timeout * 1000);
-                       if (rc == 0)
+                       if (!rc)
                                return 0;
                        /* Can't initiate route resolution */
                        CERROR("Can't resolve route for %s: %d\n",
                }
                kiblnd_peer_connect_failed(peer, 1, rc);
                kiblnd_peer_decref(peer);
-               return rc;                    /* rc != 0 destroys cmid */
+               return rc;                    /* non-zero rc destroys cmid */
 
        case RDMA_CM_EVENT_ROUTE_ERROR:
                peer = (kib_peer_t *)cmid->context;
                        libcfs_nid2str(peer->ibp_nid), event->status);
                kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
                kiblnd_peer_decref(peer);
-               return -EHOSTUNREACH;      /* rc != 0 destroys cmid */
+               return -EHOSTUNREACH;      /* non-zero rc destroys cmid */
 
        case RDMA_CM_EVENT_ROUTE_RESOLVED:
                peer = (kib_peer_t *)cmid->context;
                CDEBUG(D_NET, "%s Route resolved: %d\n",
                       libcfs_nid2str(peer->ibp_nid), event->status);
 
-               if (event->status == 0)
+               if (!event->status)
                        return kiblnd_active_connect(cmid);
 
                CNETERR("Can't resolve route for %s: %d\n",
                        libcfs_nid2str(peer->ibp_nid), event->status);
                kiblnd_peer_connect_failed(peer, 1, event->status);
                kiblnd_peer_decref(peer);
-               return event->status;      /* rc != 0 destroys cmid */
+               return event->status;      /* non-zero rc destroys cmid */
 
        case RDMA_CM_EVENT_UNREACHABLE:
                conn = (kib_conn_t *)cmid->context;
                        LASSERT(tx->tx_queued);
                } else {
                        LASSERT(!tx->tx_queued);
-                       LASSERT(tx->tx_waiting || tx->tx_sending != 0);
+                       LASSERT(tx->tx_waiting || tx->tx_sending);
                }
 
                if (cfs_time_aftereq(jiffies, tx->tx_deadline)) {
                        if (*kiblnd_tunables.kib_timeout > n * p)
                                chunk = (chunk * n * p) /
                                        *kiblnd_tunables.kib_timeout;
-                       if (chunk == 0)
+                       if (!chunk)
                                chunk = 1;
 
                        for (i = 0; i < chunk; i++) {
         * NB I'm not allowed to schedule this conn once its refcount has
         * reached 0.  Since fundamentally I'm racing with scheduler threads
         * consuming my CQ I could be called after all completions have
-        * occurred.  But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0
+        * occurred.  But in this case, ibc_nrx and ibc_nsends_posted are both 0
         * and this CQ is about to be destroyed so I NOOP.
         */
        kib_conn_t *conn = arg;
        sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
 
        rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
-       if (rc != 0) {
+       if (rc) {
                CWARN("Failed to bind on CPT %d, please verify whether all CPUs are healthy and reload modules if necessary, otherwise your system might under risk of low performance\n",
                      sched->ibs_cpt);
        }
                        spin_unlock_irqrestore(&sched->ibs_lock, flags);
 
                        rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
-                       if (rc == 0) {
+                       if (!rc) {
                                rc = ib_req_notify_cq(conn->ibc_cq,
                                                      IB_CQ_NEXT_COMP);
                                if (rc < 0) {
 
                        spin_lock_irqsave(&sched->ibs_lock, flags);
 
-                       if (rc != 0 || conn->ibc_ready) {
+                       if (rc || conn->ibc_ready) {
                                /*
                                 * There may be another completion waiting; get
                                 * another scheduler to check while I handle
                                conn->ibc_scheduled = 0;
                        }
 
-                       if (rc != 0) {
+                       if (rc) {
                                spin_unlock_irqrestore(&sched->ibs_lock, flags);
                                kiblnd_complete(&wc);
 
        unsigned long flags;
        int rc;
 
-       LASSERT(*kiblnd_tunables.kib_dev_failover != 0);
+       LASSERT(*kiblnd_tunables.kib_dev_failover);
 
        cfs_block_allsigs();
 
                remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
                write_lock_irqsave(glock, flags);
 
-               if (!long_sleep || rc != 0)
+               if (!long_sleep || rc)
                        continue;
 
                /*
 
        if (*kiblnd_tunables.kib_map_on_demand == 1)
                *kiblnd_tunables.kib_map_on_demand = 2; /* don't make sense to create map if only one fragment */
 
-       if (*kiblnd_tunables.kib_concurrent_sends == 0) {
+       if (!*kiblnd_tunables.kib_concurrent_sends) {
                if (*kiblnd_tunables.kib_map_on_demand > 0 &&
                    *kiblnd_tunables.kib_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8)
                        *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits) * 2;
 
 void
 ksocknal_destroy_route(ksock_route_t *route)
 {
-       LASSERT(atomic_read(&route->ksnr_refcount) == 0);
+       LASSERT(!atomic_read(&route->ksnr_refcount));
 
        if (route->ksnr_peer)
                ksocknal_peer_decref(route->ksnr_peer);
        CDEBUG(D_NET, "peer %s %p deleted\n",
               libcfs_id2str(peer->ksnp_id), peer);
 
-       LASSERT(atomic_read(&peer->ksnp_refcount) == 0);
-       LASSERT(peer->ksnp_accepting == 0);
+       LASSERT(!atomic_read(&peer->ksnp_refcount));
+       LASSERT(!peer->ksnp_accepting);
        LASSERT(list_empty(&peer->ksnp_conns));
        LASSERT(list_empty(&peer->ksnp_routes));
        LASSERT(list_empty(&peer->ksnp_tx_queue));
                        if (peer->ksnp_ni != ni)
                                continue;
 
-                       if (peer->ksnp_n_passive_ips == 0 &&
+                       if (!peer->ksnp_n_passive_ips &&
                            list_empty(&peer->ksnp_routes)) {
                                if (index-- > 0)
                                        continue;
        ksocknal_route_addref(route);
 
        if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
-               if (route->ksnr_myipaddr == 0) {
+               if (!route->ksnr_myipaddr) {
                        /* route wasn't bound locally yet (the initial route) */
                        CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
                               libcfs_id2str(peer->ksnp_id),
        LASSERT(!route->ksnr_peer);
        LASSERT(!route->ksnr_scheduled);
        LASSERT(!route->ksnr_connecting);
-       LASSERT(route->ksnr_connected == 0);
+       LASSERT(!route->ksnr_connected);
 
        /* LASSERT(unique) */
        list_for_each(tmp, &peer->ksnp_routes) {
                ksocknal_close_conn_locked(conn, 0);
        }
 
-       if (route->ksnr_myipaddr != 0) {
+       if (route->ksnr_myipaddr) {
                iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
                                          route->ksnr_myipaddr);
                if (iface)
 
        /* Have a brand new peer ready... */
        rc = ksocknal_create_peer(&peer, ni, id);
-       if (rc != 0)
+       if (rc)
                return rc;
 
        route = ksocknal_create_route(ipaddr, port);
        write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
        /* always called with a ref on ni, so shutdown can't have started */
-       LASSERT(((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
+       LASSERT(!((ksock_net_t *) ni->ni_data)->ksnn_shutdown);
 
        peer2 = ksocknal_find_peer_locked(ni, id);
        if (peer2) {
                route = list_entry(tmp, ksock_route_t, ksnr_list);
 
                /* no match */
-               if (!(ip == 0 || route->ksnr_ipaddr == ip))
+               if (ip && route->ksnr_ipaddr != ip)
                        continue;
 
                route->ksnr_share_count = 0;
                nshared += route->ksnr_share_count;
        }
 
-       if (nshared == 0) {
+       if (!nshared) {
                /*
                 * remove everything else if there are no explicit entries
                 * left
                        route = list_entry(tmp, ksock_route_t, ksnr_list);
 
                        /* we should only be removing auto-entries */
-                       LASSERT(route->ksnr_share_count == 0);
+                       LASSERT(!route->ksnr_share_count);
                        ksocknal_del_route_locked(route);
                }
 
 
        for (i = 0; i < nip; i++) {
                ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
-               LASSERT(ipaddrs[i] != 0);
+               LASSERT(ipaddrs[i]);
        }
 
        read_unlock(&ksocknal_data.ksnd_global_lock);
        int i;
 
        for (i = 0; i < nips; i++) {
-               if (ips[i] == 0)
+               if (!ips[i])
                        continue;
 
                this_xor = ips[i] ^ iface->ksni_ipaddr;
-               this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0;
+               this_netmatch = !(this_xor & iface->ksni_netmask);
 
                if (!(best < 0 ||
                      best_netmatch < this_netmatch ||
 
                                k = ksocknal_match_peerip(iface, peerips, n_peerips);
                                xor = ip ^ peerips[k];
-                               this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;
+                               this_netmatch = !(xor & iface->ksni_netmask);
 
                                if (!(!best_iface ||
                                      best_netmatch < this_netmatch ||
                        if (route)
                                continue;
 
-                       this_netmatch = (((iface->ksni_ipaddr ^
+                       this_netmatch = (!((iface->ksni_ipaddr ^
                                           newroute->ksnr_ipaddr) &
-                                          iface->ksni_netmask) == 0) ? 1 : 0;
+                                          iface->ksni_netmask));
 
                        if (!(!best_iface ||
                              best_netmatch < this_netmatch ||
        int peer_port;
 
        rc = lnet_sock_getaddr(sock, 1, &peer_ip, &peer_port);
-       LASSERT(rc == 0);                     /* we succeeded before */
+       LASSERT(!rc);                 /* we succeeded before */
 
        LIBCFS_ALLOC(cr, sizeof(*cr));
        if (!cr) {
 
        /* stash conn's local and remote addrs */
        rc = ksocknal_lib_get_conn_addrs(conn);
-       if (rc != 0)
+       if (rc)
                goto failed_1;
 
        /*
                }
 
                rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
-               if (rc != 0)
+               if (rc)
                        goto failed_1;
        } else {
                peerid.nid = LNET_NID_ANY;
        if (rc < 0)
                goto failed_1;
 
-       LASSERT(rc == 0 || active);
+       LASSERT(!rc || active);
        LASSERT(conn->ksnc_proto);
        LASSERT(peerid.nid != LNET_NID_ANY);
 
                write_lock_bh(global_lock);
        } else {
                rc = ksocknal_create_peer(&peer, ni, peerid);
-               if (rc != 0)
+               if (rc)
                        goto failed_1;
 
                write_lock_bh(global_lock);
 
                /* called with a ref on ni, so shutdown can't have started */
-               LASSERT(((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
+               LASSERT(!((ksock_net_t *) ni->ni_data)->ksnn_shutdown);
 
                peer2 = ksocknal_find_peer_locked(ni, peerid);
                if (!peer2) {
                         * Reply on a passive connection attempt so the peer
                         * realises we're connected.
                         */
-                       LASSERT(rc == 0);
+                       LASSERT(!rc);
                        if (!active)
                                rc = EALREADY;
 
         * socket; this ensures the socket only tears down after the
         * response has been sent.
         */
-       if (rc == 0)
+       if (!rc)
                rc = ksocknal_lib_setup_sock(sock);
 
        write_lock_bh(global_lock);
 
        write_unlock_bh(global_lock);
 
-       if (rc != 0) {
+       if (rc) {
                write_lock_bh(global_lock);
                if (!conn->ksnc_closing) {
                        /* could be closed by another thread */
                        ksocknal_close_conn_locked(conn, rc);
                }
                write_unlock_bh(global_lock);
-       } else if (ksocknal_connsock_addref(conn) == 0) {
+       } else if (!ksocknal_connsock_addref(conn)) {
                /* Allow I/O to proceed. */
                ksocknal_read_callback(conn);
                ksocknal_write_callback(conn);
        ksock_conn_t *conn2;
        struct list_head *tmp;
 
-       LASSERT(peer->ksnp_error == 0);
+       LASSERT(!peer->ksnp_error);
        LASSERT(!conn->ksnc_closing);
        conn->ksnc_closing = 1;
 
        if (route) {
                /* dissociate conn from route... */
                LASSERT(!route->ksnr_deleted);
-               LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);
+               LASSERT(route->ksnr_connected & (1 << conn->ksnc_type));
 
                conn2 = NULL;
                list_for_each(tmp, &peer->ksnp_conns) {
         */
        read_lock(&ksocknal_data.ksnd_global_lock);
 
-       if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
+       if (!(peer->ksnp_id.pid & LNET_PID_USERFLAG) &&
            list_empty(&peer->ksnp_conns) &&
-           peer->ksnp_accepting == 0 &&
+           !peer->ksnp_accepting &&
            !ksocknal_find_connecting_route_locked(peer)) {
                notify = 1;
                last_alive = peer->ksnp_last_alive;
                if (tx->tx_conn != conn)
                        continue;
 
-               LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0);
+               LASSERT(tx->tx_msg.ksm_zc_cookies[0]);
 
                tx->tx_msg.ksm_zc_cookies[0] = 0;
                tx->tx_zc_aborted = 1; /* mark it as not-acked */
         */
        conn->ksnc_scheduler->kss_nconns--;
 
-       if (peer->ksnp_error != 0) {
+       if (peer->ksnp_error) {
                /* peer's last conn closed in error */
                LASSERT(list_empty(&peer->ksnp_conns));
                failed = 1;
 {
        /* Queue the conn for the reaper to destroy */
 
-       LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
+       LASSERT(!atomic_read(&conn->ksnc_conn_refcount));
        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 
        list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
        /* Final coup-de-grace of the reaper */
        CDEBUG(D_NET, "connection %p\n", conn);
 
-       LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
-       LASSERT(atomic_read(&conn->ksnc_sock_refcount) == 0);
+       LASSERT(!atomic_read(&conn->ksnc_conn_refcount));
+       LASSERT(!atomic_read(&conn->ksnc_sock_refcount));
        LASSERT(!conn->ksnc_sock);
        LASSERT(!conn->ksnc_route);
        LASSERT(!conn->ksnc_tx_scheduled);
        list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
                conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
 
-               if (ipaddr == 0 ||
-                   conn->ksnc_ipaddr == ipaddr) {
+               if (!ipaddr || conn->ksnc_ipaddr == ipaddr) {
                        count++;
                        ksocknal_close_conn_locked(conn, why);
                }
        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
        /* wildcards always succeed */
-       if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
+       if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || !ipaddr)
                return 0;
 
-       if (count == 0)
+       if (!count)
                return -ENOENT;
        else
                return 0;
 
        read_unlock(glock);
 
-       if (last_alive != 0)
+       if (last_alive)
                *when = last_alive;
 
        CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago, connect %d\n",
                        }
                        read_unlock(&ksocknal_data.ksnd_global_lock);
 
-                       if (i == 0) /* no match */
+                       if (!i) /* no match */
                                break;
 
                        rc = 0;
        struct list_head *rtmp;
        ksock_route_t *route;
 
-       if (ipaddress == 0 ||
-           netmask == 0)
+       if (!ipaddress || !netmask)
                return -EINVAL;
 
        write_lock_bh(&ksocknal_data.ksnd_global_lock);
                if (route->ksnr_myipaddr != ipaddr)
                        continue;
 
-               if (route->ksnr_share_count != 0) {
+               if (route->ksnr_share_count) {
                        /* Manually created; keep, but unbind */
                        route->ksnr_myipaddr = 0;
                } else {
        for (i = 0; i < net->ksnn_ninterfaces; i++) {
                this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
 
-               if (!(ipaddress == 0 ||
-                     ipaddress == this_ip))
+               if (!(!ipaddress || ipaddress == this_ip))
                        continue;
 
                rc = 0;
                rc = ksocknal_get_peer_info(ni, data->ioc_count,
                                            &id, &myip, &ip, &port,
                                            &conn_count,  &share_count);
-               if (rc != 0)
+               if (rc)
                        return rc;
 
                data->ioc_nid    = id.nid;
 static void
 ksocknal_free_buffers(void)
 {
-       LASSERT(atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
+       LASSERT(!atomic_read(&ksocknal_data.ksnd_nactive_txs));
 
        if (ksocknal_data.ksnd_sched_info) {
                struct ksock_sched_info *info;
        int i;
        int j;
 
-       LASSERT(ksocknal_data.ksnd_nnets == 0);
+       LASSERT(!ksocknal_data.ksnd_nnets);
 
        switch (ksocknal_data.ksnd_init) {
        default:
                                                &sched->kss_rx_conns));
                                        LASSERT(list_empty(
                                                &sched->kss_zombie_noop_txs));
-                                       LASSERT(sched->kss_nconns == 0);
+                                       LASSERT(!sched->kss_nconns);
                                }
                        }
                }
 
                i = 4;
                read_lock(&ksocknal_data.ksnd_global_lock);
-               while (ksocknal_data.ksnd_nthreads != 0) {
+               while (ksocknal_data.ksnd_nthreads) {
                        i++;
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                               "waiting for %d threads to terminate\n",
        int i;
 
        LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
-       LASSERT(ksocknal_data.ksnd_nnets == 0);
+       LASSERT(!ksocknal_data.ksnd_nnets);
 
        memset(&ksocknal_data, 0, sizeof(ksocknal_data)); /* zero pointers */
 
                snprintf(name, sizeof(name), "socknal_cd%02d", i);
                rc = ksocknal_thread_start(ksocknal_connd,
                                           (void *)((ulong_ptr_t)i), name);
-               if (rc != 0) {
+               if (rc) {
                        spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
                        ksocknal_data.ksnd_connd_starting--;
                        spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
        }
 
        rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't spawn socknal reaper: %d\n", rc);
                goto failed;
        }
        /* Wait for all peer state to clean up */
        i = 2;
        spin_lock_bh(&net->ksnn_lock);
-       while (net->ksnn_npeers != 0) {
+       while (net->ksnn_npeers) {
                spin_unlock_bh(&net->ksnn_lock);
 
                i++;
        spin_unlock_bh(&net->ksnn_lock);
 
        for (i = 0; i < net->ksnn_ninterfaces; i++) {
-               LASSERT(net->ksnn_interfaces[i].ksni_npeers == 0);
-               LASSERT(net->ksnn_interfaces[i].ksni_nroutes == 0);
+               LASSERT(!net->ksnn_interfaces[i].ksni_npeers);
+               LASSERT(!net->ksnn_interfaces[i].ksni_nroutes);
        }
 
        list_del(&net->ksnn_list);
        LIBCFS_FREE(net, sizeof(*net));
 
        ksocknal_data.ksnd_nnets--;
-       if (ksocknal_data.ksnd_nnets == 0)
+       if (!ksocknal_data.ksnd_nnets)
                ksocknal_base_shutdown();
 }
 
                        continue;
 
                rc = lnet_ipif_query(names[i], &up, &ip, &mask);
-               if (rc != 0) {
+               if (rc) {
                        CWARN("Can't get interface %s info: %d\n",
                              names[i], rc);
                        continue;
 
        lnet_ipif_free_enumeration(names, n);
 
-       if (j == 0)
+       if (!j)
                CERROR("Can't find any usable interfaces\n");
 
        return j;
                                if (colon2)
                                        *colon2 = 0;
 
-                               found = strcmp(ifnam, ifnam2) == 0;
+                               found = !strcmp(ifnam, ifnam2);
                                if (colon2)
                                        *colon2 = ':';
                        }
        int rc = 0;
        int i;
 
-       if (info->ksi_nthreads == 0) {
+       if (!info->ksi_nthreads) {
                if (*ksocknal_tunables.ksnd_nscheds > 0) {
                        nthrs = info->ksi_nthreads_max;
                } else {
 
                rc = ksocknal_thread_start(ksocknal_scheduler,
                                           (void *)id, name);
-               if (rc == 0)
+               if (!rc)
                        continue;
 
                CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
                        continue;
 
                rc = ksocknal_start_schedulers(info);
-               if (rc != 0)
+               if (rc)
                        return rc;
        }
        return 0;
 
        if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
                rc = ksocknal_base_startup();
-               if (rc != 0)
+               if (rc)
                        return rc;
        }
 
                                             &net->ksnn_interfaces[i].ksni_ipaddr,
                                             &net->ksnn_interfaces[i].ksni_netmask);
 
-                       if (rc != 0) {
+                       if (rc) {
                                CERROR("Can't get interface %s info: %d\n",
                                       ni->ni_interfaces[i], rc);
                                goto fail_1;
 
        /* call it before add it to ksocknal_data.ksnd_nets */
        rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
-       if (rc != 0)
+       if (rc)
                goto fail_1;
 
        ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
  fail_1:
        LIBCFS_FREE(net, sizeof(*net));
  fail_0:
-       if (ksocknal_data.ksnd_nnets == 0)
+       if (!ksocknal_data.ksnd_nnets)
                ksocknal_base_shutdown();
 
        return -ENETDOWN;
        the_ksocklnd.lnd_accept   = ksocknal_accept;
 
        rc = ksocknal_tunables_init();
-       if (rc != 0)
+       if (rc)
                return rc;
 
        lnet_register_lnd(&the_ksocklnd);
 
                nob -= iov->iov_len;
                tx->tx_iov = ++iov;
                tx->tx_niov--;
-       } while (nob != 0);
+       } while (nob);
 
        return rc;
 }
        int nob;
        int rc;
 
-       LASSERT(tx->tx_niov == 0);
+       LASSERT(!tx->tx_niov);
        LASSERT(tx->tx_nkiov > 0);
 
        /* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
                nob -= (int)kiov->kiov_len;
                tx->tx_kiov = ++kiov;
                tx->tx_nkiov--;
-       } while (nob != 0);
+       } while (nob);
 
        return rc;
 }
        int rc;
        int bufnob;
 
-       if (ksocknal_data.ksnd_stall_tx != 0) {
+       if (ksocknal_data.ksnd_stall_tx) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
        }
 
-       LASSERT(tx->tx_resid != 0);
+       LASSERT(tx->tx_resid);
 
        rc = ksocknal_connsock_addref(conn);
-       if (rc != 0) {
+       if (rc) {
                LASSERT(conn->ksnc_closing);
                return -ESHUTDOWN;
        }
                        /* testing... */
                        ksocknal_data.ksnd_enomem_tx--;
                        rc = -EAGAIN;
-               } else if (tx->tx_niov != 0) {
+               } else if (tx->tx_niov) {
                        rc = ksocknal_send_iov(conn, tx);
                } else {
                        rc = ksocknal_send_kiov(conn, tx);
 
                if (rc <= 0) { /* Didn't write anything? */
 
-                       if (rc == 0) /* some stacks return 0 instead of -EAGAIN */
+                       if (!rc) /* some stacks return 0 instead of -EAGAIN */
                                rc = -EAGAIN;
 
                        /* Check if EAGAIN is due to memory pressure */
                atomic_sub(rc, &conn->ksnc_tx_nob);
                rc = 0;
 
-       } while (tx->tx_resid != 0);
+       } while (tx->tx_resid);
 
        ksocknal_connsock_decref(conn);
        return rc;
                nob -= iov->iov_len;
                conn->ksnc_rx_iov = ++iov;
                conn->ksnc_rx_niov--;
-       } while (nob != 0);
+       } while (nob);
 
        return rc;
 }
                nob -= kiov->kiov_len;
                conn->ksnc_rx_kiov = ++kiov;
                conn->ksnc_rx_nkiov--;
-       } while (nob != 0);
+       } while (nob);
 
        return 1;
 }
         */
        int rc;
 
-       if (ksocknal_data.ksnd_stall_rx != 0) {
+       if (ksocknal_data.ksnd_stall_rx) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
        }
 
        rc = ksocknal_connsock_addref(conn);
-       if (rc != 0) {
+       if (rc) {
                LASSERT(conn->ksnc_closing);
                return -ESHUTDOWN;
        }
 
        for (;;) {
-               if (conn->ksnc_rx_niov != 0)
+               if (conn->ksnc_rx_niov)
                        rc = ksocknal_recv_iov(conn);
                else
                        rc = ksocknal_recv_kiov(conn);
                        /* error/EOF or partial receive */
                        if (rc == -EAGAIN) {
                                rc = 1;
-                       } else if (rc == 0 && conn->ksnc_rx_started) {
+                       } else if (!rc && conn->ksnc_rx_started) {
                                /* EOF in the middle of a message */
                                rc = -EPROTO;
                        }
 
                /* Completed a fragment */
 
-               if (conn->ksnc_rx_nob_wanted == 0) {
+               if (!conn->ksnc_rx_nob_wanted) {
                        rc = 1;
                        break;
                }
 ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx)
 {
        lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
-       int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO;
+       int rc = (!tx->tx_resid && !tx->tx_zc_aborted) ? 0 : -EIO;
 
        LASSERT(ni || tx->tx_conn);
 
        tx->tx_deadline =
                cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
 
-       LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0);
+       LASSERT(!tx->tx_msg.ksm_zc_cookies[0]);
 
        tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++;
 
-       if (peer->ksnp_zc_next_cookie == 0)
+       if (!peer->ksnp_zc_next_cookie)
                peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
 
        list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);
 
        spin_lock(&peer->ksnp_lock);
 
-       if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
+       if (!tx->tx_msg.ksm_zc_cookies[0]) {
                /* Not waiting for an ACK */
                spin_unlock(&peer->ksnp_lock);
                return;
 
        CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc);
 
-       if (tx->tx_resid == 0) {
+       if (!tx->tx_resid) {
                /* Sent everything OK */
-               LASSERT(rc == 0);
+               LASSERT(!rc);
 
                return 0;
        }
 
        LASSERT(!route->ksnr_scheduled);
        LASSERT(!route->ksnr_connecting);
-       LASSERT((ksocknal_route_mask() & ~route->ksnr_connected) != 0);
+       LASSERT(ksocknal_route_mask() & ~route->ksnr_connected);
 
        route->ksnr_scheduled = 1;            /* scheduling conn for connd */
        ksocknal_route_addref(route);      /* extra ref for connd */
        bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
        spin_lock_bh(&sched->kss_lock);
 
-       if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
+       if (list_empty(&conn->ksnc_tx_queue) && !bufnob) {
                /* First packet starts the timeout */
                conn->ksnc_tx_deadline =
                        cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
                 * The packet is noop ZC ACK, try to piggyback the ack_cookie
                 * on a normal packet so I don't need to send it
                 */
-               LASSERT(msg->ksm_zc_cookies[1] != 0);
+               LASSERT(msg->ksm_zc_cookies[1]);
                LASSERT(conn->ksnc_proto->pro_queue_tx_zcack);
 
                if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
                 * It's a normal packet - can it piggback a noop zc-ack that
                 * has been queued already?
                 */
-               LASSERT(msg->ksm_zc_cookies[1] == 0);
+               LASSERT(!msg->ksm_zc_cookies[1]);
                LASSERT(conn->ksnc_proto->pro_queue_tx_msg);
 
                ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx);
                        continue;
 
                /* all route types connected ? */
-               if ((ksocknal_route_mask() & ~route->ksnr_connected) == 0)
+               if (!(ksocknal_route_mask() & ~route->ksnr_connected))
                        continue;
 
-               if (!(route->ksnr_retry_interval == 0 || /* first attempt */
+               if (!(!route->ksnr_retry_interval || /* first attempt */
                      cfs_time_aftereq(now, route->ksnr_timeout))) {
                        CDEBUG(D_NET,
                               "Too soon to retry route %pI4h (cnted %d, interval %ld, %ld secs later)\n",
 
                write_unlock_bh(g_lock);
 
-               if ((id.pid & LNET_PID_USERFLAG) != 0) {
+               if (id.pid & LNET_PID_USERFLAG) {
                        CERROR("Refusing to create a connection to userspace process %s\n",
                               libcfs_id2str(id));
                        return -EHOSTUNREACH;
                rc = ksocknal_add_peer(ni, id,
                                       LNET_NIDADDR(id.nid),
                                       lnet_acceptor_port());
-               if (rc != 0) {
+               if (rc) {
                        CERROR("Can't add peer %s: %d\n",
                               libcfs_id2str(id), rc);
                        return rc;
        CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
               payload_nob, payload_niov, libcfs_id2str(target));
 
-       LASSERT(payload_nob == 0 || payload_niov > 0);
+       LASSERT(!payload_nob || payload_niov > 0);
        LASSERT(payload_niov <= LNET_MAX_IOV);
        /* payload is either all vaddrs or all pages */
        LASSERT(!(payload_kiov && payload_iov));
        if (!mpflag)
                cfs_memory_pressure_restore(mpflag);
 
-       if (rc == 0)
+       if (!rc)
                return 0;
 
        ksocknal_free_tx(tx);
 
        LASSERT(conn->ksnc_proto);
 
-       if ((*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) != 0) {
+       if (*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) {
                /* Remind the socket to ack eagerly... */
                ksocknal_lib_eager_ack(conn);
        }
 
-       if (nob_to_skip == 0) {  /* right at next packet boundary now */
+       if (!nob_to_skip) {      /* right at next packet boundary now */
                conn->ksnc_rx_started = 0;
                mb();                  /* racing with timeout thread */
 
                skipped += nob;
                nob_to_skip -= nob;
 
-       } while (nob_to_skip != 0 &&    /* mustn't overflow conn's rx iov */
+       } while (nob_to_skip &&    /* mustn't overflow conn's rx iov */
                 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct iovec));
 
        conn->ksnc_rx_niov = niov;
                conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
                conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
  again:
-       if (conn->ksnc_rx_nob_wanted != 0) {
+       if (conn->ksnc_rx_nob_wanted) {
                rc = ksocknal_receive(conn);
 
                if (rc <= 0) {
                        LASSERT(rc != -EAGAIN);
 
-                       if (rc == 0)
+                       if (!rc)
                                CDEBUG(D_NET, "[%p] EOF from %s ip %pI4h:%d\n",
                                       conn,
                                       libcfs_id2str(conn->ksnc_peer->ksnp_id),
                        /* it's not an error if conn is being closed */
                        ksocknal_close_conn_and_siblings(conn,
                                                         (conn->ksnc_closing) ? 0 : rc);
-                       return (rc == 0 ? -ESHUTDOWN : rc);
+                       return (!rc ? -ESHUTDOWN : rc);
                }
 
-               if (conn->ksnc_rx_nob_wanted != 0) {
+               if (conn->ksnc_rx_nob_wanted) {
                        /* short read */
                        return -EAGAIN;
                }
                }
 
                if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP &&
-                   conn->ksnc_msg.ksm_csum != 0 &&     /* has checksum */
+                   conn->ksnc_msg.ksm_csum &&     /* has checksum */
                    conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
                        /* NOOP Checksum error */
                        CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
                        return -EIO;
                }
 
-               if (conn->ksnc_msg.ksm_zc_cookies[1] != 0) {
+               if (conn->ksnc_msg.ksm_zc_cookies[1]) {
                        __u64 cookie = 0;
 
                        LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);
                        rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie,
                                               conn->ksnc_msg.ksm_zc_cookies[1]);
 
-                       if (rc != 0) {
+                       if (rc) {
                                CERROR("%s: Unknown ZC-ACK cookie: %llu, %llu\n",
                                       libcfs_id2str(conn->ksnc_peer->ksnp_id),
                                       cookie, conn->ksnc_msg.ksm_zc_cookies[1]);
                /* unpack message header */
                conn->ksnc_proto->pro_unpack(&conn->ksnc_msg);
 
-               if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) {
+               if (conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) {
                        /* Userspace peer */
                        lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
                        id = &conn->ksnc_peer->ksnp_id;
                /* payload all received */
                rc = 0;
 
-               if (conn->ksnc_rx_nob_left == 0 &&   /* not truncating */
-                   conn->ksnc_msg.ksm_csum != 0 &&  /* has checksum */
+               if (!conn->ksnc_rx_nob_left &&   /* not truncating */
+                   conn->ksnc_msg.ksm_csum &&  /* has checksum */
                    conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
                        CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
                               libcfs_id2str(conn->ksnc_peer->ksnp_id),
                        rc = -EIO;
                }
 
-               if (rc == 0 && conn->ksnc_msg.ksm_zc_cookies[0] != 0) {
+               if (!rc && conn->ksnc_msg.ksm_zc_cookies[0]) {
                        LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);
 
                        lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
 
                lnet_finalize(conn->ksnc_peer->ksnp_ni, conn->ksnc_cookie, rc);
 
-               if (rc != 0) {
+               if (rc) {
                        ksocknal_new_packet(conn, 0);
                        ksocknal_close_conn_and_siblings(conn, rc);
                        return -EPROTO;
        conn->ksnc_rx_nob_wanted = mlen;
        conn->ksnc_rx_nob_left = rlen;
 
-       if (mlen == 0 || iov) {
+       if (!mlen || iov) {
                conn->ksnc_rx_nkiov = 0;
                conn->ksnc_rx_kiov = NULL;
                conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
        cfs_block_allsigs();
 
        rc = cfs_cpt_bind(lnet_cpt_table(), info->ksi_cpt);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't set CPT affinity to %d: %d\n",
                       info->ksi_cpt, rc);
        }
                        LASSERT(conn->ksnc_rx_scheduled);
 
                        /* Did process_receive get everything it wanted? */
-                       if (rc == 0)
+                       if (!rc)
                                conn->ksnc_rx_ready = 1;
 
                        if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
                                rc = wait_event_interruptible_exclusive(
                                        sched->kss_waitq,
                                        !ksocknal_sched_cansleep(sched));
-                               LASSERT(rc == 0);
+                               LASSERT(!rc);
                        } else {
                                cond_resched();
                        }
        else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC))
                version = __swab32(hello->kshm_version);
 
-       if (version != 0) {
+       if (version) {
 #if SOCKNAL_VERSION_DEBUG
                if (*ksocknal_tunables.ksnd_protocol == 1)
                        return NULL;
                            lnet_acceptor_timeout();
 
        rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof(hello->kshm_magic), timeout);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Error %d reading HELLO from %pI4h\n",
                       rc, &conn->ksnc_ipaddr);
                LASSERT(rc < 0);
 
        rc = lnet_sock_read(sock, &hello->kshm_version,
                            sizeof(hello->kshm_version), timeout);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Error %d reading HELLO from %pI4h\n",
                       rc, &conn->ksnc_ipaddr);
                LASSERT(rc < 0);
 
        /* receive the rest of hello message anyway */
        rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Error %d reading or checking hello from from %pI4h\n",
                       rc, &conn->ksnc_ipaddr);
                LASSERT(rc < 0);
                 * route got connected while queued
                 */
                if (peer->ksnp_closing || route->ksnr_deleted ||
-                   wanted == 0) {
+                   !wanted) {
                        retry_later = 0;
                        break;
                }
                if (retry_later) /* needs reschedule */
                        break;
 
-               if ((wanted & (1 << SOCKLND_CONN_ANY)) != 0) {
+               if (wanted & (1 << SOCKLND_CONN_ANY)) {
                        type = SOCKLND_CONN_ANY;
-               } else if ((wanted & (1 << SOCKLND_CONN_CONTROL)) != 0) {
+               } else if (wanted & (1 << SOCKLND_CONN_CONTROL)) {
                        type = SOCKLND_CONN_CONTROL;
-               } else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) {
+               } else if (wanted & (1 << SOCKLND_CONN_BULK_IN)) {
                        type = SOCKLND_CONN_BULK_IN;
                } else {
-                       LASSERT((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0);
+                       LASSERT(wanted & (1 << SOCKLND_CONN_BULK_OUT));
                        type = SOCKLND_CONN_BULK_OUT;
                }
 
                rc = lnet_connect(&sock, peer->ksnp_id.nid,
                                  route->ksnr_myipaddr,
                                  route->ksnr_ipaddr, route->ksnr_port);
-               if (rc != 0)
+               if (rc)
                        goto failed;
 
                rc = ksocknal_create_conn(peer->ksnp_ni, route, sock, type);
                 * A +ve RC means I have to retry because I lost the connection
                 * race or I have to renegotiate protocol version
                 */
-               retry_later = (rc != 0);
+               retry_later = !!rc;
                if (retry_later)
                        CDEBUG(D_NET, "peer %s: conn race, retry later.\n",
                               libcfs_nid2str(peer->ksnp_id.nid));
                 * the peer's incoming connection request
                 */
                if (rc == EALREADY ||
-                   (rc == 0 && peer->ksnp_accepting > 0)) {
+                   (!rc && peer->ksnp_accepting > 0)) {
                        /*
                         * We want to introduce a delay before next
                         * attempt to connect if we lost conn race,
                min(route->ksnr_retry_interval,
                    cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms) / 1000);
 
-       LASSERT(route->ksnr_retry_interval != 0);
+       LASSERT(route->ksnr_retry_interval);
        route->ksnr_timeout = cfs_time_add(cfs_time_current(),
                                           route->ksnr_retry_interval);
 
        if (!list_empty(&peer->ksnp_tx_queue) &&
-           peer->ksnp_accepting == 0 &&
+           !peer->ksnp_accepting &&
            !ksocknal_find_connecting_route_locked(peer)) {
                ksock_conn_t *conn;
 
        rc = ksocknal_thread_start(ksocknal_connd, NULL, name);
 
        spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
-       if (rc == 0)
+       if (!rc)
                return 1;
 
        /* we tried ... */
        /* connd_routes can contain both pending and ordinary routes */
        list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
                            ksnr_connd_list) {
-               if (route->ksnr_retry_interval == 0 ||
+               if (!route->ksnr_retry_interval ||
                    cfs_time_aftereq(now, route->ksnr_timeout))
                        return route;
 
                 * some platform (like Darwin8.x)
                 */
                error = conn->ksnc_sock->sk->sk_err;
-               if (error != 0) {
+               if (error) {
                        ksocknal_conn_addref(conn);
 
                        switch (error) {
                }
 
                if ((!list_empty(&conn->ksnc_tx_queue) ||
-                    conn->ksnc_sock->sk->sk_wmem_queued != 0) &&
+                    conn->ksnc_sock->sk->sk_wmem_queued) &&
                    cfs_time_aftereq(cfs_time_current(),
                                     conn->ksnc_tx_deadline)) {
                        /*
                return -ENOMEM;
        }
 
-       if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) {
+       if (!ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id)) {
                read_lock(&ksocknal_data.ksnd_global_lock);
                return 1;
        }
                int resid = 0;
                int n = 0;
 
-               if (ksocknal_send_keepalive_locked(peer) != 0) {
+               if (ksocknal_send_keepalive_locked(peer)) {
                        read_unlock(&ksocknal_data.ksnd_global_lock);
                        goto again;
                }
                        n++;
                }
 
-               if (n == 0) {
+               if (!n) {
                        spin_unlock(&peer->ksnp_lock);
                        continue;
                }
                        if (*ksocknal_tunables.ksnd_timeout > n * p)
                                chunk = (chunk * n * p) /
                                        *ksocknal_tunables.ksnd_timeout;
-                       if (chunk == 0)
+                       if (!chunk)
                                chunk = 1;
 
                        for (i = 0; i < chunk; i++) {
                        deadline = cfs_time_add(deadline, cfs_time_seconds(p));
                }
 
-               if (nenomem_conns != 0) {
+               if (nenomem_conns) {
                        /*
                         * Reduce my timeout if I rescheduled ENOMEM conns.
                         * This also prevents me getting woken immediately
 
        /* Didn't need the {get,put}connsock dance to deref ksnc_sock... */
        LASSERT(!conn->ksnc_closing);
 
-       if (rc != 0) {
+       if (rc) {
                CERROR("Error %d getting sock peer IP\n", rc);
                return rc;
        }
 
        rc = lnet_sock_getaddr(conn->ksnc_sock, 0, &conn->ksnc_myipaddr, NULL);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Error %d getting sock local IP\n", rc);
                return rc;
        }
         * ZC if the socket supports scatter/gather and doesn't need software
         * checksums
         */
-       return ((caps & NETIF_F_SG) != 0 && (caps & NETIF_F_CSUM_MASK) != 0);
+       return ((caps & NETIF_F_SG) && (caps & NETIF_F_CSUM_MASK));
 }
 
 int
        if (*ksocknal_tunables.ksnd_enable_csum && /* checksum enabled */
            conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection  */
            tx->tx_nob == tx->tx_resid           && /* frist sending    */
-           tx->tx_msg.ksm_csum == 0)                /* not checksummed  */
+           !tx->tx_msg.ksm_csum)                    /* not checksummed  */
                ksocknal_lib_csum_tx(tx);
 
        /*
         * NB we can't trust socket ops to either consume our iovs
         * or leave them alone.
         */
-       if (tx->tx_msg.ksm_zc_cookies[0] != 0) {
+       if (tx->tx_msg.ksm_zc_cookies[0]) {
                /* Zero copy is enabled */
                struct sock *sk = sock->sk;
                struct page *page = kiov->kiov_page;
                conn->ksnc_msg.ksm_csum = 0;
        }
 
-       if (saved_csum != 0) {
+       if (saved_csum) {
                /* accumulate checksum */
                for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
                        LASSERT(i < niov);
                return NULL;
 
        for (nob = i = 0; i < niov; i++) {
-               if ((kiov[i].kiov_offset != 0 && i > 0) ||
+               if ((kiov[i].kiov_offset && i > 0) ||
                    (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1))
                        return NULL;
 
        rc = kernel_recvmsg(conn->ksnc_sock, &msg, (struct kvec *)scratchiov,
                            n, nob, MSG_DONTWAIT);
 
-       if (conn->ksnc_msg.ksm_csum != 0) {
+       if (conn->ksnc_msg.ksm_csum) {
                for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
                        LASSERT(i < niov);
 
        int rc;
 
        rc = ksocknal_connsock_addref(conn);
-       if (rc != 0) {
+       if (rc) {
                LASSERT(conn->ksnc_closing);
                *txmem = *rxmem = *nagle = 0;
                return -ESHUTDOWN;
        }
 
        rc = lnet_sock_getbuf(sock, txmem, rxmem);
-       if (rc == 0) {
+       if (!rc) {
                len = sizeof(*nagle);
                rc = kernel_getsockopt(sock, SOL_TCP, TCP_NODELAY,
                                       (char *)nagle, &len);
 
        ksocknal_connsock_decref(conn);
 
-       if (rc == 0)
+       if (!rc)
                *nagle = !*nagle;
        else
                *txmem = *rxmem = *nagle = 0;
 
        rc = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, (char *)&linger,
                               sizeof(linger));
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't set SO_LINGER: %d\n", rc);
                return rc;
        }
        option = -1;
        rc = kernel_setsockopt(sock, SOL_TCP, TCP_LINGER2, (char *)&option,
                               sizeof(option));
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't set SO_LINGER2: %d\n", rc);
                return rc;
        }
 
                rc = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
                                       (char *)&option, sizeof(option));
-               if (rc != 0) {
+               if (rc) {
                        CERROR("Can't disable nagle: %d\n", rc);
                        return rc;
                }
 
        rc = lnet_sock_setbuf(sock, *ksocknal_tunables.ksnd_tx_buffer_size,
                              *ksocknal_tunables.ksnd_rx_buffer_size);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't set buffer tx %d, rx %d buffers: %d\n",
                       *ksocknal_tunables.ksnd_tx_buffer_size,
                       *ksocknal_tunables.ksnd_rx_buffer_size, rc);
        option = (do_keepalive ? 1 : 0);
        rc = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, (char *)&option,
                               sizeof(option));
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't set SO_KEEPALIVE: %d\n", rc);
                return rc;
        }
 
        rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, (char *)&keep_idle,
                               sizeof(keep_idle));
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't set TCP_KEEPIDLE: %d\n", rc);
                return rc;
        }
 
        rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
                               (char *)&keep_intvl, sizeof(keep_intvl));
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't set TCP_KEEPINTVL: %d\n", rc);
                return rc;
        }
 
        rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, (char *)&keep_count,
                               sizeof(keep_count));
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't set TCP_KEEPCNT: %d\n", rc);
                return rc;
        }
        int rc;
 
        rc = ksocknal_connsock_addref(conn);
-       if (rc != 0)                        /* being shut down */
+       if (rc)                     /* being shut down */
                return;
 
        sk = conn->ksnc_sock->sk;
 
        rc = kernel_setsockopt(conn->ksnc_sock, SOL_TCP, TCP_NODELAY,
                               (char *)&val, sizeof(val));
-       LASSERT(rc == 0);
+       LASSERT(!rc);
 
        lock_sock(sk);
        tp->nonagle = nonagle;
 
        }
 
        LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_LNET);
-       LASSERT(tx->tx_msg.ksm_zc_cookies[1] == 0);
+       LASSERT(!tx->tx_msg.ksm_zc_cookies[1]);
 
        if (tx_ack)
                cookie = tx_ack->tx_msg.ksm_zc_cookies[1];
 
        if (tx->tx_msg.ksm_zc_cookies[1] == SOCKNAL_KEEPALIVE_PING) {
                /* replace the keepalive PING with a real ACK */
-               LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0);
+               LASSERT(!tx->tx_msg.ksm_zc_cookies[0]);
                tx->tx_msg.ksm_zc_cookies[1] = cookie;
                return 1;
        }
                return 1; /* XXX return error in the future */
        }
 
-       if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
+       if (!tx->tx_msg.ksm_zc_cookies[0]) {
                /* NOOP tx has only one ZC-ACK cookie, can carry at least one more */
                if (tx->tx_msg.ksm_zc_cookies[1] > cookie) {
                        tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1];
                        tmp = tx->tx_msg.ksm_zc_cookies[0];
                }
 
-               if (tmp != 0) {
+               if (tmp) {
                        /* range of cookies */
                        tx->tx_msg.ksm_zc_cookies[0] = tmp - 1;
                        tx->tx_msg.ksm_zc_cookies[1] = tmp + 1;
                return -ENOMEM;
 
        rc = ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id);
-       if (rc == 0)
+       if (!rc)
                return 0;
 
        ksocknal_free_tx(tx);
        LIST_HEAD(zlist);
        int count;
 
-       if (cookie1 == 0)
+       if (!cookie1)
                cookie1 = cookie2;
 
        count = (cookie1 > cookie2) ? 2 : (cookie2 - cookie1 + 1);
                        list_del(&tx->tx_zc_list);
                        list_add(&tx->tx_zc_list, &zlist);
 
-                       if (--count == 0)
+                       if (!--count)
                                break;
                }
        }
                ksocknal_tx_decref(tx);
        }
 
-       return count == 0 ? 0 : -EPROTO;
+       return !count ? 0 : -EPROTO;
 }
 
 static int
        hmv->version_major = cpu_to_le16(KSOCK_PROTO_V1_MAJOR);
        hmv->version_minor = cpu_to_le16(KSOCK_PROTO_V1_MINOR);
 
-       if (the_lnet.ln_testprotocompat != 0) {
+       if (the_lnet.ln_testprotocompat) {
                /* single-shot proto check */
                LNET_LOCK();
-               if ((the_lnet.ln_testprotocompat & 1) != 0) {
+               if (the_lnet.ln_testprotocompat & 1) {
                        hmv->version_major++;   /* just different! */
                        the_lnet.ln_testprotocompat &= ~1;
                }
-               if ((the_lnet.ln_testprotocompat & 2) != 0) {
+               if (the_lnet.ln_testprotocompat & 2) {
                        hmv->magic = LNET_PROTO_MAGIC;
                        the_lnet.ln_testprotocompat &= ~2;
                }
        hdr->msg.hello.incarnation = cpu_to_le64(hello->kshm_src_incarnation);
 
        rc = lnet_sock_write(sock, hdr, sizeof(*hdr), lnet_acceptor_timeout());
-       if (rc != 0) {
+       if (rc) {
                CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n",
                        rc, &conn->ksnc_ipaddr, conn->ksnc_port);
                goto out;
        }
 
-       if (hello->kshm_nips == 0)
+       if (!hello->kshm_nips)
                goto out;
 
        for (i = 0; i < (int) hello->kshm_nips; i++)
        rc = lnet_sock_write(sock, hello->kshm_ips,
                             hello->kshm_nips * sizeof(__u32),
                             lnet_acceptor_timeout());
-       if (rc != 0) {
+       if (rc) {
                CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n",
                        rc, hello->kshm_nips,
                        &conn->ksnc_ipaddr, conn->ksnc_port);
        hello->kshm_magic   = LNET_PROTO_MAGIC;
        hello->kshm_version = conn->ksnc_proto->pro_version;
 
-       if (the_lnet.ln_testprotocompat != 0) {
+       if (the_lnet.ln_testprotocompat) {
                /* single-shot proto check */
                LNET_LOCK();
-               if ((the_lnet.ln_testprotocompat & 1) != 0) {
+               if (the_lnet.ln_testprotocompat & 1) {
                        hello->kshm_version++;   /* just different! */
                        the_lnet.ln_testprotocompat &= ~1;
                }
 
        rc = lnet_sock_write(sock, hello, offsetof(ksock_hello_msg_t, kshm_ips),
                             lnet_acceptor_timeout());
-       if (rc != 0) {
+       if (rc) {
                CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n",
                        rc, &conn->ksnc_ipaddr, conn->ksnc_port);
                return rc;
        }
 
-       if (hello->kshm_nips == 0)
+       if (!hello->kshm_nips)
                return 0;
 
        rc = lnet_sock_write(sock, hello->kshm_ips,
                             hello->kshm_nips * sizeof(__u32),
                             lnet_acceptor_timeout());
-       if (rc != 0) {
+       if (rc) {
                CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n",
                        rc, hello->kshm_nips,
                        &conn->ksnc_ipaddr, conn->ksnc_port);
        rc = lnet_sock_read(sock, &hdr->src_nid,
                            sizeof(*hdr) - offsetof(lnet_hdr_t, src_nid),
                            timeout);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Error %d reading rest of HELLO hdr from %pI4h\n",
                       rc, &conn->ksnc_ipaddr);
                LASSERT(rc < 0 && rc != -EALREADY);
                goto out;
        }
 
-       if (hello->kshm_nips == 0)
+       if (!hello->kshm_nips)
                goto out;
 
        rc = lnet_sock_read(sock, hello->kshm_ips,
                            hello->kshm_nips * sizeof(__u32), timeout);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Error %d reading IPs from ip %pI4h\n",
                       rc, &conn->ksnc_ipaddr);
                LASSERT(rc < 0 && rc != -EALREADY);
        for (i = 0; i < (int) hello->kshm_nips; i++) {
                hello->kshm_ips[i] = __le32_to_cpu(hello->kshm_ips[i]);
 
-               if (hello->kshm_ips[i] == 0) {
+               if (!hello->kshm_ips[i]) {
                        CERROR("Zero IP[%d] from ip %pI4h\n",
                               i, &conn->ksnc_ipaddr);
                        rc = -EPROTO;
                            offsetof(ksock_hello_msg_t, kshm_ips) -
                                     offsetof(ksock_hello_msg_t, kshm_src_nid),
                            timeout);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Error %d reading HELLO from %pI4h\n",
                       rc, &conn->ksnc_ipaddr);
                LASSERT(rc < 0 && rc != -EALREADY);
                return -EPROTO;
        }
 
-       if (hello->kshm_nips == 0)
+       if (!hello->kshm_nips)
                return 0;
 
        rc = lnet_sock_read(sock, hello->kshm_ips,
                            hello->kshm_nips * sizeof(__u32), timeout);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Error %d reading IPs from ip %pI4h\n",
                       rc, &conn->ksnc_ipaddr);
                LASSERT(rc < 0 && rc != -EALREADY);
                if (conn->ksnc_flip)
                        __swab32s(&hello->kshm_ips[i]);
 
-               if (hello->kshm_ips[i] == 0) {
+               if (!hello->kshm_ips[i]) {
                        CERROR("Zero IP[%d] from ip %pI4h\n",
                               i, &conn->ksnc_ipaddr);
                        return -EPROTO;
 
 
                rc = lnet_sock_connect(&sock, &fatal, local_ip, port, peer_ip,
                                       peer_port);
-               if (rc != 0) {
+               if (rc) {
                        if (fatal)
                                goto failed;
                        continue;
                cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
                cr.acr_nid     = peer_nid;
 
-               if (the_lnet.ln_testprotocompat != 0) {
+               if (the_lnet.ln_testprotocompat) {
                        /* single-shot proto check */
                        lnet_net_lock(LNET_LOCK_EX);
-                       if ((the_lnet.ln_testprotocompat & 4) != 0) {
+                       if (the_lnet.ln_testprotocompat & 4) {
                                cr.acr_version++;
                                the_lnet.ln_testprotocompat &= ~4;
                        }
-                       if ((the_lnet.ln_testprotocompat & 8) != 0) {
+                       if (the_lnet.ln_testprotocompat & 8) {
                                cr.acr_magic = LNET_PROTO_MAGIC;
                                the_lnet.ln_testprotocompat &= ~8;
                        }
                }
 
                rc = lnet_sock_write(sock, &cr, sizeof(cr), accept_timeout);
-               if (rc != 0)
+               if (rc)
                        goto failed_sock;
 
                *sockp = sock;
        LASSERT(sizeof(cr) <= 16);           /* not too big for the stack */
 
        rc = lnet_sock_getaddr(sock, 1, &peer_ip, &peer_port);
-       LASSERT(rc == 0);                     /* we succeeded before */
+       LASSERT(!rc);                 /* we succeeded before */
 
        if (!lnet_accept_magic(magic, LNET_PROTO_ACCEPTOR_MAGIC)) {
                if (lnet_accept_magic(magic, LNET_PROTO_MAGIC)) {
                        rc = lnet_sock_write(sock, &cr, sizeof(cr),
                                             accept_timeout);
 
-                       if (rc != 0)
+                       if (rc)
                                CERROR("Error sending magic+version in response to LNET magic from %pI4h: %d\n",
                                       &peer_ip, rc);
                        return -EPROTO;
 
        rc = lnet_sock_read(sock, &cr.acr_version, sizeof(cr.acr_version),
                            accept_timeout);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Error %d reading connection request version from %pI4h\n",
                       rc, &peer_ip);
                return -EIO;
                cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
 
                rc = lnet_sock_write(sock, &cr, sizeof(cr), accept_timeout);
-               if (rc != 0)
+               if (rc)
                        CERROR("Error sending magic+version in response to version %d from %pI4h: %d\n",
                               peer_version, &peer_ip, rc);
                return -EPROTO;
                            sizeof(cr) -
                            offsetof(lnet_acceptor_connreq_t, acr_nid),
                            accept_timeout);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Error %d reading connection request from %pI4h\n",
                       rc, &peer_ip);
                return -EIO;
 
        rc = lnet_sock_listen(&lnet_acceptor_state.pta_sock, 0, accept_port,
                              accept_backlog);
-       if (rc != 0) {
+       if (rc) {
                if (rc == -EADDRINUSE)
                        LCONSOLE_ERROR_MSG(0x122, "Can't start acceptor on port %d: port already in use\n",
                                           accept_port);
        lnet_acceptor_state.pta_shutdown = rc;
        complete(&lnet_acceptor_state.pta_signal);
 
-       if (rc != 0)
+       if (rc)
                return rc;
 
        while (!lnet_acceptor_state.pta_shutdown) {
                rc = lnet_sock_accept(&newsock, lnet_acceptor_state.pta_sock);
-               if (rc != 0) {
+               if (rc) {
                        if (rc != -EAGAIN) {
                                CWARN("Accept error %d: pausing...\n", rc);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                }
 
                rc = lnet_sock_getaddr(newsock, 1, &peer_ip, &peer_port);
-               if (rc != 0) {
+               if (rc) {
                        CERROR("Can't determine new connection's address\n");
                        goto failed;
                }
 
                rc = lnet_sock_read(newsock, &magic, sizeof(magic),
                                    accept_timeout);
-               if (rc != 0) {
+               if (rc) {
                        CERROR("Error %d reading connection request from %pI4h\n",
                               rc, &peer_ip);
                        goto failed;
                }
 
                rc = lnet_accept(newsock, magic);
-               if (rc != 0)
+               if (rc)
                        goto failed;
 
                continue;
        LASSERT(!lnet_acceptor_state.pta_sock);
 
        rc = lnet_acceptor_get_tunables();
-       if (rc != 0)
+       if (rc)
                return rc;
 
        init_completion(&lnet_acceptor_state.pta_signal);
        if (rc <= 0)
                return rc;
 
-       if (lnet_count_acceptor_nis() == 0)  /* not required */
+       if (!lnet_count_acceptor_nis())  /* not required */
                return 0;
 
        rc2 = PTR_ERR(kthread_run(lnet_acceptor,
 
        char *nets;
        int rc;
 
-       if (*networks != 0 && *ip2nets != 0) {
+       if (*networks && *ip2nets) {
                LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or 'ip2nets' but not both at once\n");
                return NULL;
        }
 
-       if (*ip2nets != 0) {
+       if (*ip2nets) {
                rc = lnet_parse_ip2nets(&nets, ip2nets);
-               return (rc == 0) ? nets : NULL;
+               return !rc ? nets : NULL;
        }
 
-       if (*networks != 0)
+       if (*networks)
                return networks;
 
        return "tcp";
 
        LASSERT(the_lnet.ln_init);
        LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
-       LASSERT(lnd->lnd_refcount == 0);
+       LASSERT(!lnd->lnd_refcount);
 
        list_del(&lnd->lnd_list);
        CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
 {
        int count = 0;
 
-       if (rec->rec_type == 0) /* not set yet, it's uninitialized */
+       if (!rec->rec_type) /* not set yet, it's uninitialized */
                return;
 
        while (!list_empty(&rec->rec_active)) {
        int rc = 0;
        int i;
 
-       LASSERT(rec->rec_type == 0);
+       LASSERT(!rec->rec_type);
 
        rec->rec_type = type;
        INIT_LIST_HEAD(&rec->rec_active);
 
        cfs_percpt_for_each(rec, i, recs) {
                rc = lnet_res_container_setup(rec, i, type);
-               if (rc != 0) {
+               if (rc) {
                        lnet_res_containers_destroy(recs);
                        return NULL;
                }
        struct lnet_res_container **recs;
        int rc = 0;
 
-       LASSERT(the_lnet.ln_refcount == 0);
+       LASSERT(!the_lnet.ln_refcount);
 
        the_lnet.ln_routing = 0;
 
-       LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
+       LASSERT(!(requested_pid & LNET_PID_USERFLAG));
        the_lnet.ln_pid = requested_pid;
 
        INIT_LIST_HEAD(&the_lnet.ln_test_peers);
        INIT_LIST_HEAD(&the_lnet.ln_routers);
 
        rc = lnet_create_remote_nets_table();
-       if (rc != 0)
+       if (rc)
                goto failed;
        /*
         * NB the interface cookie in wire handles guards against delayed
        }
 
        rc = lnet_peer_tables_create();
-       if (rc != 0)
+       if (rc)
                goto failed;
 
        rc = lnet_msg_containers_create();
-       if (rc != 0)
+       if (rc)
                goto failed;
 
        rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
                                      LNET_COOKIE_TYPE_EQ);
-       if (rc != 0)
+       if (rc)
                goto failed;
 
        recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME);
        the_lnet.ln_md_containers = recs;
 
        rc = lnet_portals_create();
-       if (rc != 0) {
+       if (rc) {
                CERROR("Failed to create portals for LNet: %d\n", rc);
                goto failed;
        }
         */
        lnet_fail_nid(LNET_NID_ANY, 0);
 
-       LASSERT(the_lnet.ln_refcount == 0);
+       LASSERT(!the_lnet.ln_refcount);
        LASSERT(list_empty(&the_lnet.ln_test_peers));
        LASSERT(list_empty(&the_lnet.ln_nis));
        LASSERT(list_empty(&the_lnet.ln_nis_cpt));
 
        /* All quiet on the API front */
        LASSERT(!the_lnet.ln_shutdown);
-       LASSERT(the_lnet.ln_refcount == 0);
+       LASSERT(!the_lnet.ln_refcount);
        LASSERT(list_empty(&the_lnet.ln_nis_zombie));
 
        lnet_net_lock(LNET_LOCK_EX);
                                lnet_ni_t, ni_list);
                list_del_init(&ni->ni_list);
                cfs_percpt_for_each(ref, j, ni->ni_refs) {
-                       if (*ref == 0)
+                       if (!*ref)
                                continue;
                        /* still busy, add it back to zombie list */
                        list_add(&ni->ni_list, &the_lnet.ln_nis_zombie);
                goto failed;
 
        rc = lnet_parse_networks(&nilist, nets);
-       if (rc != 0)
+       if (rc)
                goto failed;
 
        while (!list_empty(&nilist)) {
 
                mutex_unlock(&the_lnet.ln_lnd_mutex);
 
-               if (rc != 0) {
+               if (rc) {
                        LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
                                           rc, libcfs_lnd2str(lnd->lnd_type));
                        lnet_net_lock(LNET_LOCK_EX);
                        continue;
                }
 
-               if (ni->ni_peertxcredits == 0 ||
-                   ni->ni_maxtxcredits == 0) {
+               if (!ni->ni_peertxcredits || !ni->ni_maxtxcredits) {
                        LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
                                           libcfs_lnd2str(lnd->lnd_type),
-                                          ni->ni_peertxcredits == 0 ?
+                                          !ni->ni_peertxcredits ?
                                           "" : "per-peer ");
                        goto failed;
                }
                the_lnet.ln_cpt_bits++;
 
        rc = lnet_create_locks();
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't create LNet global locks: %d\n", rc);
                return -1;
        }
 lnet_fini(void)
 {
        LASSERT(the_lnet.ln_init);
-       LASSERT(the_lnet.ln_refcount == 0);
+       LASSERT(!the_lnet.ln_refcount);
 
        while (!list_empty(&the_lnet.ln_lnds))
                lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
        }
 
        rc = lnet_prepare(requested_pid);
-       if (rc != 0)
+       if (rc)
                goto failed0;
 
        rc = lnet_startup_lndnis();
-       if (rc != 0)
+       if (rc)
                goto failed1;
 
        rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
-       if (rc != 0)
+       if (rc)
                goto failed2;
 
        rc = lnet_check_routes();
-       if (rc != 0)
+       if (rc)
                goto failed2;
 
        rc = lnet_rtrpools_alloc(im_a_router);
-       if (rc != 0)
+       if (rc)
                goto failed2;
 
        rc = lnet_acceptor_start();
-       if (rc != 0)
+       if (rc)
                goto failed2;
 
        the_lnet.ln_refcount = 1;
         * lnet_router_checker -> lnet_update_ni_status_locked
          */
        rc = lnet_ping_target_init();
-       if (rc != 0)
+       if (rc)
                goto failed3;
 
        rc = lnet_router_checker_start();
-       if (rc != 0)
+       if (rc)
                goto failed4;
 
        lnet_router_debugfs_init();
        case IOC_LIBCFS_ADD_ROUTE:
                rc = lnet_add_route(data->ioc_net, data->ioc_count,
                                    data->ioc_nid, data->ioc_priority);
-               return (rc != 0) ? rc : lnet_check_routes();
+               return rc ? rc : lnet_check_routes();
 
        case IOC_LIBCFS_DEL_ROUTE:
                return lnet_del_route(data->ioc_net, data->ioc_nid);
        LASSERT(the_lnet.ln_init);
 
        /* LNetNI initilization failed? */
-       if (the_lnet.ln_refcount == 0)
+       if (!the_lnet.ln_refcount)
                return rc;
 
        cpt = lnet_net_lock_current();
 
        list_for_each(tmp, &the_lnet.ln_nis) {
-               if (index-- != 0)
+               if (index--)
                        continue;
 
                ni = list_entry(tmp, lnet_ni_t, ni_list);
                if (rc == -ENOENT)
                        break;
 
-               LASSERT(rc == 0);
+               LASSERT(!rc);
        }
 
        infosz = offsetof(lnet_ping_info_t, pi_ni[n]);
                lnet_ni_status_t *ns = &pinfo->pi_ni[i];
 
                rc = LNetGetId(i, &id);
-               LASSERT(rc == 0);
+               LASSERT(!rc);
 
                ns->ns_nid    = id.nid;
                ns->ns_status = LNET_NI_STATUS_UP;
        int infosz;
 
        rc = lnet_create_ping_info();
-       if (rc != 0)
+       if (rc)
                return rc;
 
        /*
         * teardown, which by definition is the last one!
         */
        rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &the_lnet.ln_ping_target_eq);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't allocate ping EQ: %d\n", rc);
                goto failed_0;
        }
                          LNET_PROTO_PING_MATCHBITS, 0,
                          LNET_UNLINK, LNET_INS_AFTER,
                          &meh);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't create ping ME: %d\n", rc);
                goto failed_1;
        }
        rc = LNetMDAttach(meh, md,
                          LNET_RETAIN,
                          &the_lnet.ln_ping_target_md);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't attach ping MD: %d\n", rc);
                goto failed_2;
        }
 
  failed_2:
        rc2 = LNetMEUnlink(meh);
-       LASSERT(rc2 == 0);
+       LASSERT(!rc2);
  failed_1:
        rc2 = LNetEQFree(the_lnet.ln_ping_target_eq);
-       LASSERT(rc2 == 0);
+       LASSERT(!rc2);
  failed_0:
        lnet_destroy_ping_info();
        return rc;
                /* I expect overflow... */
                LASSERT(rc >= 0 || rc == -EOVERFLOW);
 
-               if (rc == 0) {
+               if (!rc) {
                        /* timed out: provide a diagnostic */
                        CWARN("Still waiting for ping MD to unlink\n");
                        timeout_ms *= 2;
        }
 
        rc = LNetEQFree(the_lnet.ln_ping_target_eq);
-       LASSERT(rc == 0);
+       LASSERT(!rc);
        lnet_destroy_ping_info();
        cfs_restore_sigs(blocked);
 }
 
        /* NB 2 events max (including any unlink event) */
        rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't allocate EQ: %d\n", rc);
                goto out_0;
        }
        md.eq_handle = eqh;
 
        rc = LNetMDBind(md, LNET_UNLINK, &mdh);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't bind MD: %d\n", rc);
                goto out_1;
        }
                     LNET_RESERVED_PORTAL,
                     LNET_PROTO_PING_MATCHBITS, 0);
 
-       if (rc != 0) {
+       if (rc) {
                /* Don't CERROR; this could be deliberate! */
 
                rc2 = LNetMDUnlink(mdh);
-               LASSERT(rc2 == 0);
+               LASSERT(!rc2);
 
                /* NB must wait for the UNLINK event below... */
                unlinked = 1;
 
                LASSERT(rc2 != -EOVERFLOW);     /* can't miss anything */
 
-               if (rc2 <= 0 || event.status != 0) {
+               if (rc2 <= 0 || event.status) {
                        /* timeout or error */
-                       if (!replied && rc == 0)
+                       if (!replied && !rc)
                                rc = (rc2 < 0) ? rc2 :
-                                    (rc2 == 0) ? -ETIMEDOUT :
+                                    !rc2 ? -ETIMEDOUT :
                                     event.status;
 
                        if (!unlinked) {
                                /* No assertion (racing with network) */
                                unlinked = 1;
                                timeout_ms = a_long_time;
-                       } else if (rc2 == 0) {
+                       } else if (!rc2) {
                                /* timed out waiting for unlink */
                                CWARN("ping %s: late network completion\n",
                                      libcfs_id2str(id));
                goto out_1;
        }
 
-       if ((info->pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
+       if (!(info->pi_features & LNET_PING_FEAT_NI_STATUS)) {
                CERROR("%s: ping w/o NI status: 0x%x\n",
                       libcfs_id2str(id), info->pi_features);
                goto out_1;
 
  out_1:
        rc2 = LNetEQFree(eqh);
-       if (rc2 != 0)
+       if (rc2)
                CERROR("rc2 %d\n", rc2);
-       LASSERT(rc2 == 0);
+       LASSERT(!rc2);
 
  out_0:
        LIBCFS_FREE(info, infosz);
 
        if (!ni)
                goto failed;
 
-       while (str && *str != 0) {
+       while (str && *str) {
                char *comma = strchr(str, ',');
                char *bracket = strchr(str, '(');
                char *square = strchr(str, '[');
 
                        rc = cfs_expr_list_parse(square, tmp - square + 1,
                                                 0, LNET_CPT_NUMBER - 1, &el);
-                       if (rc != 0) {
+                       if (rc) {
                                tmp = square;
                                goto failed_syntax;
                        }
                                *comma++ = 0;
 
                        iface = cfs_trimwhite(iface);
-                       if (*iface == 0) {
+                       if (!*iface) {
                                tmp = iface;
                                goto failed_syntax;
                        }
                if (comma) {
                        *comma = 0;
                        str = cfs_trimwhite(str);
-                       if (*str != 0) {
+                       if (*str) {
                                tmp = str;
                                goto failed_syntax;
                        }
                }
 
                str = cfs_trimwhite(str);
-               if (*str != 0) {
+               if (*str) {
                        tmp = str;
                        goto failed_syntax;
                }
                        str++;
 
                /* scan for separator or comment */
-               for (sep = str; *sep != 0; sep++)
+               for (sep = str; *sep; sep++)
                        if (lnet_issep(*sep) || *sep == '#')
                                break;
 
                        /* scan for separator */
                        do {
                                sep++;
-                       } while (*sep != 0 && !lnet_issep(*sep));
+                       } while (*sep && !lnet_issep(*sep));
                }
 
-               if (*sep == 0)
+               if (!*sep)
                        break;
 
                str = sep + 1;
                                /* simple string enumeration */
                                if (lnet_expand1tb(&pending, str, sep, sep2,
                                                   parsed,
-                                                  (int)(enditem - parsed)) != 0) {
+                                                  (int)(enditem - parsed))) {
                                        goto failed;
                                }
                                continue;
                        goto failed;
 
                if (hi < 0 || lo < 0 || stride < 0 || hi < lo ||
-                   (hi - lo) % stride != 0)
+                   (hi - lo) % stride)
                        goto failed;
 
                for (i = lo; i <= hi; i += stride) {
                                goto failed;
 
                        if (lnet_expand1tb(&pending, str, sep, sep2,
-                                          num, nob) != 0)
+                                          num, nob))
                                goto failed;
                }
        }
                /* scan for token start */
                while (isspace(*sep))
                        sep++;
-               if (*sep == 0) {
+               if (!*sep) {
                        if (ntokens < (got_hops ? 3 : 2))
                                goto token_error;
                        break;
                token = sep++;
 
                /* scan for token end */
-               while (*sep != 0 && !isspace(*sep))
+               while (*sep && !isspace(*sep))
                        sep++;
-               if (*sep != 0)
+               if (*sep)
                        *sep++ = 0;
 
                if (ntokens == 1) {
                        }
 
                        rc = lnet_add_route(net, hops, nid, priority);
-                       if (rc != 0) {
+                       if (rc) {
                                CERROR("Can't create route to %s via %s\n",
                                       libcfs_net2str(net),
                                       libcfs_nid2str(nid));
                rc = lnet_parse_route_tbs(&tbs, im_a_router);
        }
 
-       LASSERT(lnet_tbnob == 0);
+       LASSERT(!lnet_tbnob);
        return rc;
 }
 
        int i;
 
        rc = cfs_ip_addr_parse(token, len, &list);
-       if (rc != 0)
+       if (rc)
                return rc;
 
        for (rc = i = 0; !rc && i < nip; i++)
                /* scan for token start */
                while (isspace(*sep))
                        sep++;
-               if (*sep == 0)
+               if (!*sep)
                        break;
 
                token = sep++;
 
                /* scan for token end */
-               while (*sep != 0 && !isspace(*sep))
+               while (*sep && !isspace(*sep))
                        sep++;
-               if (*sep != 0)
+               if (*sep)
                        *sep++ = 0;
 
-               if (ntokens++ == 0) {
+               if (!ntokens++) {
                        net = token;
                        continue;
                }
                        return rc;
                }
 
-               matched |= (rc != 0);
+               if (rc)
+                       matched |= 1;
        }
 
        if (!matched)
                        bracket = strchr(bracket + 1, ')');
 
                        if (!bracket ||
-                           !(bracket[1] == ',' || bracket[1] == 0)) {
+                           !(bracket[1] == ',' || !bracket[1])) {
                                lnet_syntax("ip2nets", source, offset2, len);
                                return -EINVAL;
                        }
 
-                       sep = (bracket[1] == 0) ? NULL : bracket + 1;
+                       sep = !bracket[1] ? NULL : bracket + 1;
                }
 
                if (sep)
        INIT_LIST_HEAD(&raw_entries);
        if (lnet_str2tbs_sep(&raw_entries, ip2nets) < 0) {
                CERROR("Error parsing ip2nets\n");
-               LASSERT(lnet_tbnob == 0);
+               LASSERT(!lnet_tbnob);
                return -EINVAL;
        }
 
 
                list_del(&tb->ltb_list);
 
-               if (rc == 0) {            /* no match */
+               if (!rc) {                /* no match */
                        lnet_free_text_buf(tb);
                        continue;
                }
                        list_add_tail(&tb->ltb_list, &matched_nets);
 
                        len += snprintf(networks + len, sizeof(networks) - len,
-                                       "%s%s", (len == 0) ? "" : ",",
+                                       "%s%s", !len ? "" : ",",
                                        tb->ltb_text);
 
                        if (len >= sizeof(networks)) {
        lnet_free_text_bufs(&raw_entries);
        lnet_free_text_bufs(&matched_nets);
        lnet_free_text_bufs(&current_nets);
-       LASSERT(lnet_tbnob == 0);
+       LASSERT(!lnet_tbnob);
 
        if (rc < 0)
                return rc;
                        continue;
 
                rc = lnet_ipif_query(ifnames[i], &up, &ipaddrs[nip], &netmask);
-               if (rc != 0) {
+               if (rc) {
                        CWARN("Can't query interface %s: %d\n",
                              ifnames[i], rc);
                        continue;
                return nip;
        }
 
-       if (nip == 0) {
+       if (!nip) {
                LCONSOLE_ERROR_MSG(0x118,
                                   "No local IP interfaces for ip2nets to match\n");
                return -ENOENT;
                return rc;
        }
 
-       if (rc == 0) {
+       if (!rc) {
                LCONSOLE_ERROR_MSG(0x11a,
                                   "ip2nets does not match any local IP interfaces\n");
                return -ENOENT;
 
        if (count)
                count = roundup_pow_of_two(count);
 
-       if (callback != LNET_EQ_HANDLER_NONE && count != 0)
+       if (callback != LNET_EQ_HANDLER_NONE && count)
                CWARN("EQ callback is guaranteed to get every event, do you still want to set eqcount %d for polling event which will have locking overhead? Please contact with developer to confirm\n", count);
 
        /*
         * count can be 0 if only need callback, we can eliminate
         * overhead of enqueue event
         */
-       if (count == 0 && callback == LNET_EQ_HANDLER_NONE)
+       if (!count && callback == LNET_EQ_HANDLER_NONE)
                return -EINVAL;
 
        eq = lnet_eq_alloc();
        if (!eq)
                return -ENOMEM;
 
-       if (count != 0) {
+       if (count) {
                LIBCFS_ALLOC(eq->eq_events, count * sizeof(lnet_event_t));
                if (!eq->eq_events)
                        goto failed;
 
        cfs_percpt_for_each(ref, i, eq->eq_refs) {
                LASSERT(*ref >= 0);
-               if (*ref == 0)
+               if (!*ref)
                        continue;
 
                CDEBUG(D_NET, "Event equeue (%d: %d) busy on destroy.\n",
        /* MUST called with resource lock hold but w/o lnet_eq_wait_lock */
        int index;
 
-       if (eq->eq_size == 0) {
+       if (!eq->eq_size) {
                LASSERT(eq->eq_callback != LNET_EQ_HANDLER_NONE);
                eq->eq_callback(ev);
                return;
        wait_queue_t wl;
        unsigned long now;
 
-       if (tms == 0)
+       if (!tms)
                return -1; /* don't want to wait and no new event */
 
        init_waitqueue_entry(&wl, current);
                        tms = 0;
        }
 
-       wait = tms != 0; /* might need to call here again */
+       wait = tms; /* might need to call here again */
        *timeout_ms = tms;
 
        lnet_eq_wait_lock();
                        }
 
                        rc = lnet_eq_dequeue_event(eq, event);
-                       if (rc != 0) {
+                       if (rc) {
                                lnet_eq_wait_unlock();
                                *which = i;
                                return rc;
                        }
                }
 
-               if (wait == 0)
+               if (!wait)
                        break;
 
                /*
 
 void
 lnet_md_unlink(lnet_libmd_t *md)
 {
-       if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) == 0) {
+       if (!(md->md_flags & LNET_MD_FLAG_ZOMBIE)) {
                /* first unlink attempt... */
                lnet_me_t *me = md->md_me;
 
                lnet_res_lh_invalidate(&md->md_lh);
        }
 
-       if (md->md_refcount != 0) {
+       if (md->md_refcount) {
                CDEBUG(D_NET, "Queueing unlink of md %p\n", md);
                return;
        }
        lmd->md_refcount = 0;
        lmd->md_flags = (unlink == LNET_UNLINK) ? LNET_MD_FLAG_AUTO_UNLINK : 0;
 
-       if ((umd->options & LNET_MD_IOVEC) != 0) {
-               if ((umd->options & LNET_MD_KIOV) != 0) /* Can't specify both */
+       if (umd->options & LNET_MD_IOVEC) {
+               if (umd->options & LNET_MD_KIOV) /* Can't specify both */
                        return -EINVAL;
 
                niov = umd->length;
 
                lmd->md_length = total_length;
 
-               if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* use max size */
+               if ((umd->options & LNET_MD_MAX_SIZE) && /* use max size */
                    (umd->max_size < 0 ||
                     umd->max_size > total_length)) /* illegal max_size */
                        return -EINVAL;
 
-       } else if ((umd->options & LNET_MD_KIOV) != 0) {
+       } else if (umd->options & LNET_MD_KIOV) {
                niov = umd->length;
                lmd->md_niov = umd->length;
                memcpy(lmd->md_iov.kiov, umd->start,
 
                lmd->md_length = total_length;
 
-               if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
+               if ((umd->options & LNET_MD_MAX_SIZE) && /* max size used */
                    (umd->max_size < 0 ||
                     umd->max_size > total_length)) /* illegal max_size */
                        return -EINVAL;
                lmd->md_iov.iov[0].iov_base = umd->start;
                lmd->md_iov.iov[0].iov_len = umd->length;
 
-               if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
+               if ((umd->options & LNET_MD_MAX_SIZE) && /* max size used */
                    (umd->max_size < 0 ||
                     umd->max_size > (int)umd->length)) /* illegal max_size */
                        return -EINVAL;
         * and that's all.
         */
        umd->start = lmd->md_start;
-       umd->length = ((lmd->md_options &
-                       (LNET_MD_IOVEC | LNET_MD_KIOV)) == 0) ?
+       umd->length = !(lmd->md_options &
+                     (LNET_MD_IOVEC | LNET_MD_KIOV)) ?
                      lmd->md_length : lmd->md_niov;
        umd->threshold = lmd->md_threshold;
        umd->max_size = lmd->md_max_size;
 static int
 lnet_md_validate(lnet_md_t *umd)
 {
-       if (!umd->start && umd->length != 0) {
+       if (!umd->start && umd->length) {
                CERROR("MD start pointer can not be NULL with length %u\n",
                       umd->length);
                return -EINVAL;
        }
 
-       if ((umd->options & (LNET_MD_KIOV | LNET_MD_IOVEC)) != 0 &&
+       if ((umd->options & (LNET_MD_KIOV | LNET_MD_IOVEC)) &&
            umd->length > LNET_MAX_IOV) {
                CERROR("Invalid option: too many fragments %u, %d max\n",
                       umd->length, LNET_MAX_IOV);
        LASSERT(the_lnet.ln_init);
        LASSERT(the_lnet.ln_refcount > 0);
 
-       if (lnet_md_validate(&umd) != 0)
+       if (lnet_md_validate(&umd))
                return -EINVAL;
 
-       if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT)) == 0) {
+       if (!(umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT))) {
                CERROR("Invalid option: no MD_OP set\n");
                return -EINVAL;
        }
        cpt = lnet_cpt_of_cookie(meh.cookie);
 
        lnet_res_lock(cpt);
-       if (rc != 0)
+       if (rc)
                goto failed;
 
        me = lnet_handle2me(&meh);
        else
                rc = lnet_md_link(md, umd.eq_handle, cpt);
 
-       if (rc != 0)
+       if (rc)
                goto failed;
 
        /*
        LASSERT(the_lnet.ln_init);
        LASSERT(the_lnet.ln_refcount > 0);
 
-       if (lnet_md_validate(&umd) != 0)
+       if (lnet_md_validate(&umd))
                return -EINVAL;
 
-       if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT)) != 0) {
+       if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT))) {
                CERROR("Invalid option: GET|PUT illegal on active MDs\n");
                return -EINVAL;
        }
        rc = lnet_md_build(md, &umd, unlink);
 
        cpt = lnet_res_lock_current();
-       if (rc != 0)
+       if (rc)
                goto failed;
 
        rc = lnet_md_link(md, umd.eq_handle, cpt);
-       if (rc != 0)
+       if (rc)
                goto failed;
 
        lnet_md2handle(handle, md);
         * when the LND is done, the completion event flags that the MD was
         * unlinked.  Otherwise, we enqueue an event now...
         */
-       if (md->md_eq && md->md_refcount == 0) {
+       if (md->md_eq && !md->md_refcount) {
                lnet_build_unlink_event(md, &ev);
                lnet_eq_enqueue_event(md->md_eq, &ev);
        }
 
 
        lnet_res_lh_initialize(the_lnet.ln_me_containers[mtable->mt_cpt],
                               &me->me_lh);
-       if (ignore_bits != 0)
+       if (ignore_bits)
                head = &mtable->mt_mhash[LNET_MT_HASH_IGNORE];
        else
                head = lnet_mt_match_head(mtable, match_id, match_bits);
        md = me->me_md;
        if (md) {
                md->md_flags |= LNET_MD_FLAG_ABORTED;
-               if (md->md_eq && md->md_refcount == 0) {
+               if (md->md_eq && !md->md_refcount) {
                        lnet_build_unlink_event(md, &ev);
                        lnet_eq_enqueue_event(md->md_eq, &ev);
                }
 
        LASSERT(the_lnet.ln_init);
 
        /* NB: use lnet_net_lock(0) to serialize operations on test peers */
-       if (threshold != 0) {
+       if (threshold) {
                /* Adding a new entry */
                LIBCFS_ALLOC(tp, sizeof(*tp));
                if (!tp)
        list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
                tp = list_entry(el, lnet_test_peer_t, tp_list);
 
-               if (tp->tp_threshold == 0 ||    /* needs culling anyway */
+               if (!tp->tp_threshold ||    /* needs culling anyway */
                    nid == LNET_NID_ANY ||       /* removing all entries */
                    tp->tp_nid == nid) {          /* matched this one */
                        list_del(&tp->tp_list);
        list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
                tp = list_entry(el, lnet_test_peer_t, tp_list);
 
-               if (tp->tp_threshold == 0) {
+               if (!tp->tp_threshold) {
                        /* zombie entry */
                        if (outgoing) {
                                /*
                        if (tp->tp_threshold != LNET_MD_THRESH_INF) {
                                tp->tp_threshold--;
                                if (outgoing &&
-                                   tp->tp_threshold == 0) {
+                                   !tp->tp_threshold) {
                                        /* see above */
                                        list_del(&tp->tp_list);
                                        list_add(&tp->tp_list, &cull);
        /* NB diov, siov are READ-ONLY */
        unsigned int this_nob;
 
-       if (nob == 0)
+       if (!nob)
                return;
 
        /* skip complete frags before 'doffset' */
        unsigned int frag_len;
        unsigned int niov;
 
-       if (len == 0)                      /* no data => */
+       if (!len)                          /* no data => */
                return 0;                    /* no frags */
 
        LASSERT(src_niov > 0);
        char *daddr = NULL;
        char *saddr = NULL;
 
-       if (nob == 0)
+       if (!nob)
                return;
 
        LASSERT(!in_interrupt());
        unsigned int this_nob;
        char *addr = NULL;
 
-       if (nob == 0)
+       if (!nob)
                return;
 
        LASSERT(!in_interrupt());
        unsigned int this_nob;
        char *addr = NULL;
 
-       if (nob == 0)
+       if (!nob)
                return;
 
        LASSERT(!in_interrupt());
        unsigned int frag_len;
        unsigned int niov;
 
-       if (len == 0)                      /* no data => */
+       if (!len)                          /* no data => */
                return 0;                    /* no frags */
 
        LASSERT(src_niov > 0);
        int rc;
 
        LASSERT(!in_interrupt());
-       LASSERT(mlen == 0 || msg);
+       LASSERT(!mlen || msg);
 
        if (msg) {
                LASSERT(msg->msg_receiving);
 
                msg->msg_receiving = 0;
 
-               if (mlen != 0) {
+               if (mlen) {
                        niov = msg->msg_niov;
                        iov  = msg->msg_iov;
                        kiov = msg->msg_kiov;
        LASSERT(msg->msg_len > 0);
        LASSERT(!msg->msg_routing);
        LASSERT(md);
-       LASSERT(msg->msg_niov == 0);
+       LASSERT(!msg->msg_niov);
        LASSERT(!msg->msg_iov);
        LASSERT(!msg->msg_kiov);
 
        msg->msg_niov = md->md_niov;
-       if ((md->md_options & LNET_MD_KIOV) != 0)
+       if (md->md_options & LNET_MD_KIOV)
                msg->msg_kiov = md->md_iov.kiov;
        else
                msg->msg_iov = md->md_iov.iov;
        msg->msg_len = len;
        msg->msg_offset = offset;
 
-       if (len != 0)
+       if (len)
                lnet_setpayloadbuffer(msg);
 
        memset(&msg->msg_hdr, 0, sizeof(msg->msg_hdr));
        msg->msg_rx_ready_delay = 1;
        rc = ni->ni_lnd->lnd_eager_recv(ni, msg->msg_private, msg,
                                        &msg->msg_private);
-       if (rc != 0) {
+       if (rc) {
                CERROR("recv from %s / send to %s aborted: eager_recv failed %d\n",
                       libcfs_nid2str(msg->msg_rxpeer->lp_nid),
                       libcfs_id2str(msg->msg_target), rc);
 
        lp->lp_last_query = cfs_time_current();
 
-       if (last_alive != 0) /* NI has updated timestamp */
+       if (last_alive) /* NI has updated timestamp */
                lp->lp_last_alive = last_alive;
 }
 
         * case, and moreover lp_last_alive at peer creation is assumed.
         */
        if (alive && !lp->lp_alive &&
-           !(lnet_isrouter(lp) && lp->lp_alive_count == 0))
+           !(lnet_isrouter(lp) && !lp->lp_alive_count))
                lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);
 
        return alive;
         * Peer appears dead, but we should avoid frequent NI queries (at
         * most once per lnet_queryinterval seconds).
         */
-       if (lp->lp_last_query != 0) {
+       if (lp->lp_last_query) {
                static const int lnet_queryinterval = 1;
 
                unsigned long next_query =
        LASSERT(msg->msg_tx_committed);
 
        /* NB 'lp' is always the next hop */
-       if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
-           lnet_peer_alive_locked(lp) == 0) {
+       if (!(msg->msg_target.pid & LNET_PID_USERFLAG) &&
+           !lnet_peer_alive_locked(lp)) {
                the_lnet.ln_counters[cpt]->drop_count++;
                the_lnet.ln_counters[cpt]->drop_length += msg->msg_len;
                lnet_net_unlock(cpt);
        }
 
        if (msg->msg_md &&
-           (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
+           (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED)) {
                lnet_net_unlock(cpt);
 
                CNETERR("Aborting message for %s: LNetM[DE]Unlink() already called on the MD/ME.\n",
 
        LASSERT(!msg->msg_iov);
        LASSERT(!msg->msg_kiov);
-       LASSERT(msg->msg_niov == 0);
+       LASSERT(!msg->msg_niov);
        LASSERT(msg->msg_routing);
        LASSERT(msg->msg_receiving);
        LASSERT(!msg->msg_sending);
                lp = rtr->lr_gateway;
 
                if (!lp->lp_alive || /* gateway is down */
-                   ((lp->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0 &&
-                    rtr->lr_downis != 0)) /* NI to target is down */
+                   ((lp->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) &&
+                    rtr->lr_downis)) /* NI to target is down */
                        continue;
 
                if (ni && lp->lp_ni != ni)
                rc = lnet_nid2peer_locked(&lp, dst_nid, cpt);
                /* lp has ref on src_ni; lose mine */
                lnet_ni_decref_locked(src_ni, cpt);
-               if (rc != 0) {
+               if (rc) {
                        lnet_net_unlock(cpt);
                        LCONSOLE_WARN("Error %d finding peer %s\n", rc,
                                      libcfs_nid2str(dst_nid));
        if (rc == EHOSTUNREACH || rc == ECANCELED)
                return -rc;
 
-       if (rc == 0)
+       if (!rc)
                lnet_ni_send(src_ni, msg);
 
-       return 0; /* rc == 0 or EAGAIN */
+       return 0; /* !rc or EAGAIN */
 }
 
 static void
 {
        lnet_hdr_t *hdr = &msg->msg_hdr;
 
-       if (msg->msg_wanted != 0)
+       if (msg->msg_wanted)
                lnet_setpayloadbuffer(msg);
 
        lnet_build_msg_event(msg, LNET_EVENT_PUT);
         * Must I ACK?  If so I'll grab the ack_wmd out of the header and put
         * it back into the ACK during lnet_finalize()
         */
-       msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
-                       (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);
+       msg->msg_ack = !lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
+                      !(msg->msg_md->md_options & LNET_MD_ACK_DISABLE);
 
        lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
                     msg->msg_offset, msg->msg_wanted, hdr->payload_length);
                        return 0;
 
                rc = lnet_ni_eager_recv(ni, msg);
-               if (rc == 0)
+               if (!rc)
                        goto again;
                /* fall through */
 
 
        /* NB handles only looked up by creator (no flips) */
        md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
-       if (!md || md->md_threshold == 0 || md->md_me) {
+       if (!md || !md->md_threshold || md->md_me) {
                CNETERR("%s: Dropping REPLY from %s for %s MD %#llx.%#llx\n",
                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
                        !md ? "invalid" : "inactive",
                return ENOENT;            /* +ve: OK but no match */
        }
 
-       LASSERT(md->md_offset == 0);
+       LASSERT(!md->md_offset);
 
        rlength = hdr->payload_length;
        mlength = min_t(uint, rlength, md->md_length);
 
        if (mlength < rlength &&
-           (md->md_options & LNET_MD_TRUNCATE) == 0) {
+           !(md->md_options & LNET_MD_TRUNCATE)) {
                CNETERR("%s: Dropping REPLY from %s length %d for MD %#llx would overflow (%d)\n",
                        libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
                        rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
 
        lnet_msg_attach_md(msg, md, 0, mlength);
 
-       if (mlength != 0)
+       if (mlength)
                lnet_setpayloadbuffer(msg);
 
        lnet_res_unlock(cpt);
 
        /* NB handles only looked up by creator (no flips) */
        md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
-       if (!md || md->md_threshold == 0 || md->md_me) {
+       if (!md || !md->md_threshold || md->md_me) {
                /* Don't moan; this is expected */
                CDEBUG(D_NET,
                       "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
                }
        }
 
-       if (rc == 0)
+       if (!rc)
                rc = lnet_post_routed_recv_locked(msg, 0);
        return rc;
 }
 
        lnet_net_lock(cpt);
        rc = lnet_nid2peer_locked(&msg->msg_rxpeer, from_nid, cpt);
-       if (rc != 0) {
+       if (rc) {
                lnet_net_unlock(cpt);
                CERROR("%s, src %s: Dropping %s (error %d looking up sender)\n",
                       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
 
                if (rc < 0)
                        goto free_drop;
-               if (rc == 0) {
+               if (!rc) {
                        lnet_ni_recv(ni, msg->msg_private, msg, 0,
                                     0, payload_length, payload_length);
                }
                goto free_drop;  /* prevent an unused label if !kernel */
        }
 
-       if (rc == 0)
+       if (!rc)
                return 0;
 
        LASSERT(rc == ENOENT);
        lnet_res_lock(cpt);
 
        md = lnet_handle2md(&mdh);
-       if (!md || md->md_threshold == 0 || md->md_me) {
+       if (!md || !md->md_threshold || md->md_me) {
                CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
                       match_bits, portal, libcfs_id2str(target),
                       !md ? -1 : md->md_threshold);
        lnet_build_msg_event(msg, LNET_EVENT_SEND);
 
        rc = lnet_send(self, msg, LNET_NID_ANY);
-       if (rc != 0) {
+       if (rc) {
                CNETERR("Error sending PUT to %s: %d\n",
                        libcfs_id2str(target), rc);
                lnet_finalize(NULL, msg, rc);
                goto drop;
        }
 
-       if (getmd->md_threshold == 0) {
+       if (!getmd->md_threshold) {
                CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
                       libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
                       getmd);
                goto drop;
        }
 
-       LASSERT(getmd->md_offset == 0);
+       LASSERT(!getmd->md_offset);
 
        CDEBUG(D_NET, "%s: Reply from %s md %p\n",
               libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);
        lnet_res_lock(cpt);
 
        md = lnet_handle2md(&mdh);
-       if (!md || md->md_threshold == 0 || md->md_me) {
+       if (!md || !md->md_threshold || md->md_me) {
                CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
                       match_bits, portal, libcfs_id2str(target),
                       !md ? -1 : md->md_threshold);
 
        lnet_event_t *ev = &msg->msg_ev;
 
        LASSERT(msg->msg_tx_committed);
-       if (status != 0)
+       if (status)
                goto out;
 
        counters = the_lnet.ln_counters[msg->msg_tx_cpt];
        default: /* routed message */
                LASSERT(msg->msg_routing);
                LASSERT(msg->msg_rx_committed);
-               LASSERT(ev->type == 0);
+               LASSERT(!ev->type);
 
                counters->route_length += msg->msg_len;
                counters->route_count++;
        LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */
        LASSERT(msg->msg_rx_committed);
 
-       if (status != 0)
+       if (status)
                goto out;
 
        counters = the_lnet.ln_counters[msg->msg_rx_cpt];
        switch (ev->type) {
        default:
-               LASSERT(ev->type == 0);
+               LASSERT(!ev->type);
                LASSERT(msg->msg_routing);
                goto out;
 
 
        LASSERT(msg->msg_onactivelist);
 
-       if (status == 0 && msg->msg_ack) {
+       if (!status && msg->msg_ack) {
                /* Only send an ACK if the PUT completed successfully */
 
                lnet_msg_decommit(msg, cpt, 0);
                 */
                return rc;
 
-       } else if (status == 0 &&       /* OK so far */
+       } else if (!status &&   /* OK so far */
                   (msg->msg_routing && !msg->msg_sending)) {
                /* not forwarded */
                LASSERT(!msg->msg_receiving);   /* called back recv already */
                 * anything, so my finalizing friends can chomp along too
                 */
                rc = lnet_complete_msg_locked(msg, cpt);
-               if (rc != 0)
+               if (rc)
                        break;
        }
 
        container->msc_finalizers[my_slot] = NULL;
        lnet_net_unlock(cpt);
 
-       if (rc != 0)
+       if (rc)
                goto again;
 }
 EXPORT_SYMBOL(lnet_finalize);
 {
        int count = 0;
 
-       if (container->msc_init == 0)
+       if (!container->msc_init)
                return;
 
        while (!list_empty(&container->msc_active)) {
 
        rc = lnet_freelist_init(&container->msc_freelist,
                                LNET_FL_MAX_MSGS, sizeof(lnet_msg_t));
-       if (rc != 0) {
+       if (rc) {
                CERROR("Failed to init freelist for message container\n");
                lnet_msg_container_cleanup(container);
                return rc;
 
        cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) {
                rc = lnet_msg_container_setup(container, i);
-               if (rc != 0) {
+               if (rc) {
                        lnet_msg_containers_destroy();
                        return rc;
                }
 
        struct lnet_portal *ptl = the_lnet.ln_portals[index];
        int unique;
 
-       unique = ignore_bits == 0 &&
+       unique = !ignore_bits &&
                 match_id.nid != LNET_NID_ANY &&
                 match_id.pid != LNET_PID_ANY;
 
                return LNET_MATCHMD_NONE | LNET_MATCHMD_EXHAUSTED;
 
        /* mismatched MD op */
-       if ((md->md_options & info->mi_opc) == 0)
+       if (!(md->md_options & info->mi_opc))
                return LNET_MATCHMD_NONE;
 
        /* mismatched ME nid/pid? */
                return LNET_MATCHMD_NONE;
 
        /* mismatched ME matchbits? */
-       if (((me->me_match_bits ^ info->mi_mbits) & ~me->me_ignore_bits) != 0)
+       if ((me->me_match_bits ^ info->mi_mbits) & ~me->me_ignore_bits)
                return LNET_MATCHMD_NONE;
 
        /* Hurrah! This _is_ a match; check it out... */
 
-       if ((md->md_options & LNET_MD_MANAGE_REMOTE) == 0)
+       if (!(md->md_options & LNET_MD_MANAGE_REMOTE))
                offset = md->md_offset;
        else
                offset = info->mi_roffset;
 
-       if ((md->md_options & LNET_MD_MAX_SIZE) != 0) {
+       if (md->md_options & LNET_MD_MAX_SIZE) {
                mlength = md->md_max_size;
                LASSERT(md->md_offset + mlength <= md->md_length);
        } else {
 
        if (info->mi_rlength <= mlength) {      /* fits in allowed space */
                mlength = info->mi_rlength;
-       } else if ((md->md_options & LNET_MD_TRUNCATE) == 0) {
+       } else if (!(md->md_options & LNET_MD_TRUNCATE)) {
                /* this packet _really_ is too big */
                CERROR("Matching packet from %s, match %llu length %d too big: %d left, %d allowed\n",
                       libcfs_id2str(info->mi_id), info->mi_mbits,
         * We bumped md->md_refcount above so the MD just gets flagged
         * for unlink when it is finalized.
         */
-       if ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0)
+       if (md->md_flags & LNET_MD_FLAG_AUTO_UNLINK)
                lnet_md_unlink(md);
 
        return LNET_MATCHMD_OK | LNET_MATCHMD_EXHAUSTED;
                /* is there any active entry for this portal? */
                nmaps = ptl->ptl_mt_nmaps;
                /* map to an active mtable to avoid heavy "stealing" */
-               if (nmaps != 0) {
+               if (nmaps) {
                        /*
                         * NB: there is possibility that ptl_mt_maps is being
                         * changed because we are not under protection of
        bmap = &mtable->mt_exhausted[pos >> LNET_MT_BITS_U64];
        pos &= (1 << LNET_MT_BITS_U64) - 1;
 
-       return ((*bmap) & (1ULL << pos)) != 0;
+       return (*bmap & (1ULL << pos));
 }
 
 static void
                LASSERT(me == me->me_md->md_me);
 
                rc = lnet_try_match_md(me->me_md, info, msg);
-               if ((rc & LNET_MATCHMD_EXHAUSTED) == 0)
+               if (!(rc & LNET_MATCHMD_EXHAUSTED))
                        exhausted = 0; /* mlist is not empty */
 
-               if ((rc & LNET_MATCHMD_FINISH) != 0) {
+               if (rc & LNET_MATCHMD_FINISH) {
                        /*
                         * don't return EXHAUSTED bit because we don't know
                         * whether the mlist is empty or not
                        exhausted = 0;
        }
 
-       if (exhausted == 0 && head == &mtable->mt_mhash[LNET_MT_HASH_IGNORE]) {
+       if (!exhausted && head == &mtable->mt_mhash[LNET_MT_HASH_IGNORE]) {
                head = lnet_mt_match_head(mtable, info->mi_id, info->mi_mbits);
                goto again; /* re-check MEs w/o ignore-bits */
        }
 
                cpt = (first + i) % LNET_CPT_NUMBER;
                mtable = ptl->ptl_mtables[cpt];
-               if (i != 0 && i != LNET_CPT_NUMBER - 1 && !mtable->mt_enabled)
+               if (i && i != LNET_CPT_NUMBER - 1 && !mtable->mt_enabled)
                        continue;
 
                lnet_res_lock(cpt);
                lnet_ptl_lock(ptl);
 
-               if (i == 0) { /* the first try, attach on stealing list */
+               if (!i) { /* the first try, attach on stealing list */
                        list_add_tail(&msg->msg_list,
                                      &ptl->ptl_msg_stealing);
                }
                if (!list_empty(&msg->msg_list)) { /* on stealing list */
                        rc = lnet_mt_match_md(mtable, info, msg);
 
-                       if ((rc & LNET_MATCHMD_EXHAUSTED) != 0 &&
+                       if ((rc & LNET_MATCHMD_EXHAUSTED) &&
                            mtable->mt_enabled)
                                lnet_ptl_disable_mt(ptl, cpt);
 
-                       if ((rc & LNET_MATCHMD_FINISH) != 0)
+                       if (rc & LNET_MATCHMD_FINISH)
                                list_del_init(&msg->msg_list);
 
                } else {
 
                if (!list_empty(&msg->msg_list) && /* not matched yet */
                    (i == LNET_CPT_NUMBER - 1 || /* the last CPT */
-                    ptl->ptl_mt_nmaps == 0 ||   /* no active CPT */
+                    !ptl->ptl_mt_nmaps ||   /* no active CPT */
                     (ptl->ptl_mt_nmaps == 1 &&  /* the only active CPT */
                      ptl->ptl_mt_maps[0] == cpt))) {
                        /* nothing to steal, delay or drop */
                lnet_ptl_unlock(ptl);
                lnet_res_unlock(cpt);
 
-               if ((rc & LNET_MATCHMD_FINISH) != 0 || msg->msg_rx_delayed)
+               if ((rc & LNET_MATCHMD_FINISH) || msg->msg_rx_delayed)
                        break;
        }
 
 
        ptl = the_lnet.ln_portals[info->mi_portal];
        rc = lnet_ptl_match_early(ptl, msg);
-       if (rc != 0) /* matched or delayed early message */
+       if (rc) /* matched or delayed early message */
                return rc;
 
        mtable = lnet_mt_of_match(info, msg);
        }
 
        rc = lnet_mt_match_md(mtable, info, msg);
-       if ((rc & LNET_MATCHMD_EXHAUSTED) != 0 && mtable->mt_enabled) {
+       if ((rc & LNET_MATCHMD_EXHAUSTED) && mtable->mt_enabled) {
                lnet_ptl_lock(ptl);
                lnet_ptl_disable_mt(ptl, mtable->mt_cpt);
                lnet_ptl_unlock(ptl);
        }
 
-       if ((rc & LNET_MATCHMD_FINISH) != 0)    /* matched or dropping */
+       if (rc & LNET_MATCHMD_FINISH)   /* matched or dropping */
                goto out1;
 
        if (!msg->msg_rx_ready_delay)
        int exhausted = 0;
        int cpt;
 
-       LASSERT(md->md_refcount == 0); /* a brand new MD */
+       LASSERT(!md->md_refcount); /* a brand new MD */
 
        me->me_md = md;
        md->md_me = me;
 
                rc = lnet_try_match_md(md, &info, msg);
 
-               exhausted = (rc & LNET_MATCHMD_EXHAUSTED) != 0;
-               if ((rc & LNET_MATCHMD_NONE) != 0) {
+               exhausted = (rc & LNET_MATCHMD_EXHAUSTED);
+               if (rc & LNET_MATCHMD_NONE) {
                        if (exhausted)
                                break;
                        continue;
                }
 
                /* Hurrah! This _is_ a match */
-               LASSERT((rc & LNET_MATCHMD_FINISH) != 0);
+               LASSERT(rc & LNET_MATCHMD_FINISH);
                list_del_init(&msg->msg_list);
 
                if (head == &ptl->ptl_msg_stealing) {
                        continue;
                }
 
-               if ((rc & LNET_MATCHMD_OK) != 0) {
+               if (rc & LNET_MATCHMD_OK) {
                        list_add_tail(&msg->msg_list, matches);
 
                        CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n",
 
        int rc;
 
        rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't create socket: %d\n", rc);
                return rc;
        }
 
        strcpy(ifr.ifr_name, name);
        rc = lnet_sock_ioctl(SIOCGIFFLAGS, (unsigned long)&ifr);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't get flags for interface %s\n", name);
                return rc;
        }
 
-       if ((ifr.ifr_flags & IFF_UP) == 0) {
+       if (!(ifr.ifr_flags & IFF_UP)) {
                CDEBUG(D_NET, "Interface %s down\n", name);
                *up = 0;
                *ip = *mask = 0;
        strcpy(ifr.ifr_name, name);
        ifr.ifr_addr.sa_family = AF_INET;
        rc = lnet_sock_ioctl(SIOCGIFADDR, (unsigned long)&ifr);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't get IP address for interface %s\n", name);
                return rc;
        }
        strcpy(ifr.ifr_name, name);
        ifr.ifr_addr.sa_family = AF_INET;
        rc = lnet_sock_ioctl(SIOCGIFNETMASK, (unsigned long)&ifr);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't get netmask for interface %s\n", name);
                return rc;
        }
                        goto out1;
                }
 
-               LASSERT(rc == 0);
+               LASSERT(!rc);
 
                nfound = ifc.ifc_len / sizeof(*ifr);
                LASSERT(nfound <= nalloc);
                nalloc *= 2;
        }
 
-       if (nfound == 0)
+       if (!nfound)
                goto out1;
 
        LIBCFS_ALLOC(names, nfound * sizeof(*names));
                        .iov_len  = nob
                };
                struct msghdr msg = {
-                       .msg_flags      = (timeout == 0) ? MSG_DONTWAIT : 0
+                       .msg_flags      = !timeout ? MSG_DONTWAIT : 0
                };
 
-               if (timeout != 0) {
+               if (timeout) {
                        /* Set send timeout to remaining time */
                        tv = (struct timeval) {
                                .tv_sec = ticks / HZ,
                        };
                        rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,
                                               (char *)&tv, sizeof(tv));
-                       if (rc != 0) {
+                       if (rc) {
                                CERROR("Can't set socket send timeout %ld.%06d: %d\n",
                                       (long)tv.tv_sec, (int)tv.tv_usec, rc);
                                return rc;
                if (rc < 0)
                        return rc;
 
-               if (rc == 0) {
+               if (!rc) {
                        CERROR("Unexpected zero rc\n");
                        return -ECONNABORTED;
                }
                };
                rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
                                       (char *)&tv, sizeof(tv));
-               if (rc != 0) {
+               if (rc) {
                        CERROR("Can't set socket recv timeout %ld.%06d: %d\n",
                               (long)tv.tv_sec, (int)tv.tv_usec, rc);
                        return rc;
                if (rc < 0)
                        return rc;
 
-               if (rc == 0)
+               if (!rc)
                        return -ECONNRESET;
 
                buffer = ((char *)buffer) + rc;
                nob -= rc;
 
-               if (nob == 0)
+               if (!nob)
                        return 0;
 
                if (ticks <= 0)
 
        rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock);
        *sockp = sock;
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't create socket: %d\n", rc);
                return rc;
        }
        option = 1;
        rc = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
                               (char *)&option, sizeof(option));
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't set SO_REUSEADDR for socket: %d\n", rc);
                goto failed;
        }
 
-       if (local_ip != 0 || local_port != 0) {
+       if (local_ip || local_port) {
                memset(&locaddr, 0, sizeof(locaddr));
                locaddr.sin_family = AF_INET;
                locaddr.sin_port = htons(local_port);
-               locaddr.sin_addr.s_addr = (local_ip == 0) ?
+               locaddr.sin_addr.s_addr = !local_ip ?
                                          INADDR_ANY : htonl(local_ip);
 
                rc = kernel_bind(sock, (struct sockaddr *)&locaddr,
                        *fatal = 0;
                        goto failed;
                }
-               if (rc != 0) {
+               if (rc) {
                        CERROR("Error trying to bind to port %d: %d\n",
                               local_port, rc);
                        goto failed;
        int option;
        int rc;
 
-       if (txbufsize != 0) {
+       if (txbufsize) {
                option = txbufsize;
                rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
                                       (char *)&option, sizeof(option));
-               if (rc != 0) {
+               if (rc) {
                        CERROR("Can't set send buffer %d: %d\n",
                               option, rc);
                        return rc;
                }
        }
 
-       if (rxbufsize != 0) {
+       if (rxbufsize) {
                option = rxbufsize;
                rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
                                       (char *)&option, sizeof(option));
-               if (rc != 0) {
+               if (rc) {
                        CERROR("Can't set receive buffer %d: %d\n",
                               option, rc);
                        return rc;
                rc = kernel_getpeername(sock, (struct sockaddr *)&sin, &len);
        else
                rc = kernel_getsockname(sock, (struct sockaddr *)&sin, &len);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Error %d getting sock %s IP/port\n",
                       rc, remote ? "peer" : "local");
                return rc;
        int rc;
 
        rc = lnet_sock_create(sockp, &fatal, local_ip, local_port);
-       if (rc != 0) {
+       if (rc) {
                if (!fatal)
                        CERROR("Can't create socket: port %d already in use\n",
                               local_port);
        }
 
        rc = kernel_listen(*sockp, backlog);
-       if (rc == 0)
+       if (!rc)
                return 0;
 
        CERROR("Can't set listen backlog %d: %d\n", backlog, rc);
                rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
        }
 
-       if (rc != 0)
+       if (rc)
                goto failed;
 
        *newsockp = newsock;
        int rc;
 
        rc = lnet_sock_create(sockp, fatal, local_ip, local_port);
-       if (rc != 0)
+       if (rc)
                return rc;
 
        memset(&srvaddr, 0, sizeof(srvaddr));
 
        rc = kernel_connect(*sockp, (struct sockaddr *)&srvaddr,
                            sizeof(srvaddr), 0);
-       if (rc == 0)
+       if (!rc)
                return 0;
 
        /*
 
        mutex_unlock(&the_lnet.ln_api_mutex);
 
        mutex_unlock(&lnet_config_mutex);
-       return (refcount == 0) ? 0 : -EBUSY;
+       return !refcount ? 0 : -EBUSY;
 }
 
 static int
        mutex_init(&lnet_config_mutex);
 
        rc = lnet_init();
-       if (rc != 0) {
+       if (rc) {
                CERROR("lnet_init: error %d\n", rc);
                return rc;
        }
 
        rc = libcfs_register_ioctl(&lnet_ioctl_handler);
-       LASSERT(rc == 0);
+       LASSERT(!rc);
 
        if (config_on_load) {
                /*
        int rc;
 
        rc = libcfs_deregister_ioctl(&lnet_ioctl_handler);
-       LASSERT(rc == 0);
+       LASSERT(!rc);
 
        lnet_fini();
 }
 
        if (!nf)
                return NULL;
        endlen = src->ls_len - strlen(nf->nf_name);
-       if (endlen == 0)
+       if (!endlen)
                /* network name only, e.g. "elan" or "tcp" */
                netnum = 0;
        else {
        struct nidrange *nr;
 
        tmp = *src;
-       if (cfs_gettok(src, '@', &addrrange) == 0)
+       if (!cfs_gettok(src, '@', &addrrange))
                goto failed;
 
-       if (cfs_gettok(src, '@', &net) == 0 || src->ls_str)
+       if (!cfs_gettok(src, '@', &net) || src->ls_str)
                goto failed;
 
        nr = add_nidrange(&net, nidlist);
        if (!nr)
                goto failed;
 
-       if (parse_addrange(&addrrange, nr) != 0)
+       if (parse_addrange(&addrrange, nr))
                goto failed;
 
        return 1;
        INIT_LIST_HEAD(nidlist);
        while (src.ls_str) {
                rc = cfs_gettok(&src, ' ', &res);
-               if (rc == 0) {
+               if (!rc) {
                        cfs_free_nidlist(nidlist);
                        return 0;
                }
                rc = parse_nidrange(&res, nidlist);
-               if (rc == 0) {
+               if (!rc) {
                        cfs_free_nidlist(nidlist);
                        return 0;
                }
 {
        struct netstrfns *nf = nr->nr_netstrfns;
 
-       if (nr->nr_netnum == 0)
+       if (!nr->nr_netnum)
                return scnprintf(buffer, count, "@%s", nf->nf_name);
        else
                return scnprintf(buffer, count, "@%s%u",
        struct netstrfns *nf = nr->nr_netstrfns;
 
        list_for_each_entry(ar, addrranges, ar_link) {
-               if (i != 0)
+               if (i)
                        i += scnprintf(buffer + i, count - i, " ");
                i += nf->nf_print_addrlist(buffer + i, count - i,
                                           &ar->ar_numaddr_ranges);
                return 0;
 
        list_for_each_entry(nr, nidlist, nr_link) {
-               if (i != 0)
+               if (i)
                        i += scnprintf(buffer + i, count - i, " ");
 
-               if (nr->nr_all != 0) {
+               if (nr->nr_all) {
                        LASSERT(list_empty(&nr->nr_addrranges));
                        i += scnprintf(buffer + i, count - i, "*");
                        i += cfs_print_network(buffer + i, count - i, nr);
 
        list_for_each_entry(el, &ar->ar_numaddr_ranges, el_link) {
                list_for_each_entry(re, &el->el_exprs, re_link) {
-                       if (re->re_lo < min_addr || min_addr == 0)
+                       if (re->re_lo < min_addr || !min_addr)
                                min_addr = re->re_lo;
                        if (re->re_hi > max_addr)
                                max_addr = re->re_hi;
                if (netnum == -1)
                        netnum = nr->nr_netnum;
 
-               if (strcmp(lndname, nf->nf_name) != 0 ||
+               if (strcmp(lndname, nf->nf_name) ||
                    netnum != nr->nr_netnum)
                        return false;
        }
                list_for_each_entry(ar, &nr->nr_addrranges, ar_link) {
                        cfs_num_ar_min_max(ar, &current_start_nid,
                                           &current_end_nid);
-                       if (last_end_nid != 0 &&
+                       if (last_end_nid &&
                            (current_start_nid - last_end_nid != 1))
                                return false;
                        last_end_nid = current_end_nid;
                                                    re_link) {
                                        if (re->re_stride > 1)
                                                return false;
-                                       else if (last_hi != 0 &&
+                                       else if (last_hi &&
                                                 re->re_hi - last_hi != 1)
                                                return false;
                                        last_hi = re->re_hi;
                        last_diff = 0;
                        cfs_ip_ar_min_max(ar, &current_start_nid,
                                          &current_end_nid);
-                       if (last_end_nid != 0 &&
+                       if (last_end_nid &&
                            (current_start_nid - last_end_nid != 1))
                                return false;
                        last_end_nid = current_end_nid;
                list_for_each_entry(ar, &nr->nr_addrranges, ar_link) {
                        cfs_num_ar_min_max(ar, &tmp_min_addr,
                                           &tmp_max_addr);
-                       if (tmp_min_addr < min_addr || min_addr == 0)
+                       if (tmp_min_addr < min_addr || !min_addr)
                                min_addr = tmp_min_addr;
                        if (tmp_max_addr > max_addr)
                                max_addr = tmp_min_addr;
                list_for_each_entry(ar, &nr->nr_addrranges, ar_link) {
                        cfs_ip_ar_min_max(ar, &tmp_min_ip_addr,
                                          &tmp_max_ip_addr);
-                       if (tmp_min_ip_addr < min_ip_addr || min_ip_addr == 0)
+                       if (tmp_min_ip_addr < min_ip_addr || !min_ip_addr)
                                min_ip_addr = tmp_min_ip_addr;
                        if (tmp_max_ip_addr > max_ip_addr)
                                max_ip_addr = tmp_max_ip_addr;
        /* numeric IP? */
        if (sscanf(str, "%u.%u.%u.%u%n", &a, &b, &c, &d, &n) >= 4 &&
            n == nob &&
-           (a & ~0xff) == 0 && (b & ~0xff) == 0 &&
-           (c & ~0xff) == 0 && (d & ~0xff) == 0) {
+           !(a & ~0xff) && !(b & ~0xff) &&
+           !(c & ~0xff) && !(d & ~0xff)) {
                *addr = ((a << 24) | (b << 16) | (c << 8) | d);
                return 1;
        }
                }
 
                rc = cfs_expr_list_parse(res.ls_str, res.ls_len, 0, 255, &el);
-               if (rc != 0)
+               if (rc)
                        goto out;
 
                list_add_tail(&el->el_link, list);
 
        list_for_each_entry(el, list, el_link) {
                LASSERT(j++ < 4);
-               if (i != 0)
+               if (i)
                        i += scnprintf(buffer + i, count - i, ".");
                i += cfs_expr_list_print(buffer + i, count - i, el);
        }
        int     rc;
 
        rc = cfs_expr_list_parse(str, len, 0, MAX_NUMERIC_VALUE, &el);
-       if (rc == 0)
+       if (!rc)
                list_add_tail(&el->el_link, list);
 
        return rc;
        nf = libcfs_lnd2netstrfns(lnd);
        if (!nf)
                snprintf(buf, buf_size, "<%u:%u>", lnd, nnum);
-       else if (nnum == 0)
+       else if (!nnum)
                snprintf(buf, buf_size, "%s", nf->nf_name);
        else
                snprintf(buf, buf_size, "%s%u", nf->nf_name, nnum);
 
                nf->nf_addr2str(addr, buf, buf_size);
                addr_len = strlen(buf);
-               if (nnum == 0)
+               if (!nnum)
                        snprintf(buf + addr_len, buf_size - addr_len, "@%s",
                                 nf->nf_name);
                else
        }
 
        snprintf(str, LNET_NIDSTR_SIZE, "%s%u-%s",
-                ((id.pid & LNET_PID_USERFLAG) != 0) ? "U" : "",
-                (id.pid & ~LNET_PID_USERFLAG), libcfs_nid2str(id.nid));
+                id.pid & LNET_PID_USERFLAG ? "U" : "",
+                id.pid & ~LNET_PID_USERFLAG, libcfs_nid2str(id.nid));
        return str;
 }
 EXPORT_SYMBOL(libcfs_id2str);
 
 
                lnet_net_lock(i);
 
-               for (j = 3; ptable->pt_number != 0; j++) {
+               for (j = 3; ptable->pt_number; j++) {
                        lnet_net_unlock(i);
 
-                       if ((j & (j - 1)) == 0) {
+                       if (!(j & (j - 1))) {
                                CDEBUG(D_WARNING,
                                       "Waiting for %d peers on peer table\n",
                                       ptable->pt_number);
 {
        struct lnet_peer_table *ptable;
 
-       LASSERT(lp->lp_refcount == 0);
-       LASSERT(lp->lp_rtr_refcount == 0);
+       LASSERT(!lp->lp_refcount);
+       LASSERT(!lp->lp_rtr_refcount);
        LASSERT(list_empty(&lp->lp_txq));
        LASSERT(list_empty(&lp->lp_hashlist));
-       LASSERT(lp->lp_txqnob == 0);
+       LASSERT(!lp->lp_txqnob);
 
        ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
        LASSERT(ptable->pt_number > 0);
        lnet_net_lock(cpt);
 
        rc = lnet_nid2peer_locked(&lp, nid, cpt);
-       if (rc != 0) {
+       if (rc) {
                lnet_net_unlock(cpt);
                CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
                return;
 
        lp->lp_timestamp = when;                /* update timestamp */
        lp->lp_ping_deadline = 0;              /* disable ping timeout */
 
-       if (lp->lp_alive_count != 0 &&    /* got old news */
+       if (lp->lp_alive_count &&         /* got old news */
            (!lp->lp_alive) == (!alive)) {      /* new date for old news */
                CDEBUG(D_NET, "Old news\n");
                return;
 
        /* lnet_net_lock must be exclusively locked */
        lp->lp_rtr_refcount--;
-       if (lp->lp_rtr_refcount == 0) {
+       if (!lp->lp_rtr_refcount) {
                LASSERT(list_empty(&lp->lp_routes));
 
                if (lp->lp_rcd) {
        /* len+1 positions to add a new entry, also prevents division by 0 */
        offset = cfs_rand() % (len + 1);
        list_for_each(e, &rnet->lrn_routes) {
-               if (offset == 0)
+               if (!offset)
                        break;
                offset--;
        }
        lnet_net_lock(LNET_LOCK_EX);
 
        rc = lnet_nid2peer_locked(&route->lr_gateway, gateway, LNET_LOCK_EX);
-       if (rc != 0) {
+       if (rc) {
                lnet_net_unlock(LNET_LOCK_EX);
 
                LIBCFS_FREE(route, sizeof(*route));
                        list_for_each(e2, &rnet->lrn_routes) {
                                route = list_entry(e2, lnet_route_t, lr_list);
 
-                               if (idx-- == 0) {
+                               if (!idx--) {
                                        *net      = rnet->lrn_net;
                                        *hops     = route->lr_hops;
                                        *priority = route->lr_priority;
        }
 
        gw->lp_ping_feats = info->pi_features;
-       if ((gw->lp_ping_feats & LNET_PING_FEAT_MASK) == 0) {
+       if (!(gw->lp_ping_feats & LNET_PING_FEAT_MASK)) {
                CDEBUG(D_NET, "%s: Unexpected features 0x%x\n",
                       libcfs_nid2str(gw->lp_nid), gw->lp_ping_feats);
                return; /* nothing I can understand */
        }
 
-       if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) == 0)
+       if (!(gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS))
                return; /* can't carry NI status info */
 
        list_for_each_entry(rtr, &gw->lp_routes, lr_gwlist) {
 
        if (event->type == LNET_EVENT_SEND) {
                lp->lp_ping_notsent = 0;
-               if (event->status == 0)
+               if (!event->status)
                        goto out;
        }
 
         * we ping alive routers to try to detect router death before
         * apps get burned).
         */
-       lnet_notify_locked(lp, 1, (event->status == 0), cfs_time_current());
+       lnet_notify_locked(lp, 1, !event->status, cfs_time_current());
 
        /*
         * The router checker will wake up very shortly and do the
         * XXX If 'lp' stops being a router before then, it will still
         * have the notification pending!!!
         */
-       if (avoid_asym_router_failure && event->status == 0)
+       if (avoid_asym_router_failure && !event->status)
                lnet_parse_rc_info(rcd);
 
  out:
                list_for_each(entry, &the_lnet.ln_routers) {
                        rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
 
-                       if (rtr->lp_alive_count == 0) {
+                       if (!rtr->lp_alive_count) {
                                all_known = 0;
                                break;
                        }
 {
        lnet_route_t *rte;
 
-       if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0) {
+       if (gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) {
                list_for_each_entry(rte, &gw->lp_routes, lr_gwlist) {
                        if (rte->lr_net == net) {
                                rte->lr_downis = 0;
                CERROR("Can't bind MD: %d\n", rc);
                goto out;
        }
-       LASSERT(rc == 0);
+       LASSERT(!rc);
 
        lnet_net_lock(gateway->lp_cpt);
        /* router table changed or someone has created rcd for this gateway */
        if (rcd) {
                if (!LNetHandleIsInvalid(rcd->rcd_mdh)) {
                        rc = LNetMDUnlink(rcd->rcd_mdh);
-                       LASSERT(rc == 0);
+                       LASSERT(!rc);
                }
                lnet_destroy_rc_data(rcd);
        }
 
        lnet_peer_addref_locked(rtr);
 
-       if (rtr->lp_ping_deadline != 0 && /* ping timed out? */
+       if (rtr->lp_ping_deadline && /* ping timed out? */
            cfs_time_after(now, rtr->lp_ping_deadline))
                lnet_notify_locked(rtr, 1, 0, now);
 
               rtr->lp_ping_deadline, rtr->lp_ping_notsent,
               rtr->lp_alive, rtr->lp_alive_count, rtr->lp_ping_timestamp);
 
-       if (secs != 0 && !rtr->lp_ping_notsent &&
+       if (secs && !rtr->lp_ping_notsent &&
            cfs_time_after(now, cfs_time_add(rtr->lp_ping_timestamp,
                                             cfs_time_seconds(secs)))) {
                int rc;
 
                mdh = rcd->rcd_mdh;
 
-               if (rtr->lp_ping_deadline == 0) {
+               if (!rtr->lp_ping_deadline) {
                        rtr->lp_ping_deadline =
                                cfs_time_shift(router_ping_timeout);
                }
                             LNET_PROTO_PING_MATCHBITS, 0);
 
                lnet_net_lock(rtr->lp_cpt);
-               if (rc != 0)
+               if (rc)
                        rtr->lp_ping_notsent = 0; /* no event pending */
        }
 
        eqsz = 0;
        rc = LNetEQAlloc(eqsz, lnet_router_checker_event,
                         &the_lnet.ln_rc_eqh);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't allocate EQ(%d): %d\n", eqsz, rc);
                return -ENOMEM;
        }
                /* block until event callback signals exit */
                down(&the_lnet.ln_rc_signal);
                rc = LNetEQFree(the_lnet.ln_rc_eqh);
-               LASSERT(rc == 0);
+               LASSERT(!rc);
                the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
                return -ENOMEM;
        }
        LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
 
        rc = LNetEQFree(the_lnet.ln_rc_eqh);
-       LASSERT(rc == 0);
+       LASSERT(!rc);
 }
 
 static void
        int nbuffers = 0;
        lnet_rtrbuf_t *rb;
 
-       if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */
+       if (!rbp->rbp_nbuffers) /* not initialized or already freed */
                return;
 
        LASSERT(list_empty(&rbp->rbp_msgs));
        lnet_rtrbuf_t *rb;
        int i;
 
-       if (rbp->rbp_nbuffers != 0) {
+       if (rbp->rbp_nbuffers) {
                LASSERT(rbp->rbp_nbuffers == nbufs);
                return 0;
        }
        cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
                lnet_rtrpool_init(&rtrp[0], 0);
                rc = lnet_rtrpool_alloc_bufs(&rtrp[0], nrb_tiny, i);
-               if (rc != 0)
+               if (rc)
                        goto failed;
 
                lnet_rtrpool_init(&rtrp[1], small_pages);
                rc = lnet_rtrpool_alloc_bufs(&rtrp[1], nrb_small, i);
-               if (rc != 0)
+               if (rc)
                        goto failed;
 
                lnet_rtrpool_init(&rtrp[2], large_pages);
                rc = lnet_rtrpool_alloc_bufs(&rtrp[2], nrb_large, i);
-               if (rc != 0)
+               if (rc)
                        goto failed;
        }
 
 
 
        LASSERT(!write);
 
-       if (*lenp == 0)
+       if (!*lenp)
                return 0;
 
        LIBCFS_ALLOC(tmpstr, tmpsiz);
 
        s = tmpstr; /* points to current position in tmpstr[] */
 
-       if (*ppos == 0) {
+       if (!*ppos) {
                s += snprintf(s, tmpstr + tmpsiz - s, "Routing %s\n",
                              the_lnet.ln_routing ? "enabled" : "disabled");
                LASSERT(tmpstr + tmpsiz - s > 0);
                                        lnet_route_t *re =
                                                list_entry(r, lnet_route_t,
                                                           lr_list);
-                                       if (skip == 0) {
+                                       if (!skip) {
                                                route = re;
                                                break;
                                        }
 
        LIBCFS_FREE(tmpstr, tmpsiz);
 
-       if (rc == 0)
+       if (!rc)
                *lenp = len;
 
        return rc;
 
        LASSERT(!write);
 
-       if (*lenp == 0)
+       if (!*lenp)
                return 0;
 
        LIBCFS_ALLOC(tmpstr, tmpsiz);
 
        s = tmpstr; /* points to current position in tmpstr[] */
 
-       if (*ppos == 0) {
+       if (!*ppos) {
                s += snprintf(s, tmpstr + tmpsiz - s,
                              "%-4s %7s %9s %6s %12s %9s %8s %7s %s\n",
                              "ref", "rtr_ref", "alive_cnt", "state",
                        lnet_peer_t *lp = list_entry(r, lnet_peer_t,
                                                     lp_rtr_list);
 
-                       if (skip == 0) {
+                       if (!skip) {
                                peer = lp;
                                break;
                        }
                        lnet_route_t *rtr;
 
                        if ((peer->lp_ping_feats &
-                            LNET_PING_FEAT_NI_STATUS) != 0) {
+                            LNET_PING_FEAT_NI_STATUS)) {
                                list_for_each_entry(rtr, &peer->lp_routes,
                                                    lr_gwlist) {
                                        /*
                                         * downis on any route should be the
                                         * number of downis on the gateway
                                         */
-                                       if (rtr->lr_downis != 0) {
+                                       if (rtr->lr_downis) {
                                                down_ni = rtr->lr_downis;
                                                break;
                                        }
                                }
                        }
 
-                       if (deadline == 0)
+                       if (!deadline)
                                s += snprintf(s, tmpstr + tmpsiz - s,
                                              "%-4d %7d %9d %6s %12d %9d %8s %7d %s\n",
                                              nrefs, nrtrrefs, alive_cnt,
 
        LIBCFS_FREE(tmpstr, tmpsiz);
 
-       if (rc == 0)
+       if (!rc)
                *lenp = len;
 
        return rc;
        CLASSERT(LNET_PROC_HASH_BITS >= LNET_PEER_HASH_BITS);
        LASSERT(!write);
 
-       if (*lenp == 0)
+       if (!*lenp)
                return 0;
 
        if (cpt >= LNET_CPT_NUMBER) {
 
        s = tmpstr; /* points to current position in tmpstr[] */
 
-       if (*ppos == 0) {
+       if (!*ppos) {
                s += snprintf(s, tmpstr + tmpsiz - s,
                              "%-24s %4s %5s %5s %5s %5s %5s %5s %5s %s\n",
                              "nid", "refs", "state", "last", "max",
                        while (p != &ptable->pt_hash[hash]) {
                                lnet_peer_t *lp = list_entry(p, lnet_peer_t,
                                                             lp_hashlist);
-                               if (skip == 0) {
+                               if (!skip) {
                                        peer = lp;
 
                                        /*
 
        LIBCFS_FREE(tmpstr, tmpsiz);
 
-       if (rc == 0)
+       if (!rc)
                *lenp = len;
 
        return rc;
 
        LASSERT(!write);
 
-       if (*lenp == 0)
+       if (!*lenp)
                return 0;
 
        LIBCFS_ALLOC(tmpstr, tmpsiz);
 
        s = tmpstr; /* points to current position in tmpstr[] */
 
-       if (*ppos == 0) {
+       if (!*ppos) {
                s += snprintf(s, tmpstr + tmpsiz - s,
                              "%-24s %6s %5s %4s %4s %4s %5s %5s %5s\n",
                              "nid", "status", "alive", "refs", "peer",
                while (n != &the_lnet.ln_nis) {
                        lnet_ni_t *a_ni = list_entry(n, lnet_ni_t, ni_list);
 
-                       if (skip == 0) {
+                       if (!skip) {
                                ni = a_ni;
                                break;
                        }
                                if (j == ni->ni_ncpts)
                                        continue;
 
-                               if (i != 0)
+                               if (i)
                                        lnet_net_lock(i);
 
                                s += snprintf(s, tmpstr + tmpsiz - s,
                                              tq->tq_credits_max,
                                              tq->tq_credits,
                                              tq->tq_credits_min);
-                               if (i != 0)
+                               if (i)
                                        lnet_net_unlock(i);
                        }
                        LASSERT(tmpstr + tmpsiz - s > 0);
 
        LIBCFS_FREE(tmpstr, tmpsiz);
 
-       if (rc == 0)
+       if (!rc)
                *lenp = len;
 
        return rc;
        rc = -EINVAL;
        lnet_res_lock(0);
        for (i = 0; portal_rotors[i].pr_name; i++) {
-               if (strncasecmp(portal_rotors[i].pr_name, tmp,
-                               strlen(portal_rotors[i].pr_name)) == 0) {
+               if (!strncasecmp(portal_rotors[i].pr_name, tmp,
+                                strlen(portal_rotors[i].pr_name))) {
                        portal_rotor = portal_rotors[i].pr_value;
                        rc = 0;
                        break;
 
        LASSERT(sn);
        LASSERT(tsi->tsi_is_client);
 
-       if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) {
+       if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
                test_bulk_req_t  *breq = &tsi->tsi_u.bulk_v0;
 
                opc   = breq->blk_opc;
                 * I should never get this step if it's unknown feature
                 * because make_session will reject unknown feature
                 */
-               LASSERT((sn->sn_features & ~LST_FEATS_MASK) == 0);
+               LASSERT(!(sn->sn_features & ~LST_FEATS_MASK));
 
                opc   = breq->blk_opc;
                flags = breq->blk_flags;
 
        ktime_get_ts64(&ts);
 
-       if (((ts.tv_nsec / NSEC_PER_USEC) & 1) == 0)
+       if (!((ts.tv_nsec / NSEC_PER_USEC) & 1))
                return 0;
 
        return brw_inject_errors--;
 
        for (i = 0; i < bk->bk_niov; i++) {
                pg = bk->bk_iovs[i].kiov_page;
-               if (brw_check_page(pg, pattern, magic) != 0) {
+               if (brw_check_page(pg, pattern, magic)) {
                        CERROR("Bulk page %p (%d/%d) is corrupted!\n",
                               pg, i, bk->bk_niov);
                        return 1;
        LASSERT(sn);
        LASSERT(bulk);
 
-       if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) {
+       if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
                test_bulk_req_t *breq = &tsi->tsi_u.bulk_v0;
 
                opc   = breq->blk_opc;
                 * I should never get this step if it's unknown feature
                 * because make_session will reject unknown feature
                 */
-               LASSERT((sn->sn_features & ~LST_FEATS_MASK) == 0);
+               LASSERT(!(sn->sn_features & ~LST_FEATS_MASK));
 
                opc   = breq->blk_opc;
                flags = breq->blk_flags;
        }
 
        rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc);
-       if (rc != 0)
+       if (rc)
                return rc;
 
        memcpy(&rpc->crpc_bulk, bulk, offsetof(srpc_bulk_t, bk_iovs[npg]));
 
        LASSERT(sn);
 
-       if (rpc->crpc_status != 0) {
+       if (rpc->crpc_status) {
                CERROR("BRW RPC to %s failed with %d\n",
                       libcfs_id2str(rpc->crpc_dest), rpc->crpc_status);
                if (!tsi->tsi_stopping) /* rpc could have been aborted */
               "BRW RPC to %s finished with brw_status: %d\n",
               libcfs_id2str(rpc->crpc_dest), reply->brw_status);
 
-       if (reply->brw_status != 0) {
+       if (reply->brw_status) {
                atomic_inc(&sn->sn_brw_errors);
                rpc->crpc_status = -(int)reply->brw_status;
                goto out;
        if (reqst->brw_rw == LST_BRW_WRITE)
                goto out;
 
-       if (brw_check_bulk(&rpc->crpc_bulk, reqst->brw_flags, magic) != 0) {
+       if (brw_check_bulk(&rpc->crpc_bulk, reqst->brw_flags, magic)) {
                CERROR("Bulk data from %s is corrupted!\n",
                       libcfs_id2str(rpc->crpc_dest));
                atomic_inc(&sn->sn_brw_errors);
        if (!blk)
                return;
 
-       if (rpc->srpc_status != 0)
+       if (rpc->srpc_status)
                CERROR("Bulk transfer %s %s has failed: %d\n",
                       blk->bk_sink ? "from" : "to",
                       libcfs_id2str(rpc->srpc_peer), rpc->srpc_status);
        reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
        reqst = &reqstmsg->msg_body.brw_reqst;
 
-       if (status != 0) {
+       if (status) {
                CERROR("BRW bulk %s failed for RPC from %s: %d\n",
                       reqst->brw_rw == LST_BRW_READ ? "READ" : "WRITE",
                       libcfs_id2str(rpc->srpc_peer), status);
        if (reqstmsg->msg_magic != SRPC_MSG_MAGIC)
                __swab64s(&magic);
 
-       if (brw_check_bulk(rpc->srpc_bulk, reqst->brw_flags, magic) != 0) {
+       if (brw_check_bulk(rpc->srpc_bulk, reqst->brw_flags, magic)) {
                CERROR("Bulk data from %s is corrupted!\n",
                       libcfs_id2str(rpc->srpc_peer));
                reply->brw_status = EBADMSG;
                return 0;
        }
 
-       if ((reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
+       if (reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) {
                replymsg->msg_ses_feats = LST_FEATS_MASK;
                reply->brw_status = EPROTO;
                return 0;
        }
 
-       if ((reqstmsg->msg_ses_feats & LST_FEAT_BULK_LEN) == 0) {
+       if (!(reqstmsg->msg_ses_feats & LST_FEAT_BULK_LEN)) {
                /* compat with old version */
-               if ((reqst->brw_len & ~CFS_PAGE_MASK) != 0) {
+               if (reqst->brw_len & ~CFS_PAGE_MASK) {
                        reply->brw_status = EINVAL;
                        return 0;
                }
 
        replymsg->msg_ses_feats = reqstmsg->msg_ses_feats;
 
-       if (reqst->brw_len == 0 || npg > LNET_MAX_IOV) {
+       if (!reqst->brw_len || npg > LNET_MAX_IOV) {
                reply->brw_status = EINVAL;
                return 0;
        }
        rc = sfw_alloc_pages(rpc, rpc->srpc_scd->scd_cpt, npg,
                             reqst->brw_len,
                             reqst->brw_rw == LST_BRW_WRITE);
-       if (rc != 0)
+       if (rc)
                return rc;
 
        if (reqst->brw_rw == LST_BRW_READ)
 
        int rc;
 
        if (!args->lstio_ses_idp || /* address for output sid */
-           args->lstio_ses_key   == 0 ||    /* no key is specified */
+           !args->lstio_ses_key ||    /* no key is specified */
            !args->lstio_ses_namep || /* session name */
            args->lstio_ses_nmlen <= 0 ||
            args->lstio_ses_nmlen > LST_NAME_SIZE)
                              args->lstio_grp_resultp);
 
        LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
-       if (rc == 0 &&
+       if (!rc &&
            copy_to_user(args->lstio_grp_featp, &feats, sizeof(feats))) {
                return -EINVAL;
        }
 
        LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
 
-       if (rc != 0)
+       if (rc)
                return rc;
 
        if (args->lstio_grp_dentsp &&
 
        LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
 
-       if (rc != 0)
+       if (rc)
                return rc;
 
        if (args->lstio_bat_dentsp &&
            args->lstio_tes_dgrp_nmlen > LST_NAME_SIZE)
                return -EINVAL;
 
-       if (args->lstio_tes_loop == 0 || /* negative is infinite */
+       if (!args->lstio_tes_loop || /* negative is infinite */
            args->lstio_tes_concur <= 0 ||
            args->lstio_tes_dist <= 0 ||
            args->lstio_tes_span <= 0)
                             args->lstio_tes_param_len,
                             &ret, args->lstio_tes_resultp);
 
-       if (ret != 0)
+       if (ret)
                rc = (copy_to_user(args->lstio_tes_retp, &ret,
                                   sizeof(ret))) ? -EFAULT : 0;
 out:
 
        /* not an orphan RPC */
        crpc->crp_finished = 1;
 
-       if (crpc->crp_stamp == 0) {
+       if (!crpc->crp_stamp) {
                /* not aborted */
-               LASSERT(crpc->crp_status == 0);
+               LASSERT(!crpc->crp_status);
 
                crpc->crp_stamp  = cfs_time_current();
                crpc->crp_status = rpc->crpc_status;
        }
 
        rc = lstcon_rpc_init(nd, service, feats, bulk_npg, bulk_len, 0, crpc);
-       if (rc == 0) {
+       if (!rc) {
                *crpcpp = crpc;
                return 0;
        }
                spin_lock(&rpc->crpc_lock);
 
                if (!crpc->crp_posted || /* not posted */
-                   crpc->crp_stamp != 0) { /* rpc done or aborted already */
-                       if (crpc->crp_stamp == 0) {
+                   crpc->crp_stamp) { /* rpc done or aborted already */
+                       if (!crpc->crp_stamp) {
                                crpc->crp_stamp = cfs_time_current();
                                crpc->crp_status = -EINTR;
                        }
            !list_empty(&trans->tas_olink)) /* Not an end session RPC */
                return 1;
 
-       return (atomic_read(&trans->tas_remaining) == 0) ? 1 : 0;
+       return !atomic_read(&trans->tas_remaining) ? 1 : 0;
 }
 
 int
        if (console_session.ses_shutdown)
                rc = -ESHUTDOWN;
 
-       if (rc != 0 || atomic_read(&trans->tas_remaining) != 0) {
+       if (rc || atomic_read(&trans->tas_remaining)) {
                /* treat short timeout as canceled */
                if (rc == -ETIMEDOUT && timeout < LST_TRANS_MIN_TIMEOUT * 2)
                        rc = -EINTR;
        srpc_generic_reply_t *rep;
 
        LASSERT(nd && rpc);
-       LASSERT(crpc->crp_stamp != 0);
+       LASSERT(crpc->crp_stamp);
 
-       if (crpc->crp_status != 0) {
+       if (crpc->crp_status) {
                *msgpp = NULL;
                return crpc->crp_status;
        }
        list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
                lstcon_rpc_stat_total(stat, 1);
 
-               LASSERT(crpc->crp_stamp != 0);
+               LASSERT(crpc->crp_stamp);
 
                error = lstcon_rpc_get_reply(crpc, &rep);
-               if (error != 0) {
+               if (error) {
                        lstcon_rpc_stat_failure(stat, 1);
-                       if (stat->trs_rpc_errno == 0)
+                       if (!stat->trs_rpc_errno)
                                stat->trs_rpc_errno = -error;
 
                        continue;
                lstcon_rpc_stat_reply(trans, rep, crpc->crp_node, stat);
        }
 
-       if (trans->tas_opc == LST_TRANS_SESNEW && stat->trs_fwk_errno == 0) {
+       if (trans->tas_opc == LST_TRANS_SESNEW && !stat->trs_fwk_errno) {
                stat->trs_fwk_errno =
                      lstcon_session_feats_check(trans->tas_features);
        }
 
                ent = list_entry(next, lstcon_rpc_ent_t, rpe_link);
 
-               LASSERT(crpc->crp_stamp != 0);
+               LASSERT(crpc->crp_stamp);
 
                error = lstcon_rpc_get_reply(crpc, &msg);
 
                                 sizeof(error)))
                        return -EFAULT;
 
-               if (error != 0)
+               if (error)
                        continue;
 
                /* RPC is done */
 
                error = readent(trans->tas_opc, msg, ent);
 
-               if (error != 0)
+               if (error)
                        return error;
        }
 
                 * user wait for them, just abandon them, they will be recycled
                 * in callback
                 */
-               LASSERT(crpc->crp_status != 0);
+               LASSERT(crpc->crp_status);
 
                crpc->crp_node  = NULL;
                crpc->crp_trans = NULL;
                atomic_dec(&trans->tas_remaining);
        }
 
-       LASSERT(atomic_read(&trans->tas_remaining) == 0);
+       LASSERT(!atomic_read(&trans->tas_remaining));
 
        list_del(&trans->tas_link);
        if (!list_empty(&trans->tas_olink))
        case LST_TRANS_SESNEW:
                rc = lstcon_rpc_prep(nd, SRPC_SERVICE_MAKE_SESSION,
                                     feats, 0, 0, crpc);
-               if (rc != 0)
+               if (rc)
                        return rc;
 
                msrq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.mksn_reqst;
        case LST_TRANS_SESEND:
                rc = lstcon_rpc_prep(nd, SRPC_SERVICE_REMOVE_SESSION,
                                     feats, 0, 0, crpc);
-               if (rc != 0)
+               if (rc)
                        return rc;
 
                rsrq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.rmsn_reqst;
        int rc;
 
        rc = lstcon_rpc_prep(nd, SRPC_SERVICE_DEBUG, feats, 0, 0, crpc);
-       if (rc != 0)
+       if (rc)
                return rc;
 
        drq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.dbg_reqst;
        int                 rc;
 
        rc = lstcon_rpc_prep(nd, SRPC_SERVICE_BATCH, feats, 0, 0, crpc);
-       if (rc != 0)
+       if (rc)
                return rc;
 
        brq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.bat_reqst;
            transop != LST_TRANS_TSBSTOP)
                return 0;
 
-       LASSERT(tsb->tsb_index == 0);
+       LASSERT(!tsb->tsb_index);
 
        batch = (lstcon_batch_t *)tsb;
        brq->bar_arg = batch->bat_arg;
        int                rc;
 
        rc = lstcon_rpc_prep(nd, SRPC_SERVICE_QUERY_STAT, feats, 0, 0, crpc);
-       if (rc != 0)
+       if (rc)
                return rc;
 
        srq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.stat_reqst;
 
        if (transop == LST_TRANS_TSBCLIADD) {
                npg = sfw_id_pages(test->tes_span);
-               nob = (feats & LST_FEAT_BULK_LEN) == 0 ?
+               nob = !(feats & LST_FEAT_BULK_LEN) ?
                      npg * PAGE_CACHE_SIZE :
                      sizeof(lnet_process_id_packed_t) * test->tes_span;
        }
 
        rc = lstcon_rpc_prep(nd, SRPC_SERVICE_TEST, feats, npg, nob, crpc);
-       if (rc != 0)
+       if (rc)
                return rc;
 
        trq  = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.tes_reqst;
 
                        LASSERT(nob > 0);
 
-                       len = (feats & LST_FEAT_BULK_LEN) == 0 ?
+                       len = !(feats & LST_FEAT_BULK_LEN) ?
                              PAGE_CACHE_SIZE :
                              min_t(int, nob, PAGE_CACHE_SIZE);
                        nob -= len;
                                          test->tes_dist,
                                          test->tes_span,
                                          npg, &bulk->bk_iovs[0]);
-               if (rc != 0) {
+               if (rc) {
                        lstcon_rpc_put(*crpc);
                        return rc;
                }
 
        case LST_TEST_BULK:
                trq->tsr_service = SRPC_SERVICE_BRW;
-               if ((feats & LST_FEAT_BULK_LEN) == 0) {
+               if (!(feats & LST_FEAT_BULK_LEN)) {
                        rc = lstcon_bulkrpc_v0_prep((lst_test_bulk_param_t *)
                                                    &test->tes_param[0], trq);
                } else {
        srpc_mksn_reply_t *mksn_rep = &reply->msg_body.mksn_reply;
        int                status   = mksn_rep->mksn_status;
 
-       if (status == 0 &&
-           (reply->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
+       if (!status &&
+           (reply->msg_ses_feats & ~LST_FEATS_MASK)) {
                mksn_rep->mksn_status = EPROTO;
                status = EPROTO;
        }
                        reply->msg_ses_feats);
        }
 
-       if (status != 0)
+       if (status)
                return status;
 
        if (!trans->tas_feats_updated) {
                status = EPROTO;
        }
 
-       if (status == 0) {
+       if (!status) {
                /* session timeout on remote node */
                nd->nd_timeout = mksn_rep->mksn_timeout;
        }
        switch (trans->tas_opc) {
        case LST_TRANS_SESNEW:
                rc = lstcon_sesnew_stat_reply(trans, nd, msg);
-               if (rc == 0) {
+               if (!rc) {
                        lstcon_sesop_stat_success(stat, 1);
                        return;
                }
        case LST_TRANS_SESEND:
                rmsn_rep = &msg->msg_body.rmsn_reply;
                /* ESRCH is not an error for end session */
-               if (rmsn_rep->rmsn_status == 0 ||
+               if (!rmsn_rep->rmsn_status ||
                    rmsn_rep->rmsn_status == ESRCH) {
                        lstcon_sesop_stat_success(stat, 1);
                        return;
        case LST_TRANS_TSBSTOP:
                bat_rep = &msg->msg_body.bat_reply;
 
-               if (bat_rep->bar_status == 0) {
+               if (!bat_rep->bar_status) {
                        lstcon_tsbop_stat_success(stat, 1);
                        return;
                }
        case LST_TRANS_TSBSRVQRY:
                bat_rep = &msg->msg_body.bat_reply;
 
-               if (bat_rep->bar_active != 0)
+               if (bat_rep->bar_active)
                        lstcon_tsbqry_stat_run(stat, 1);
                else
                        lstcon_tsbqry_stat_idle(stat, 1);
 
-               if (bat_rep->bar_status == 0)
+               if (!bat_rep->bar_status)
                        return;
 
                lstcon_tsbqry_stat_failure(stat, 1);
        case LST_TRANS_TSBSRVADD:
                test_rep = &msg->msg_body.tes_reply;
 
-               if (test_rep->tsr_status == 0) {
+               if (!test_rep->tsr_status) {
                        lstcon_tsbop_stat_success(stat, 1);
                        return;
                }
        case LST_TRANS_STATQRY:
                stat_rep = &msg->msg_body.stat_reply;
 
-               if (stat_rep->str_status == 0) {
+               if (!stat_rep->str_status) {
                        lstcon_statqry_stat_success(stat, 1);
                        return;
                }
                LBUG();
        }
 
-       if (stat->trs_fwk_errno == 0)
+       if (!stat->trs_fwk_errno)
                stat->trs_fwk_errno = rc;
 
        return;
        /* Creating session RPG for list of nodes */
 
        rc = lstcon_rpc_trans_prep(translist, transop, &trans);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't create transaction %d: %d\n", transop, rc);
                return rc;
        }
                rc = !condition ? 1 :
                     condition(transop, ndl->ndl_node, arg);
 
-               if (rc == 0)
+               if (!rc)
                        continue;
 
                if (rc < 0) {
                        break;
                }
 
-               if (rc != 0) {
+               if (rc) {
                        CERROR("Failed to create RPC for transaction %s: %d\n",
                               lstcon_rpc_trans_name(transop), rc);
                        break;
                lstcon_rpc_trans_addreq(trans, rpc);
        }
 
-       if (rc == 0) {
+       if (!rc) {
                *transpp = trans;
                return 0;
        }
 
                        rc = lstcon_sesrpc_prep(nd, LST_TRANS_SESEND,
                                                trans->tas_features, &crpc);
-                       if (rc != 0) {
+                       if (rc) {
                                CERROR("Out of memory\n");
                                break;
                        }
 
                rc = lstcon_rpc_init(nd, SRPC_SERVICE_DEBUG,
                                     trans->tas_features, 0, 0, 1, crpc);
-               if (rc != 0) {
+               if (rc) {
                        CERROR("Out of memory\n");
                        break;
                }
        int rc;
 
        LASSERT(list_empty(&console_session.ses_rpc_freelist));
-       LASSERT(atomic_read(&console_session.ses_rpc_counter) == 0);
+       LASSERT(!atomic_read(&console_session.ses_rpc_counter));
 
        rc = lstcon_rpc_trans_prep(NULL, LST_TRANS_SESPING,
                                   &console_session.ses_ping);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Failed to create console pinger\n");
                return rc;
        }
 
        spin_lock(&console_session.ses_rpc_lock);
 
-       lst_wait_until((atomic_read(&console_session.ses_rpc_counter) == 0),
+       lst_wait_until(!atomic_read(&console_session.ses_rpc_counter),
                       console_session.ses_rpc_lock,
                       "Network is not accessible or target is down, waiting for %d console RPCs to being recycled\n",
                       atomic_read(&console_session.ses_rpc_counter));
 lstcon_rpc_module_fini(void)
 {
        LASSERT(list_empty(&console_session.ses_rpc_freelist));
-       LASSERT(atomic_read(&console_session.ses_rpc_counter) == 0);
+       LASSERT(!atomic_read(&console_session.ses_rpc_counter));
 }
 
                return 0;
        }
 
-       if (create == 0)
+       if (!create)
                return -ENOENT;
 
        /* find or create in session hash */
        rc = lstcon_node_find(id, &nd, (create == 1) ? 1 : 0);
-       if (rc != 0)
+       if (rc)
                return rc;
 
        LIBCFS_ALLOC(ndl, sizeof(lstcon_ndlink_t));
        lstcon_ndlink_t *tmp;
 
        list_for_each_entry_safe(ndl, tmp, &grp->grp_ndl_list, ndl_link) {
-               if ((ndl->ndl_node->nd_state & keep) == 0)
+               if (!(ndl->ndl_node->nd_state & keep))
                        lstcon_group_ndlink_release(grp, ndl);
        }
 }
        lstcon_group_t *grp;
 
        list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
-               if (strncmp(grp->grp_name, name, LST_NAME_SIZE) != 0)
+               if (strncmp(grp->grp_name, name, LST_NAME_SIZE))
                        continue;
 
                lstcon_group_addref(grp);  /* +1 ref for caller */
        int rc;
 
        rc = lstcon_ndlink_find(&grp->grp_ndl_hash[0], id, ndlpp, create);
-       if (rc != 0)
+       if (rc)
                return rc;
 
        if (!list_empty(&(*ndlpp)->ndl_link))
        int rc;
 
        rc = lstcon_group_alloc(NULL, &tmp);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Out of memory\n");
                return -ENOMEM;
        }
 
                /* skip if it's in this group already */
                rc = lstcon_group_ndlink_find(grp, id, &ndl, 0);
-               if (rc == 0)
+               if (!rc)
                        continue;
 
                /* add to tmp group */
                rc = lstcon_group_ndlink_find(tmp, id, &ndl, 1);
-               if (rc != 0) {
+               if (rc) {
                        CERROR("Can't create ndlink, out of memory\n");
                        break;
                }
        }
 
-       if (rc != 0) {
+       if (rc) {
                lstcon_group_decref(tmp);
                return rc;
        }
        rc = lstcon_rpc_trans_ndlist(&tmp->grp_ndl_list,
                                     &tmp->grp_trans_list, LST_TRANS_SESNEW,
                                     tmp, lstcon_sesrpc_condition, &trans);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't create transaction: %d\n", rc);
                lstcon_group_decref(tmp);
                return rc;
        /* End session and remove node from the group */
 
        rc = lstcon_group_alloc(NULL, &tmp);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Out of memory\n");
                return -ENOMEM;
        }
                }
 
                /* move node to tmp group */
-               if (lstcon_group_ndlink_find(grp, id, &ndl, 0) == 0)
+               if (!lstcon_group_ndlink_find(grp, id, &ndl, 0))
                        lstcon_group_ndlink_move(grp, tmp, ndl);
        }
 
        rc = lstcon_rpc_trans_ndlist(&tmp->grp_ndl_list,
                                     &tmp->grp_trans_list, LST_TRANS_SESEND,
                                     tmp, lstcon_sesrpc_condition, &trans);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't create transaction: %d\n", rc);
                goto error;
        }
        lstcon_group_t *grp;
        int rc;
 
-       rc = (lstcon_group_find(name, &grp) == 0) ? -EEXIST : 0;
-       if (rc != 0) {
 +       rc = lstcon_group_find(name, &grp) ? 0 : -EEXIST;
+       if (rc) {
                /* find a group with same name */
                lstcon_group_decref(grp);
                return rc;
        }
 
        rc = lstcon_group_alloc(name, &grp);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't allocate descriptor for group %s\n", name);
                return -ENOMEM;
        }
        LASSERT(ids_up);
 
        rc = lstcon_group_find(name, &grp);
-       if (rc != 0) {
+       if (rc) {
                CDEBUG(D_NET, "Can't find group %s\n", name);
                return rc;
        }
        int rc;
 
        rc = lstcon_group_find(name, &grp);
-       if (rc != 0) {
+       if (rc) {
                CDEBUG(D_NET, "Can't find group: %s\n", name);
                return rc;
        }
        rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list,
                                     &grp->grp_trans_list, LST_TRANS_SESEND,
                                     grp, lstcon_sesrpc_condition, &trans);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't create transaction: %d\n", rc);
                lstcon_group_decref(grp);
                return rc;
        int rc;
 
        rc = lstcon_group_find(name, &grp);
-       if (rc != 0) {
+       if (rc) {
                CDEBUG(D_NET, "Can't find group %s\n", name);
                return rc;
        }
        int rc;
 
        rc = lstcon_group_find(name, &grp);
-       if (rc != 0) {
+       if (rc) {
                CDEBUG(D_NET, "Can't find group: %s\n", name);
                return rc;
        }
        int rc;
 
        rc = lstcon_group_find(name, &grp);
-       if (rc != 0) {
+       if (rc) {
                CDEBUG(D_NET, "Can't find group: %s\n", name);
                return rc;
        }
        rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list,
                                     &grp->grp_trans_list, LST_TRANS_SESNEW,
                                     grp, lstcon_sesrpc_condition, &trans);
-       if (rc != 0) {
+       if (rc) {
                /* local error, return */
                CDEBUG(D_NET, "Can't create transaction: %d\n", rc);
                lstcon_group_decref(grp);
        LASSERT(name_up);
 
        list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
-               if (index-- == 0) {
+               if (!index--) {
                        return copy_to_user(name_up, grp->grp_name, len) ?
                               -EFAULT : 0;
                }
        int rc;
 
        rc = lstcon_group_find(name, &grp);
-       if (rc != 0) {
+       if (rc) {
                CDEBUG(D_NET, "Can't find group %s\n", name);
                return rc;
        }
        lstcon_batch_t *bat;
 
        list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) {
-               if (strncmp(bat->bat_name, name, LST_NAME_SIZE) == 0) {
+               if (!strncmp(bat->bat_name, name, LST_NAME_SIZE)) {
                        *batpp = bat;
                        return 0;
                }
        int i;
        int rc;
 
-       rc = (lstcon_batch_find(name, &bat) == 0) ? -EEXIST : 0;
-       if (rc != 0) {
+       rc = !lstcon_batch_find(name, &bat) ? -EEXIST : 0;
+       if (rc) {
                CDEBUG(D_NET, "Batch %s already exists\n", name);
                return rc;
        }
        LASSERT(index >= 0);
 
        list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) {
-               if (index-- == 0) {
+               if (!index--) {
                        return copy_to_user(name_up, bat->bat_name, len) ?
                               -EFAULT : 0;
                }
        int rc;
 
        rc = lstcon_batch_find(name, &bat);
-       if (rc != 0) {
+       if (rc) {
                CDEBUG(D_NET, "Can't find batch %s\n", name);
                return -ENOENT;
        }
        rc = lstcon_rpc_trans_ndlist(&bat->bat_cli_list,
                                     &bat->bat_trans_list, transop,
                                     bat, lstcon_batrpc_condition, &trans);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't create transaction: %d\n", rc);
                return rc;
        }
        lstcon_batch_t *bat;
        int rc;
 
-       if (lstcon_batch_find(name, &bat) != 0) {
+       if (lstcon_batch_find(name, &bat)) {
                CDEBUG(D_NET, "Can't find batch %s\n", name);
                return -ENOENT;
        }
        rc = lstcon_batch_op(bat, LST_TRANS_TSBRUN, result_up);
 
        /* mark batch as running if it's started in any node */
-       if (lstcon_tsbop_stat_success(lstcon_trans_stat(), 0) != 0)
+       if (lstcon_tsbop_stat_success(lstcon_trans_stat(), 0))
                bat->bat_state = LST_BATCH_RUNNING;
 
        return rc;
        lstcon_batch_t *bat;
        int rc;
 
-       if (lstcon_batch_find(name, &bat) != 0) {
+       if (lstcon_batch_find(name, &bat)) {
                CDEBUG(D_NET, "Can't find batch %s\n", name);
                return -ENOENT;
        }
        rc = lstcon_batch_op(bat, LST_TRANS_TSBSTOP, result_up);
 
        /* mark batch as stopped if all RPCs finished */
-       if (lstcon_tsbop_stat_failure(lstcon_trans_stat(), 0) == 0)
+       if (!lstcon_tsbop_stat_failure(lstcon_trans_stat(), 0))
                bat->bat_state = LST_BATCH_IDLE;
 
        return rc;
 
        LASSERT(nd->nd_id.nid != LNET_NID_ANY);
 
-       if (lstcon_ndlink_find(hash, nd->nd_id, &ndl, 1) != 0)
+       if (lstcon_ndlink_find(hash, nd->nd_id, &ndl, 1))
                return -ENOMEM;
 
        if (list_empty(&ndl->ndl_link))
        rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list,
                                     &test->tes_trans_list, transop,
                                     test, lstcon_testrpc_condition, &trans);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't create transaction: %d\n", rc);
                return rc;
        }
 
        lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);
 
-       if (lstcon_trans_stat()->trs_rpc_errno != 0 ||
-           lstcon_trans_stat()->trs_fwk_errno != 0) {
+       if (lstcon_trans_stat()->trs_rpc_errno ||
+           lstcon_trans_stat()->trs_fwk_errno) {
                lstcon_rpc_trans_interpreter(trans, result_up, NULL);
 
                lstcon_rpc_trans_destroy(trans);
        int rc;
 
        rc = lstcon_batch_find(name, batch);
-       if (rc != 0) {
+       if (rc) {
                CDEBUG(D_NET, "Can't find batch %s\n", name);
                return rc;
        }
        lstcon_ndlink_t *ndl;
 
        rc = lstcon_group_find(name, grp);
-       if (rc != 0) {
+       if (rc) {
                CDEBUG(D_NET, "can't find group %s\n", name);
                return rc;
        }
         * active node
         */
        rc = lstcon_verify_batch(batch_name, &batch);
-       if (rc != 0)
+       if (rc)
                goto out;
 
        rc = lstcon_verify_group(src_name, &src_grp);
-       if (rc != 0)
+       if (rc)
                goto out;
 
        rc = lstcon_verify_group(dst_name, &dst_grp);
-       if (rc != 0)
+       if (rc)
                goto out;
 
        if (dst_grp->grp_userland)
 
        rc = lstcon_test_nodes_add(test, result_up);
 
-       if (rc != 0)
+       if (rc)
                goto out;
 
-       if (lstcon_trans_stat()->trs_rpc_errno != 0 ||
-           lstcon_trans_stat()->trs_fwk_errno != 0)
+       if (lstcon_trans_stat()->trs_rpc_errno ||
+           lstcon_trans_stat()->trs_fwk_errno)
                CDEBUG(D_NET, "Failed to add test %d to batch %s\n", type,
                       batch_name);
 
        int rc;
 
        rc = lstcon_batch_find(name, &batch);
-       if (rc != 0) {
+       if (rc) {
                CDEBUG(D_NET, "Can't find batch: %s\n", name);
                return rc;
        }
 
-       if (testidx == 0) {
+       if (!testidx) {
                translist = &batch->bat_trans_list;
                ndlist    = &batch->bat_cli_list;
                hdr       = &batch->bat_hdr;
        } else {
                /* query specified test only */
                rc = lstcon_test_find(batch, testidx, &test);
-               if (rc != 0) {
+               if (rc) {
                        CDEBUG(D_NET, "Can't find test: %d\n", testidx);
                        return rc;
                }
 
        rc = lstcon_rpc_trans_ndlist(ndlist, translist, transop, hdr,
                                     lstcon_batrpc_condition, &trans);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't create transaction: %d\n", rc);
                return rc;
        }
 
        lstcon_rpc_trans_postwait(trans, timeout);
 
-       if (testidx == 0 && /* query a batch, not a test */
-           lstcon_rpc_stat_failure(lstcon_trans_stat(), 0) == 0 &&
-           lstcon_tsbqry_stat_run(lstcon_trans_stat(), 0) == 0) {
+       if (!testidx && /* query a batch, not a test */
+           !lstcon_rpc_stat_failure(lstcon_trans_stat(), 0) &&
+           !lstcon_tsbqry_stat_run(lstcon_trans_stat(), 0)) {
                /* all RPCs finished, and no active test */
                batch->bat_state = LST_BATCH_IDLE;
        }
        srpc_counters_t __user *srpc_stat;
        lnet_counters_t __user *lnet_stat;
 
-       if (rep->str_status != 0)
+       if (rep->str_status)
                return 0;
 
        sfwk_stat = (sfw_counters_t __user *)&ent_up->rpe_payload[0];
 
        rc = lstcon_rpc_trans_ndlist(ndlist, &head,
                                     LST_TRANS_STATQRY, NULL, NULL, &trans);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't create transaction: %d\n", rc);
                return rc;
        }
        int rc;
 
        rc = lstcon_group_find(grp_name, &grp);
-       if (rc != 0) {
+       if (rc) {
                CDEBUG(D_NET, "Can't find group %s\n", grp_name);
                return rc;
        }
        int rc;
 
        rc = lstcon_group_alloc(NULL, &tmp);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Out of memory\n");
                return -ENOMEM;
        }
 
                /* add to tmp group */
                rc = lstcon_group_ndlink_find(tmp, id, &ndl, 2);
-               if (rc != 0) {
+               if (rc) {
                        CDEBUG((rc == -ENOMEM) ? D_ERROR : D_NET,
                               "Failed to find or create %s: %d\n",
                               libcfs_id2str(id), rc);
                }
        }
 
-       if (rc != 0) {
+       if (rc) {
                lstcon_group_decref(tmp);
                return rc;
        }
 
        rc = lstcon_rpc_trans_ndlist(ndlist, translist, LST_TRANS_SESQRY,
                                     NULL, lstcon_sesrpc_condition, &trans);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't create transaction: %d\n", rc);
                return rc;
        }
        int rc;
 
        rc = lstcon_batch_find(name, &bat);
-       if (rc != 0)
+       if (rc)
                return -ENOENT;
 
        rc = lstcon_debug_ndlist(client ? &bat->bat_cli_list :
        int rc;
 
        rc = lstcon_group_find(name, &grp);
-       if (rc != 0)
+       if (rc)
                return -ENOENT;
 
        rc = lstcon_debug_ndlist(&grp->grp_ndl_list, NULL,
        int rc;
 
        rc = lstcon_group_alloc(NULL, &grp);
-       if (rc != 0) {
+       if (rc) {
                CDEBUG(D_NET, "Out of memory\n");
                return rc;
        }
 
                /* node is added to tmp group */
                rc = lstcon_group_ndlink_find(grp, id, &ndl, 1);
-               if (rc != 0) {
+               if (rc) {
                        CERROR("Can't create node link\n");
                        break;
                }
        }
 
-       if (rc != 0) {
+       if (rc) {
                lstcon_group_decref(grp);
                return rc;
        }
                rc = lstcon_session_end();
 
                /* lstcon_session_end() only return local error */
-               if  (rc != 0)
+               if  (rc)
                        return rc;
        }
 
-       if ((feats & ~LST_FEATS_MASK) != 0) {
+       if (feats & ~LST_FEATS_MASK) {
                CNETERR("Unknown session features %x\n",
                        (feats & ~LST_FEATS_MASK));
                return -EINVAL;
                sizeof(console_session.ses_name));
 
        rc = lstcon_batch_add(LST_DEFAULT_BATCH);
-       if (rc != 0)
+       if (rc)
                return rc;
 
        rc = lstcon_rpc_pinger_start();
-       if (rc != 0) {
+       if (rc) {
                lstcon_batch_t *bat = NULL;
 
                lstcon_batch_find(LST_DEFAULT_BATCH, &bat);
                return rc;
        }
 
-       if (copy_to_user(sid_up, &console_session.ses_id,
-                        sizeof(lst_sid_t)) == 0)
+       if (!copy_to_user(sid_up, &console_session.ses_id,
+                         sizeof(lst_sid_t)))
                return rc;
 
        lstcon_session_end();
        rc = lstcon_rpc_trans_ndlist(&console_session.ses_ndl_list,
                                     NULL, LST_TRANS_SESEND, NULL,
                                     lstcon_sesrpc_condition, &trans);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Can't create transaction: %d\n", rc);
                return rc;
        }
 {
        int rc = 0;
 
-       if ((feats & ~LST_FEATS_MASK) != 0) {
+       if (feats & ~LST_FEATS_MASK) {
                CERROR("Can't support these features: %x\n",
                       (feats & ~LST_FEATS_MASK));
                return -EPROTO;
 
        spin_unlock(&console_session.ses_rpc_lock);
 
-       if (rc != 0) {
+       if (rc) {
                CERROR("remote features %x do not match with session features %x of console\n",
                       feats, console_session.ses_features);
        }
                goto out;
        }
 
-       if (lstcon_session_feats_check(req->msg_ses_feats) != 0) {
+       if (lstcon_session_feats_check(req->msg_ses_feats)) {
                jrep->join_status = EPROTO;
                goto out;
        }
                goto out;
        }
 
-       if (lstcon_group_find(jreq->join_group, &grp) != 0) {
+       if (lstcon_group_find(jreq->join_group, &grp)) {
                rc = lstcon_group_alloc(jreq->join_group, &grp);
-               if (rc != 0) {
+               if (rc) {
                        CERROR("Out of memory\n");
                        goto out;
                }
        }
 
        rc = lstcon_group_ndlink_find(grp, rpc->srpc_peer, &ndl, 0);
-       if (rc == 0) {
+       if (!rc) {
                jrep->join_status = EEXIST;
                goto out;
        }
 
        rc = lstcon_group_ndlink_find(grp, rpc->srpc_peer, &ndl, 1);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Out of memory\n");
                goto out;
        }
        ndl->ndl_node->nd_state   = LST_NODE_ACTIVE;
        ndl->ndl_node->nd_timeout = console_session.ses_timeout;
 
-       if (grp->grp_userland == 0)
+       if (!grp->grp_userland)
                grp->grp_userland = 1;
 
        strlcpy(jrep->join_session, console_session.ses_name,
 
        rc = srpc_add_service(&lstcon_acceptor_service);
        LASSERT(rc != -EBUSY);
-       if (rc != 0) {
+       if (rc) {
                LIBCFS_FREE(console_session.ses_ndl_hash,
                            sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
                return rc;
 
        rc = srpc_service_add_buffers(&lstcon_acceptor_service,
                                      lstcon_acceptor_service.sv_wi_total);
-       if (rc != 0) {
+       if (rc) {
                rc = -ENOMEM;
                goto out;
        }
 
        rc = libcfs_register_ioctl(&lstcon_ioctl_handler);
 
-       if (rc == 0) {
+       if (!rc) {
                lstcon_rpc_module_init();
                return 0;
        }
 
        __swab64s(&(lc).route_length);  \
 } while (0)
 
-#define sfw_test_active(t)      (atomic_read(&(t)->tsi_nactive) != 0)
-#define sfw_batch_active(b)     (atomic_read(&(b)->bat_nactive) != 0)
+#define sfw_test_active(t)      (atomic_read(&(t)->tsi_nactive))
+#define sfw_batch_active(b)     (atomic_read(&(b)->bat_nactive))
 
 static struct smoketest_framework {
        struct list_head  fw_zombie_rpcs;     /* RPCs to be recycled */
 
        LASSERT(!sfw_data.fw_shuttingdown);
 
-       if (!sn || sn->sn_timeout == 0)
+       if (!sn || !sn->sn_timeout)
                return;
 
        LASSERT(!sn->sn_timer_active);
        if (!sn || !sn->sn_timer_active)
                return 0;
 
-       LASSERT(sn->sn_timeout != 0);
+       LASSERT(sn->sn_timeout);
 
        if (stt_del_timer(&sn->sn_timer)) { /* timer defused */
                sn->sn_timer_active = 0;
                }
        }
 
-       if (nactive != 0)
+       if (nactive)
                return;   /* wait for active batches to stop */
 
        list_del_init(&sn->sn_list);
 static void
 sfw_client_rpc_fini(srpc_client_rpc_t *rpc)
 {
-       LASSERT(rpc->crpc_bulk.bk_niov == 0);
+       LASSERT(!rpc->crpc_bulk.bk_niov);
        LASSERT(list_empty(&rpc->crpc_list));
-       LASSERT(atomic_read(&rpc->crpc_refcount) == 0);
+       LASSERT(!atomic_read(&rpc->crpc_refcount));
 
        CDEBUG(D_NET, "Outgoing framework RPC done: service %d, peer %s, status %s:%d:%d\n",
               rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
         * console's responsibility to make sure all nodes in a session have
         * same feature mask.
         */
-       if ((msg->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
+       if (msg->msg_ses_feats & ~LST_FEATS_MASK) {
                reply->mksn_status = EPROTO;
                return 0;
        }
        }
 
        rc = srpc_service_add_buffers(svc, nbuf);
-       if (rc != 0) {
+       if (rc) {
                CWARN("Failed to reserve enough buffers: service %s, %d needed: %d\n",
                      svc->sv_name, nbuf, rc);
                /*
        LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC));
 
        if (req->tsr_service == SRPC_SERVICE_BRW) {
-               if ((msg->msg_ses_feats & LST_FEAT_BULK_LEN) == 0) {
+               if (!(msg->msg_ses_feats & LST_FEAT_BULK_LEN)) {
                        test_bulk_req_t *bulk = &req->tsr_u.bulk_v0;
 
                        __swab32s(&bulk->blk_opc);
        tsi->tsi_stoptsu_onerr = !!(req->tsr_stop_onerr);
 
        rc = sfw_load_test(tsi);
-       if (rc != 0) {
+       if (rc) {
                LIBCFS_FREE(tsi, sizeof(*tsi));
                return rc;
        }
        }
 
        rc = tsi->tsi_ops->tso_init(tsi);
-       if (rc == 0) {
+       if (!rc) {
                list_add_tail(&tsi->tsi_list, &tsb->bat_tests);
                return 0;
        }
 
 error:
-       LASSERT(rc != 0);
+       LASSERT(rc);
        sfw_destroy_test_instance(tsi);
        return rc;
 }
        list_del_init(&rpc->crpc_list);
 
        /* batch is stopping or loop is done or get error */
-       if (tsi->tsi_stopping ||
-           tsu->tsu_loop == 0 ||
-           (rpc->crpc_status != 0 && tsi->tsi_stoptsu_onerr))
+       if (tsi->tsi_stopping || !tsu->tsu_loop ||
+           (rpc->crpc_status && tsi->tsi_stoptsu_onerr))
                done = 1;
 
        /* dec ref for poster */
 
        LASSERT(wi == &tsu->tsu_worker);
 
-       if (tsi->tsi_ops->tso_prep_rpc(tsu, tsu->tsu_dest, &rpc) != 0) {
+       if (tsi->tsi_ops->tso_prep_rpc(tsu, tsu->tsu_dest, &rpc)) {
                LASSERT(!rpc);
                goto test_done;
        }
        if (testidx < 0)
                return -EINVAL;
 
-       if (testidx == 0) {
+       if (!testidx) {
                reply->bar_active = atomic_read(&tsb->bat_nactive);
                return 0;
        }
        request = &rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst;
        reply->tsr_sid = !sn ? LST_INVALID_SID : sn->sn_id;
 
-       if (request->tsr_loop == 0 ||
-           request->tsr_concur == 0 ||
+       if (!request->tsr_loop ||
+           !request->tsr_concur ||
            request->tsr_sid.ses_nid == LNET_NID_ANY ||
            request->tsr_ndest > SFW_MAX_NDESTS ||
-           (request->tsr_is_client && request->tsr_ndest == 0) ||
+           (request->tsr_is_client && !request->tsr_ndest) ||
            request->tsr_concur > SFW_MAX_CONCUR ||
            request->tsr_service > SRPC_SERVICE_MAX_ID ||
            request->tsr_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID) {
                int npg = sfw_id_pages(request->tsr_ndest);
                int len;
 
-               if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) {
+               if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
                        len = npg * PAGE_CACHE_SIZE;
 
                } else  {
        }
 
        rc = sfw_add_test_instance(bat, rpc);
-       CDEBUG(rc == 0 ? D_NET : D_WARNING,
+       CDEBUG(!rc ? D_NET : D_WARNING,
               "%s test: sv %d %s, loop %d, concur %d, ndest %d\n",
-              rc == 0 ? "Added" : "Failed to add", request->tsr_service,
+              !rc ? "Added" : "Failed to add", request->tsr_service,
               request->tsr_is_client ? "client" : "server",
               request->tsr_loop, request->tsr_concur, request->tsr_ndest);
 
        }
 
        /* Remove timer to avoid racing with it or expiring active session */
-       if (sfw_del_session_timer() != 0) {
+       if (sfw_del_session_timer()) {
                CERROR("Dropping RPC (%s) from %s: racing with expiry timer.",
                       sv->sv_name, libcfs_id2str(rpc->srpc_peer));
                spin_unlock(&sfw_data.fw_lock);
                        goto out;
                }
 
-       } else if ((request->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
+       } else if (request->msg_ses_feats & ~LST_FEATS_MASK) {
                /*
                 * NB: at this point, old version will ignore features and
                 * create new session anyway, so console should be able
 
        spin_lock(&sfw_data.fw_lock);
 
-       if (status != 0) {
+       if (status) {
                CERROR("Bulk transfer failed for RPC: service %s, peer %s, status %d\n",
                       sv->sv_name, libcfs_id2str(rpc->srpc_peer), status);
                spin_unlock(&sfw_data.fw_lock);
                return -ESHUTDOWN;
        }
 
-       if (sfw_del_session_timer() != 0) {
+       if (sfw_del_session_timer()) {
                CERROR("Dropping RPC (%s) from %s: racing with expiry timer",
                       sv->sv_name, libcfs_id2str(rpc->srpc_peer));
                spin_unlock(&sfw_data.fw_lock);
        LASSERT(!sfw_data.fw_shuttingdown);
        LASSERT(service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
 
-       if (nbulkiov == 0 && !list_empty(&sfw_data.fw_zombie_rpcs)) {
+       if (!nbulkiov && !list_empty(&sfw_data.fw_zombie_rpcs)) {
                rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
                                 srpc_client_rpc_t, crpc_list);
                list_del(&rpc->crpc_list);
        if (!rpc) {
                rpc = srpc_create_client_rpc(peer, service,
                                             nbulkiov, bulklen, done,
-                                            nbulkiov != 0 ?  NULL :
+                                            nbulkiov ?  NULL :
                                             sfw_client_rpc_fini,
                                             priv);
        }
                return -EINVAL;
        }
 
-       if (session_timeout == 0)
+       if (!session_timeout)
                CWARN("Zero session_timeout specified - test sessions never expire.\n");
 
-       if (rpc_timeout == 0)
+       if (!rpc_timeout)
                CWARN("Zero rpc_timeout specified - test RPC never expire.\n");
 
        memset(&sfw_data, 0, sizeof(struct smoketest_framework));
        brw_init_test_client();
        brw_init_test_service();
        rc = sfw_register_test(&brw_test_service, &brw_test_client);
-       LASSERT(rc == 0);
+       LASSERT(!rc);
 
        ping_init_test_client();
        ping_init_test_service();
        rc = sfw_register_test(&ping_test_service, &ping_test_client);
-       LASSERT(rc == 0);
+       LASSERT(!rc);
 
        error = 0;
        list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) {
 
                rc = srpc_add_service(sv);
                LASSERT(rc != -EBUSY);
-               if (rc != 0) {
+               if (rc) {
                        CWARN("Failed to add %s service: %d\n",
                              sv->sv_name, rc);
                        error = rc;
 
                rc = srpc_add_service(sv);
                LASSERT(rc != -EBUSY);
-               if (rc != 0) {
+               if (rc) {
                        CWARN("Failed to add %s service: %d\n",
                              sv->sv_name, rc);
                        error = rc;
                        continue;
 
                rc = srpc_service_add_buffers(sv, sv->sv_wi_total);
-               if (rc != 0) {
+               if (rc) {
                        CWARN("Failed to reserve enough buffers: service %s, %d needed: %d\n",
                              sv->sv_name, sv->sv_wi_total, rc);
                        error = -ENOMEM;
                }
        }
 
-       if (error != 0)
+       if (error)
                sfw_shutdown();
        return error;
 }
        lst_wait_until(!sfw_data.fw_active_srpc, sfw_data.fw_lock,
                       "waiting for active RPC to finish.\n");
 
-       if (sfw_del_session_timer() != 0)
+       if (sfw_del_session_timer())
                lst_wait_until(!sfw_data.fw_session, sfw_data.fw_lock,
                               "waiting for session timer to explode.\n");
 
        sfw_deactivate_session();
-       lst_wait_until(atomic_read(&sfw_data.fw_nzombies) == 0,
+       lst_wait_until(!atomic_read(&sfw_data.fw_nzombies),
                       sfw_data.fw_lock,
                       "waiting for %d zombie sessions to die.\n",
                       atomic_read(&sfw_data.fw_nzombies));
 
 
        rc = cfs_wi_sched_create("lst_s", lnet_cpt_table(), CFS_CPT_ANY,
                                 1, &lst_sched_serial);
-       if (rc != 0) {
+       if (rc) {
                CERROR("Failed to create serial WI scheduler for LST\n");
                return rc;
        }
                nthrs = max(nthrs - 1, 1);
                rc = cfs_wi_sched_create("lst_t", lnet_cpt_table(), i,
                                         nthrs, &lst_sched_test[i]);
-               if (rc != 0) {
+               if (rc) {
                        CERROR("Failed to create CPT affinity WI scheduler %d for LST\n",
                               i);
                        goto error;
        }
 
        rc = srpc_startup();
-       if (rc != 0) {
+       if (rc) {
                CERROR("LST can't startup rpc\n");
                goto error;
        }
        lst_init_step = LST_INIT_RPC;
 
        rc = sfw_startup();
-       if (rc != 0) {
+       if (rc) {
                CERROR("LST can't startup framework\n");
                goto error;
        }
        lst_init_step = LST_INIT_FW;
 
        rc = lstcon_console_init();
-       if (rc != 0) {
+       if (rc) {
                CERROR("LST can't startup console\n");
                goto error;
        }
 
        sfw_session_t *sn = tsi->tsi_batch->bat_session;
 
        LASSERT(tsi->tsi_is_client);
-       LASSERT(sn && (sn->sn_features & ~LST_FEATS_MASK) == 0);
+       LASSERT(sn && !(sn->sn_features & ~LST_FEATS_MASK));
 
        spin_lock_init(&lst_ping_data.pnd_lock);
        lst_ping_data.pnd_counter = 0;
        int rc;
 
        LASSERT(sn);
-       LASSERT((sn->sn_features & ~LST_FEATS_MASK) == 0);
+       LASSERT(!(sn->sn_features & ~LST_FEATS_MASK));
 
        rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, 0, 0, rpc);
-       if (rc != 0)
+       if (rc)
                return rc;
 
        req = &(*rpc)->crpc_reqstmsg.msg_body.ping_reqst;
 
        LASSERT(sn);
 
-       if (rpc->crpc_status != 0) {
+       if (rpc->crpc_status) {
                if (!tsi->tsi_stopping) /* rpc could have been aborted */
                        atomic_inc(&sn->sn_ping_errors);
                CERROR("Unable to ping %s (%d): %d\n",
        rep->pnr_seq   = req->pnr_seq;
        rep->pnr_magic = LST_PING_TEST_MAGIC;
 
-       if ((reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
+       if (reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) {
                replymsg->msg_ses_feats = LST_FEATS_MASK;
                rep->pnr_status = EPROTO;
                return 0;
 
                swi_init_workitem(&scd->scd_buf_wi, scd,
                                  srpc_add_buffer, lst_sched_test[i]);
 
-               if (i != 0 && srpc_serv_is_framework(svc)) {
+               if (i && srpc_serv_is_framework(svc)) {
                        /*
                         * NB: framework service only needs srpc_service_cd for
                         * one partition, but we allocate for all to make
 
        LASSERT(0 <= id && id <= SRPC_SERVICE_MAX_ID);
 
-       if (srpc_service_init(sv) != 0)
+       if (srpc_service_init(sv))
                return -ENOMEM;
 
        spin_lock(&srpc_data.rpc_glock);
 
        rc = LNetMEAttach(portal, peer, matchbits, 0, LNET_UNLINK,
                          local ? LNET_INS_LOCAL : LNET_INS_AFTER, &meh);
-       if (rc != 0) {
+       if (rc) {
                CERROR("LNetMEAttach failed: %d\n", rc);
                LASSERT(rc == -ENOMEM);
                return -ENOMEM;
        md.eq_handle = srpc_data.rpc_lnet_eq;
 
        rc = LNetMDAttach(meh, md, LNET_UNLINK, mdh);
-       if (rc != 0) {
+       if (rc) {
                CERROR("LNetMDAttach failed: %d\n", rc);
                LASSERT(rc == -ENOMEM);
 
                rc = LNetMEUnlink(meh);
-               LASSERT(rc == 0);
+               LASSERT(!rc);
                return -ENOMEM;
        }
 
        md.start     = buf;
        md.length    = len;
        md.eq_handle = srpc_data.rpc_lnet_eq;
-       md.threshold = ((options & LNET_MD_OP_GET) != 0) ? 2 : 1;
+       md.threshold = options & LNET_MD_OP_GET ? 2 : 1;
        md.options   = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET);
 
        rc = LNetMDBind(md, LNET_UNLINK, mdh);
-       if (rc != 0) {
+       if (rc) {
                CERROR("LNetMDBind failed: %d\n", rc);
                LASSERT(rc == -ENOMEM);
                return -ENOMEM;
         * they're only meaningful for MDs attached to an ME (i.e. passive
         * buffers...
         */
-       if ((options & LNET_MD_OP_PUT) != 0) {
+       if (options & LNET_MD_OP_PUT) {
                rc = LNetPut(self, *mdh, LNET_NOACK_REQ, peer,
                             portal, matchbits, 0, 0);
        } else {
-               LASSERT((options & LNET_MD_OP_GET) != 0);
+               LASSERT(options & LNET_MD_OP_GET);
 
                rc = LNetGet(self, *mdh, peer, portal, matchbits, 0);
        }
 
-       if (rc != 0) {
+       if (rc) {
                CERROR("LNet%s(%s, %d, %lld) failed: %d\n",
-                      ((options & LNET_MD_OP_PUT) != 0) ? "Put" : "Get",
+                      options & LNET_MD_OP_PUT ? "Put" : "Get",
                       libcfs_id2str(peer), portal, matchbits, rc);
 
                /*
                 * with failure, so fall through and return success here.
                 */
                rc = LNetMDUnlink(*mdh);
-               LASSERT(rc == 0);
+               LASSERT(!rc);
        } else {
                CDEBUG(D_NET, "Posted active RDMA: peer %s, portal %u, matchbits %#llx\n",
                       libcfs_id2str(peer), portal, matchbits);
         */
        spin_lock(&scd->scd_lock);
 
-       if (rc == 0) {
+       if (!rc) {
                if (!sv->sv_shuttingdown)
                        return 0;
 
                }
 
                rc = srpc_service_post_buffer(scd, buf);
-               if (rc != 0)
+               if (rc)
                        break; /* buf has been freed inside */
 
                LASSERT(scd->scd_buf_posting > 0);
                scd->scd_buf_low = max(2, scd->scd_buf_total / 4);
        }
 
-       if (rc != 0) {
+       if (rc) {
                scd->scd_buf_err_stamp = ktime_get_real_seconds();
                scd->scd_buf_err = rc;
 
                 * block all WIs pending on lst_sched_serial for a moment
                 * which is not good but not fatal.
                 */
-               lst_wait_until(scd->scd_buf_err != 0 ||
-                              (scd->scd_buf_adjust == 0 &&
-                               scd->scd_buf_posting == 0),
+               lst_wait_until(scd->scd_buf_err ||
+                              (!scd->scd_buf_adjust &&
+                               !scd->scd_buf_posting),
                               scd->scd_lock, "waiting for adding buffer\n");
 
-               if (scd->scd_buf_err != 0 && rc == 0)
+               if (scd->scd_buf_err && !rc)
                        rc = scd->scd_buf_err;
 
                spin_unlock(&scd->scd_lock);
        __must_hold(&scd->scd_lock)
 {
        if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
-               if (srpc_service_post_buffer(scd, buf) != 0) {
+               if (srpc_service_post_buffer(scd, buf)) {
                        CWARN("Failed to post %s buffer\n",
                              scd->scd_svc->sv_name);
                }
        if (scd->scd_buf_adjust < 0) {
                scd->scd_buf_adjust++;
                if (scd->scd_buf_adjust < 0 &&
-                   scd->scd_buf_total == 0 && scd->scd_buf_posting == 0) {
+                   !scd->scd_buf_total && !scd->scd_buf_posting) {
                        CDEBUG(D_INFO,
                               "Try to recycle %d buffers but nothing left\n",
                               scd->scd_buf_adjust);
                                    sizeof(srpc_msg_t), LNET_MD_OP_PUT,
                                    rpc->crpc_dest, LNET_NID_ANY,
                                    &rpc->crpc_reqstmdh, ev);
-       if (rc != 0) {
+       if (rc) {
                LASSERT(rc == -ENOMEM);
                ev->ev_fired = 1;  /* no more event expected */
        }
                                    &rpc->crpc_replymsg, sizeof(srpc_msg_t),
                                    LNET_MD_OP_PUT, rpc->crpc_dest,
                                    &rpc->crpc_replymdh, ev);
-       if (rc != 0) {
+       if (rc) {
                LASSERT(rc == -ENOMEM);
                ev->ev_fired = 1;  /* no more event expected */
        }
 
        LASSERT(bk->bk_niov <= LNET_MAX_IOV);
 
-       if (bk->bk_niov == 0)
+       if (!bk->bk_niov)
                return 0; /* nothing to do */
 
        opt = bk->bk_sink ? LNET_MD_OP_PUT : LNET_MD_OP_GET;
        rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
                                    &bk->bk_iovs[0], bk->bk_niov, opt,
                                    rpc->crpc_dest, &bk->bk_mdh, ev);
-       if (rc != 0) {
+       if (rc) {
                LASSERT(rc == -ENOMEM);
                ev->ev_fired = 1;  /* no more event expected */
        }
                                   &bk->bk_iovs[0], bk->bk_niov, opt,
                                   rpc->srpc_peer, rpc->srpc_self,
                                   &bk->bk_mdh, ev);
-       if (rc != 0)
+       if (rc)
                ev->ev_fired = 1;  /* no more event expected */
        return rc;
 }
        struct srpc_service *sv  = scd->scd_svc;
        srpc_buffer_t *buffer;
 
-       LASSERT(status != 0 || rpc->srpc_wi.swi_state == SWI_STATE_DONE);
+       LASSERT(status || rpc->srpc_wi.swi_state == SWI_STATE_DONE);
 
        rpc->srpc_status = status;
 
-       CDEBUG_LIMIT(status == 0 ? D_NET : D_NETERROR,
+       CDEBUG_LIMIT(!status ? D_NET : D_NETERROR,
                     "Server RPC %p done: service %s, peer %s, status %s:%d\n",
                     rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
                     swi_state2str(rpc->srpc_wi.swi_state), status);
 
-       if (status != 0) {
+       if (status) {
                spin_lock(&srpc_data.rpc_glock);
                srpc_data.rpc_counters.rpcs_dropped++;
                spin_unlock(&srpc_data.rpc_glock);
                msg = &rpc->srpc_reqstbuf->buf_msg;
                reply = &rpc->srpc_replymsg.msg_body.reply;
 
-               if (msg->msg_magic == 0) {
+               if (!msg->msg_magic) {
                        /* moaned already in srpc_lnet_ev_handler */
                        srpc_server_rpc_done(rpc, EBADMSG);
                        return 1;
                } else {
                        reply->status = 0;
                        rc = (*sv->sv_handler)(rpc);
-                       LASSERT(reply->status == 0 || !rpc->srpc_bulk);
-                       if (rc != 0) {
+                       LASSERT(!reply->status || !rpc->srpc_bulk);
+                       if (rc) {
                                srpc_server_rpc_done(rpc, rc);
                                return 1;
                        }
 
                if (rpc->srpc_bulk) {
                        rc = srpc_do_bulk(rpc);
-                       if (rc == 0)
+                       if (!rc)
                                return 0; /* wait for bulk */
 
                        LASSERT(ev->ev_fired);
                        if (sv->sv_bulk_ready)
                                rc = (*sv->sv_bulk_ready) (rpc, rc);
 
-                       if (rc != 0) {
+                       if (rc) {
                                srpc_server_rpc_done(rpc, rc);
                                return 1;
                        }
 
                wi->swi_state = SWI_STATE_REPLY_SUBMITTED;
                rc = srpc_send_reply(rpc);
-               if (rc == 0)
+               if (!rc)
                        return 0; /* wait for reply */
                srpc_server_rpc_done(rpc, rc);
                return 1;
 {
        stt_timer_t *timer = &rpc->crpc_timer;
 
-       if (rpc->crpc_timeout == 0)
+       if (!rpc->crpc_timeout)
                return;
 
        INIT_LIST_HEAD(&timer->stt_list);
 srpc_del_client_rpc_timer(srpc_client_rpc_t *rpc)
 {
        /* timer not planted or already exploded */
-       if (rpc->crpc_timeout == 0)
+       if (!rpc->crpc_timeout)
                return;
 
        /* timer successfully defused */
                return;
 
        /* timer detonated, wait for it to explode */
-       while (rpc->crpc_timeout != 0) {
+       while (rpc->crpc_timeout) {
                spin_unlock(&rpc->crpc_lock);
 
                schedule();
 {
        swi_workitem_t *wi = &rpc->crpc_wi;
 
-       LASSERT(status != 0 || wi->swi_state == SWI_STATE_DONE);
+       LASSERT(status || wi->swi_state == SWI_STATE_DONE);
 
        spin_lock(&rpc->crpc_lock);
 
        rpc->crpc_closed = 1;
-       if (rpc->crpc_status == 0)
+       if (!rpc->crpc_status)
                rpc->crpc_status = status;
 
        srpc_del_client_rpc_timer(rpc);
 
-       CDEBUG_LIMIT((status == 0) ? D_NET : D_NETERROR,
+       CDEBUG_LIMIT(!status ? D_NET : D_NETERROR,
                     "Client RPC done: service %d, peer %s, status %s:%d:%d\n",
                     rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
                     swi_state2str(wi->swi_state), rpc->crpc_aborted, status);
                LASSERT(!srpc_event_pending(rpc));
 
                rc = srpc_prepare_reply(rpc);
-               if (rc != 0) {
+               if (rc) {
                        srpc_client_rpc_done(rpc, rc);
                        return 1;
                }
 
                rc = srpc_prepare_bulk(rpc);
-               if (rc != 0)
+               if (rc)
                        break;
 
                wi->swi_state = SWI_STATE_REQUEST_SUBMITTED;
                        break;
 
                rc = rpc->crpc_reqstev.ev_status;
-               if (rc != 0)
+               if (rc)
                        break;
 
                wi->swi_state = SWI_STATE_REQUEST_SENT;
                        break;
 
                rc = rpc->crpc_replyev.ev_status;
-               if (rc != 0)
+               if (rc)
                        break;
 
                srpc_unpack_msg_hdr(reply);
                        break;
                }
 
-               if (do_bulk && reply->msg_body.reply.status != 0) {
+               if (do_bulk && reply->msg_body.reply.status) {
                        CWARN("Remote error %d at %s, unlink bulk buffer in case peer didn't initiate bulk transfer\n",
                              reply->msg_body.reply.status,
                              libcfs_id2str(rpc->crpc_dest));
                 * remote error.
                 */
                if (do_bulk && rpc->crpc_bulkev.ev_lnet == LNET_EVENT_UNLINK &&
-                   rpc->crpc_status == 0 && reply->msg_body.reply.status != 0)
+                   !rpc->crpc_status && reply->msg_body.reply.status)
                        rc = 0;
 
                wi->swi_state = SWI_STATE_DONE;
                return 1;
        }
 
-       if (rc != 0) {
+       if (rc) {
                spin_lock(&rpc->crpc_lock);
                srpc_abort_rpc(rpc, rc);
                spin_unlock(&rpc->crpc_lock);
 void
 srpc_abort_rpc(srpc_client_rpc_t *rpc, int why)
 {
-       LASSERT(why != 0);
+       LASSERT(why);
 
        if (rpc->crpc_aborted || /* already aborted */
            rpc->crpc_closed)    /* callback imminent */
                 * Repost buffer before replying since test client
                 * might send me another RPC once it gets the reply
                 */
-               if (srpc_service_post_buffer(scd, buffer) != 0)
+               if (srpc_service_post_buffer(scd, buffer))
                        CWARN("Failed to repost %s buffer\n", sv->sv_name);
                rpc->srpc_reqstbuf = NULL;
        }
                                   sizeof(*msg), LNET_MD_OP_PUT,
                                   rpc->srpc_peer, rpc->srpc_self,
                                   &rpc->srpc_replymdh, ev);
-       if (rc != 0)
+       if (rc)
                ev->ev_fired = 1;  /* no more event expected */
        return rc;
 }
 
        LASSERT(!in_interrupt());
 
-       if (ev->status != 0) {
+       if (ev->status) {
                spin_lock(&srpc_data.rpc_glock);
                srpc_data.rpc_counters.errors++;
                spin_unlock(&srpc_data.rpc_glock);
                       rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet);
                LBUG();
        case SRPC_REQUEST_SENT:
-               if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
+               if (!ev->status && ev->type != LNET_EVENT_UNLINK) {
                        spin_lock(&srpc_data.rpc_glock);
                        srpc_data.rpc_counters.rpcs_sent++;
                        spin_unlock(&srpc_data.rpc_glock);
 
                spin_lock(&crpc->crpc_lock);
 
-               LASSERT(rpcev->ev_fired == 0);
+               LASSERT(!rpcev->ev_fired);
                rpcev->ev_fired  = 1;
                rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
                                                -EINTR : ev->status;
                        break;
                }
 
-               if (scd->scd_buf_err_stamp != 0 &&
+               if (scd->scd_buf_err_stamp &&
                    scd->scd_buf_err_stamp < ktime_get_real_seconds()) {
                        /* re-enable adding buffer */
                        scd->scd_buf_err_stamp = 0;
                        scd->scd_buf_err = 0;
                }
 
-               if (scd->scd_buf_err == 0 && /* adding buffer is enabled */
-                   scd->scd_buf_adjust == 0 &&
+               if (!scd->scd_buf_err && /* adding buffer is enabled */
+                   !scd->scd_buf_adjust &&
                    scd->scd_buf_nposted < scd->scd_buf_low) {
                        scd->scd_buf_adjust = max(scd->scd_buf_total / 2,
                                                  SFW_TEST_WI_MIN);
                msg = &buffer->buf_msg;
                type = srpc_service2request(sv->sv_id);
 
-               if (ev->status != 0 || ev->mlength != sizeof(*msg) ||
+               if (ev->status || ev->mlength != sizeof(*msg) ||
                    (msg->msg_type != type &&
                     msg->msg_type != __swab32(type)) ||
                    (msg->msg_magic != SRPC_MSG_MAGIC &&
                        break; /* wait for final event */
 
        case SRPC_BULK_PUT_SENT:
-               if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
+               if (!ev->status && ev->type != LNET_EVENT_UNLINK) {
                        spin_lock(&srpc_data.rpc_glock);
 
                        if (rpcev->ev_type == SRPC_BULK_GET_RPLD)
 
        LNetInvalidateHandle(&srpc_data.rpc_lnet_eq);
        rc = LNetEQAlloc(0, srpc_lnet_ev_handler, &srpc_data.rpc_lnet_eq);
-       if (rc != 0) {
+       if (rc) {
                CERROR("LNetEQAlloc() has failed: %d\n", rc);
                goto bail;
        }
 
        rc = LNetSetLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
-       LASSERT(rc == 0);
+       LASSERT(!rc);
        rc = LNetSetLazyPortal(SRPC_REQUEST_PORTAL);
-       LASSERT(rc == 0);
+       LASSERT(!rc);
 
        srpc_data.rpc_state = SRPC_STATE_EQ_INIT;
 
        rc = stt_startup();
 
 bail:
-       if (rc != 0)
+       if (rc)
                srpc_shutdown();
        else
                srpc_data.rpc_state = SRPC_STATE_RUNNING;
        case SRPC_STATE_EQ_INIT:
                rc = LNetClearLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
                rc = LNetClearLazyPortal(SRPC_REQUEST_PORTAL);
-               LASSERT(rc == 0);
+               LASSERT(!rc);
                rc = LNetEQFree(srpc_data.rpc_lnet_eq);
-               LASSERT(rc == 0); /* the EQ should have no user by now */
+               LASSERT(!rc); /* the EQ should have no user by now */
 
        case SRPC_STATE_NI_INIT:
                LNetNIFini();
 
                srpc_destroy_client_rpc(rpc);                            \
 } while (0)
 
-#define srpc_event_pending(rpc)   ((rpc)->crpc_bulkev.ev_fired == 0 ||   \
-                                  (rpc)->crpc_reqstev.ev_fired == 0 ||  \
-                                  (rpc)->crpc_replyev.ev_fired == 0)
+#define srpc_event_pending(rpc)   (!(rpc)->crpc_bulkev.ev_fired ||     \
+                                  !(rpc)->crpc_reqstev.ev_fired ||     \
+                                  !(rpc)->crpc_replyev.ev_fired)
 
 /* CPU partition data of srpc service */
 struct srpc_service_cd {
 {
        LASSERT(rpc);
        LASSERT(!srpc_event_pending(rpc));
-       LASSERT(atomic_read(&rpc->crpc_refcount) == 0);
+       LASSERT(!atomic_read(&rpc->crpc_refcount));
 
        if (!rpc->crpc_fini)
                LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
 
        LASSERT(sv->sv_shuttingdown);
 
-       while (srpc_finish_service(sv) == 0) {
+       while (!srpc_finish_service(sv)) {
                i++;
                CDEBUG(((i & -i) == i) ? D_WARNING : D_NET,
                       "Waiting for %s service to shutdown...\n",
 
        stt_data.stt_nthreads = 0;
        init_waitqueue_head(&stt_data.stt_waitq);
        rc = stt_start_timer_thread();
-       if (rc != 0)
+       if (rc)
                CERROR("Can't spawn timer thread: %d\n", rc);
 
        return rc;
        stt_data.stt_shuttingdown = 1;
 
        wake_up(&stt_data.stt_waitq);
-       lst_wait_until(stt_data.stt_nthreads == 0, stt_data.stt_lock,
+       lst_wait_until(!stt_data.stt_nthreads, stt_data.stt_lock,
                       "waiting for %d threads to terminate\n",
                       stt_data.stt_nthreads);