static inline int node_is_left_child(struct interval_node *node)
 {
-       LASSERT(node->in_parent != NULL);
        return node == node->in_parent->in_left;
 }
 
 static inline int node_is_right_child(struct interval_node *node)
 {
-       LASSERT(node->in_parent != NULL);
        return node == node->in_parent->in_right;
 }
 
        LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
        node = kmem_cache_alloc(ldlm_interval_slab, GFP_NOFS | __GFP_ZERO);
-       if (node == NULL)
+       if (!node)
                return NULL;
 
        INIT_LIST_HEAD(&node->li_group);
 {
        struct ldlm_interval *n = l->l_tree_node;
 
-       if (n == NULL)
+       if (!n)
                return NULL;
 
        LASSERT(!list_empty(&n->li_group));
        LASSERT(lock->l_granted_mode == lock->l_req_mode);
 
        node = lock->l_tree_node;
-       LASSERT(node != NULL);
+       LASSERT(node);
        LASSERT(!interval_is_intree(&node->li_node));
 
        idx = lock_mode_to_index(lock->l_granted_mode);
                struct ldlm_interval *tmp;
 
                tmp = ldlm_interval_detach(lock);
-               LASSERT(tmp != NULL);
                ldlm_interval_free(tmp);
                ldlm_interval_attach(to_ldlm_interval(found), lock);
        }
        LASSERT(lock->l_granted_mode == 1 << idx);
        tree = &res->lr_itree[idx];
 
-       LASSERT(tree->lit_root != NULL); /* assure the tree is not null */
+       LASSERT(tree->lit_root); /* ensure the tree is not empty */
 
        tree->lit_size--;
        node = ldlm_interval_detach(lock);
 
                lock->l_policy_data.l_flock.start =
                        new->l_policy_data.l_flock.end + 1;
                new2->l_conn_export = lock->l_conn_export;
-               if (lock->l_export != NULL) {
+               if (lock->l_export) {
                        new2->l_export = class_export_lock_get(lock->l_export,
                                                               new2);
                        if (new2->l_export->exp_lock_hash &&
        }
 
        /* if new2 is created but never used, destroy it*/
-       if (splitted == 0 && new2 != NULL)
+       if (splitted == 0 && new2)
                ldlm_lock_destroy_nolock(new2);
 
        /* At this point we're granting the lock request. */
        if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
            (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
                if (lock->l_req_mode == lock->l_granted_mode &&
-                   lock->l_granted_mode != LCK_NL &&
-                   data == NULL)
+                   lock->l_granted_mode != LCK_NL && !data)
                        ldlm_lock_decref_internal(lock, lock->l_req_mode);
 
                /* Need to wake up the waiter if we were evicted */
 
        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
                       LDLM_FL_BLOCK_CONV))) {
-               if (data == NULL)
+               if (!data)
                        /* mds granted the lock in the reply */
                        goto granted;
                /* CP AST RPC: lock get granted, wake it up */
        obd = class_exp2obd(lock->l_conn_export);
 
        /* if this is a local lock, there is no import */
-       if (obd != NULL)
+       if (obd)
                imp = obd->u.cli.cl_import;
 
-       if (imp != NULL) {
+       if (imp) {
                spin_lock(&imp->imp_lock);
                fwd.fwd_generation = imp->imp_generation;
                spin_unlock(&imp->imp_lock);
 
                           &obddev->obd_ldlm_client);
 
        imp = class_new_import(obddev);
-       if (imp == NULL) {
+       if (!imp) {
                rc = -ENOENT;
                goto err_ldlm;
        }
                                                   LDLM_NAMESPACE_CLIENT,
                                                   LDLM_NAMESPACE_GREEDY,
                                                   ns_type);
-       if (obddev->obd_namespace == NULL) {
+       if (!obddev->obd_namespace) {
                CERROR("Unable to create client namespace - %s\n",
                       obddev->obd_name);
                rc = -ENOMEM;
        ldlm_namespace_free_post(obddev->obd_namespace);
        obddev->obd_namespace = NULL;
 
-       LASSERT(obddev->u.cli.cl_import == NULL);
+       LASSERT(!obddev->u.cli.cl_import);
 
        ldlm_put_ref();
        return 0;
                LASSERT(imp->imp_state == LUSTRE_IMP_DISCON);
                goto out_ldlm;
        }
-       LASSERT(*exp != NULL && (*exp)->exp_connection);
+       LASSERT(*exp && (*exp)->exp_connection);
 
        if (data) {
                LASSERTF((ocd->ocd_connect_flags & data->ocd_connect_flags) ==
         * never added.) */
        (void)ptlrpc_pinger_del_import(imp);
 
-       if (obd->obd_namespace != NULL) {
+       if (obd->obd_namespace) {
                /* obd_force == local only */
                ldlm_cli_cancel_unused(obd->obd_namespace, NULL,
                                       obd->obd_force ? LCF_LOCAL : 0, NULL);
 
        svcpt = req->rq_rqbd->rqbd_svcpt;
        rs = req->rq_reply_state;
-       if (rs == NULL || !rs->rs_difficult) {
+       if (!rs || !rs->rs_difficult) {
                /* no notifiers */
                target_send_reply_msg(req, rc, fail_id);
                return;
        }
 
        /* must be an export if locks saved */
-       LASSERT(req->rq_export != NULL);
+       LASSERT(req->rq_export);
        /* req/reply consistent */
        LASSERT(rs->rs_svcpt == svcpt);
 
        LASSERT(!rs->rs_scheduled_ever);
        LASSERT(!rs->rs_handled);
        LASSERT(!rs->rs_on_net);
-       LASSERT(rs->rs_export == NULL);
+       LASSERT(!rs->rs_export);
        LASSERT(list_empty(&rs->rs_obd_list));
        LASSERT(list_empty(&rs->rs_exp_list));
 
 
 {
        struct ldlm_lock *lock;
 
-       if (resource == NULL)
-               LBUG();
+       LASSERT(resource);
 
        lock = kmem_cache_alloc(ldlm_lock_slab, GFP_NOFS | __GFP_ZERO);
-       if (lock == NULL)
+       if (!lock)
                return NULL;
 
        spin_lock_init(&lock->l_lock);
        unlock_res_and_lock(lock);
 
        newres = ldlm_resource_get(ns, NULL, new_resid, type, 1);
-       if (newres == NULL)
+       if (!newres)
                return -ENOMEM;
 
        lu_ref_add(&newres->lr_reference, "lock", lock);
        LASSERT(handle);
 
        lock = class_handle2object(handle->cookie);
-       if (lock == NULL)
+       if (!lock)
                return NULL;
 
        /* It's unlikely but possible that someone marked the lock as
 
        lock_res_and_lock(lock);
 
-       LASSERT(lock->l_resource != NULL);
+       LASSERT(lock->l_resource);
 
        lu_ref_add_atomic(&lock->l_reference, "handle", current);
        if (unlikely(lock->l_flags & LDLM_FL_DESTROYED)) {
                LASSERT(list_empty(&lock->l_bl_ast));
                list_add(&lock->l_bl_ast, work_list);
                LDLM_LOCK_GET(lock);
-               LASSERT(lock->l_blocking_lock == NULL);
+               LASSERT(!lock->l_blocking_lock);
                lock->l_blocking_lock = LDLM_LOCK_GET(new);
        }
 }
        struct ldlm_lock *lock;
 
        lock = ldlm_handle2lock(lockh);
-       LASSERT(lock != NULL);
+       LASSERT(lock);
        ldlm_lock_addref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
 }
 
        result = -EAGAIN;
        lock = ldlm_handle2lock(lockh);
-       if (lock != NULL) {
+       if (lock) {
                lock_res_and_lock(lock);
                if (lock->l_readers != 0 || lock->l_writers != 0 ||
                    !(lock->l_flags & LDLM_FL_CBPENDING)) {
 {
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
 
-       LASSERTF(lock != NULL, "Non-existing lock: %#llx\n", lockh->cookie);
+       LASSERTF(lock, "Non-existing lock: %#llx\n", lockh->cookie);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
 }
 {
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
 
-       LASSERT(lock != NULL);
+       LASSERT(lock);
 
        LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
        lock_res_and_lock(lock);
        if (lock->l_granted_mode < res->lr_most_restr)
                res->lr_most_restr = lock->l_granted_mode;
 
-       if (work_list && lock->l_completion_ast != NULL)
+       if (work_list && lock->l_completion_ast)
                ldlm_add_ast_work_item(lock, NULL, work_list);
 
        ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock);
        struct ldlm_lock *lock, *old_lock = NULL;
        int rc = 0;
 
-       if (ns == NULL) {
+       if (!ns) {
                old_lock = ldlm_handle2lock(lockh);
                LASSERT(old_lock);
 
        }
 
        res = ldlm_resource_get(ns, NULL, res_id, type, 0);
-       if (res == NULL) {
-               LASSERT(old_lock == NULL);
+       if (!res) {
+               LASSERT(!old_lock);
                return 0;
        }
 
 
        lock = search_queue(&res->lr_granted, &mode, policy, old_lock,
                            flags, unref);
-       if (lock != NULL) {
+       if (lock) {
                rc = 1;
                goto out;
        }
        }
        lock = search_queue(&res->lr_waiting, &mode, policy, old_lock,
                            flags, unref);
-       if (lock != NULL) {
+       if (lock) {
                rc = 1;
                goto out;
        }
        ldlm_mode_t mode = 0;
 
        lock = ldlm_handle2lock(lockh);
-       if (lock != NULL) {
+       if (lock) {
                lock_res_and_lock(lock);
                if (lock->l_flags & LDLM_FL_GONE_MASK)
                        goto out;
        }
 
 out:
-       if (lock != NULL) {
+       if (lock) {
                unlock_res_and_lock(lock);
                LDLM_LOCK_PUT(lock);
        }
 {
        void *lvb;
 
-       LASSERT(data != NULL);
+       LASSERT(data);
        LASSERT(size >= 0);
 
        switch (lock->l_lvb_type) {
                                lvb = req_capsule_server_swab_get(pill,
                                                &RMF_DLM_LVB,
                                                lustre_swab_ost_lvb);
-                       if (unlikely(lvb == NULL)) {
+                       if (unlikely(!lvb)) {
                                LDLM_ERROR(lock, "no LVB");
                                return -EPROTO;
                        }
                                lvb = req_capsule_server_sized_swab_get(pill,
                                                &RMF_DLM_LVB, size,
                                                lustre_swab_ost_lvb_v1);
-                       if (unlikely(lvb == NULL)) {
+                       if (unlikely(!lvb)) {
                                LDLM_ERROR(lock, "no LVB");
                                return -EPROTO;
                        }
                                lvb = req_capsule_server_swab_get(pill,
                                                &RMF_DLM_LVB,
                                                lustre_swab_lquota_lvb);
-                       if (unlikely(lvb == NULL)) {
+                       if (unlikely(!lvb)) {
                                LDLM_ERROR(lock, "no LVB");
                                return -EPROTO;
                        }
                        lvb = req_capsule_client_get(pill, &RMF_DLM_LVB);
                else
                        lvb = req_capsule_server_get(pill, &RMF_DLM_LVB);
-               if (unlikely(lvb == NULL)) {
+               if (unlikely(!lvb)) {
                        LDLM_ERROR(lock, "no LVB");
                        return -EPROTO;
                }
        struct ldlm_resource *res;
 
        res = ldlm_resource_get(ns, NULL, res_id, type, 1);
-       if (res == NULL)
+       if (!res)
                return NULL;
 
        lock = ldlm_lock_new(res);
 
-       if (lock == NULL)
+       if (!lock)
                return NULL;
 
        lock->l_req_mode = mode;
        lock->l_tree_node = NULL;
        /* if this is the extent lock, allocate the interval tree node */
        if (type == LDLM_EXTENT) {
-               if (ldlm_interval_alloc(lock) == NULL)
+               if (!ldlm_interval_alloc(lock))
                        goto out;
        }
 
        lock->l_flags &= ~LDLM_FL_CP_REQD;
        unlock_res_and_lock(lock);
 
-       if (completion_callback != NULL)
+       if (completion_callback)
                rc = completion_callback(lock, 0, (void *)arg);
        LDLM_LOCK_RELEASE(lock);
 
         * to keep the number of requests in flight to ns_max_parallel_ast */
        arg->set = ptlrpc_prep_fcset(ns->ns_max_parallel_ast ? : UINT_MAX,
                                     work_ast_lock, arg);
-       if (arg->set == NULL) {
+       if (!arg->set) {
                rc = -ENOMEM;
                goto out;
        }
        int rc = -EINVAL;
 
        if (lock) {
-               if (lock->l_ast_data == NULL)
+               if (!lock->l_ast_data)
                        lock->l_ast_data = data;
                if (lock->l_ast_data == data)
                        rc = 0;
                return;
 
        lock = ldlm_handle2lock(lockh);
-       if (lock == NULL)
+       if (!lock)
                return;
 
        LDLM_DEBUG_LIMIT(level, lock, "###");
 
        if (exp && exp->exp_connection) {
                nid = libcfs_nid2str(exp->exp_connection->c_peer.nid);
-       } else if (exp && exp->exp_obd != NULL) {
+       } else if (exp && exp->exp_obd) {
                struct obd_import *imp = exp->exp_obd->u.cli.cl_import;
 
                nid = libcfs_nid2str(imp->imp_connection->c_peer.nid);
        }
 
-       if (resource == NULL) {
+       if (!resource) {
                libcfs_debug_vmsg2(msgdata, fmt, args,
                                   " ns: \?\? lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: \?\? rrc=\?\? type: \?\?\? flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
                                   lock,
 
                CDEBUG(D_DLMTRACE,
                       "Lock %p already unused, calling callback (%p)\n", lock,
                       lock->l_blocking_ast);
-               if (lock->l_blocking_ast != NULL)
+               if (lock->l_blocking_ast)
                        lock->l_blocking_ast(lock, ld, lock->l_ast_data,
                                             LDLM_CB_BLOCKING);
        } else {
        } else if (lvb_len > 0) {
                if (lock->l_lvb_len > 0) {
                        /* for extent lock, lvb contains ost_lvb{}. */
-                       LASSERT(lock->l_lvb_data != NULL);
+                       LASSERT(lock->l_lvb_data);
 
                        if (unlikely(lock->l_lvb_len < lvb_len)) {
                                LDLM_ERROR(lock, "Replied LVB is larger than expectation, expected = %d, replied = %d",
                        }
 
                        lock_res_and_lock(lock);
-                       LASSERT(lock->l_lvb_data == NULL);
+                       LASSERT(!lock->l_lvb_data);
                        lock->l_lvb_type = LVB_T_LAYOUT;
                        lock->l_lvb_data = lvb_data;
                        lock->l_lvb_len = lvb_len;
 
        LDLM_DEBUG(lock, "client glimpse AST callback handler");
 
-       if (lock->l_glimpse_ast != NULL)
+       if (lock->l_glimpse_ast)
                rc = lock->l_glimpse_ast(lock, req);
 
-       if (req->rq_repmsg != NULL) {
+       if (req->rq_repmsg) {
                ptlrpc_reply(req);
        } else {
                req->rq_status = rc;
 
        blwi->blwi_ns = ns;
        blwi->blwi_flags = cancel_flags;
-       if (ld != NULL)
+       if (ld)
                blwi->blwi_ld = *ld;
        if (count) {
                list_add(&blwi->blwi_head, cancels);
        req_capsule_set(&req->rq_pill, &RQF_OBD_SET_INFO);
 
        key = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
-       if (key == NULL) {
+       if (!key) {
                DEBUG_REQ(D_IOCTL, req, "no set_info key");
                return -EFAULT;
        }
        keylen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_KEY,
                                      RCL_CLIENT);
        val = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
-       if (val == NULL) {
+       if (!val) {
                DEBUG_REQ(D_IOCTL, req, "no set_info val");
                return -EFAULT;
        }
        struct client_obd *cli = &req->rq_export->exp_obd->u.cli;
 
        oqctl = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
-       if (oqctl == NULL) {
+       if (!oqctl) {
                CERROR("Can't unpack obd_quotactl\n");
                return -EPROTO;
        }
 
        req_capsule_init(&req->rq_pill, req, RCL_SERVER);
 
-       if (req->rq_export == NULL) {
+       if (!req->rq_export) {
                rc = ldlm_callback_reply(req, -ENOTCONN);
                ldlm_callback_errmsg(req, "Operate on unconnected server",
                                     rc, NULL);
                return 0;
        }
 
-       LASSERT(req->rq_export != NULL);
-       LASSERT(req->rq_export->exp_obd != NULL);
+       LASSERT(req->rq_export->exp_obd);
 
        switch (lustre_msg_get_opc(req->rq_reqmsg)) {
        case LDLM_BL_CALLBACK:
        }
 
        ns = req->rq_export->exp_obd->obd_namespace;
-       LASSERT(ns != NULL);
+       LASSERT(ns);
 
        req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
 
        dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
-       if (dlm_req == NULL) {
+       if (!dlm_req) {
                rc = ldlm_callback_reply(req, -EPROTO);
                ldlm_callback_errmsg(req, "Operate without parameter", rc,
                                     NULL);
 
                blwi = ldlm_bl_get_work(blp);
 
-               if (blwi == NULL) {
+               if (!blwi) {
                        atomic_dec(&blp->blp_busy_threads);
                        l_wait_event_exclusive(blp->blp_waitq,
-                                        (blwi = ldlm_bl_get_work(blp)) != NULL,
+                                        (blwi = ldlm_bl_get_work(blp)),
                                         &lwi);
                        busy = atomic_inc_return(&blp->blp_busy_threads);
                } else {
                        busy = atomic_read(&blp->blp_busy_threads);
                }
 
-               if (blwi->blwi_ns == NULL)
+               if (!blwi->blwi_ns)
                        /* added by ldlm_cleanup() */
                        break;
 
        int rc = 0;
        int i;
 
-       if (ldlm_state != NULL)
+       if (ldlm_state)
                return -EALREADY;
 
        ldlm_state = kzalloc(sizeof(*ldlm_state), GFP_NOFS);
 
        ldlm_pools_fini();
 
-       if (ldlm_state->ldlm_bl_pool != NULL) {
+       if (ldlm_state->ldlm_bl_pool) {
                struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
 
                while (atomic_read(&blp->blp_num_threads) > 0) {
                kfree(blp);
        }
 
-       if (ldlm_state->ldlm_cb_service != NULL)
+       if (ldlm_state->ldlm_cb_service)
                ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
 
        if (ldlm_ns_kset)
        ldlm_resource_slab = kmem_cache_create("ldlm_resources",
                                               sizeof(struct ldlm_resource), 0,
                                               SLAB_HWCACHE_ALIGN, NULL);
-       if (ldlm_resource_slab == NULL)
+       if (!ldlm_resource_slab)
                return -ENOMEM;
 
        ldlm_lock_slab = kmem_cache_create("ldlm_locks",
                              sizeof(struct ldlm_lock), 0,
                              SLAB_HWCACHE_ALIGN | SLAB_DESTROY_BY_RCU, NULL);
-       if (ldlm_lock_slab == NULL) {
+       if (!ldlm_lock_slab) {
                kmem_cache_destroy(ldlm_resource_slab);
                return -ENOMEM;
        }
        ldlm_interval_slab = kmem_cache_create("interval_node",
                                        sizeof(struct ldlm_interval),
                                        0, SLAB_HWCACHE_ALIGN, NULL);
-       if (ldlm_interval_slab == NULL) {
+       if (!ldlm_interval_slab) {
                kmem_cache_destroy(ldlm_resource_slab);
                kmem_cache_destroy(ldlm_lock_slab);
                return -ENOMEM;
 
         */
        obd = container_of(pl, struct ldlm_namespace,
                           ns_pool)->ns_obd;
-       LASSERT(obd != NULL);
        read_lock(&obd->obd_pool_lock);
        pl->pl_server_lock_volume = obd->obd_pool_slv;
        atomic_set(&pl->pl_limit, obd->obd_pool_limit);
        spin_unlock(&pl->pl_lock);
 
  recalc:
-       if (pl->pl_ops->po_recalc != NULL) {
+       if (pl->pl_ops->po_recalc) {
                count = pl->pl_ops->po_recalc(pl);
                lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
                                    count);
 {
        int cancel = 0;
 
-       if (pl->pl_ops->po_shrink != NULL) {
+       if (pl->pl_ops->po_shrink) {
                cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask);
                if (nr > 0) {
                        lprocfs_counter_add(pl->pl_stats,
 
 static void ldlm_pool_debugfs_fini(struct ldlm_pool *pl)
 {
-       if (pl->pl_stats != NULL) {
+       if (pl->pl_stats) {
                lprocfs_free_stats(&pl->pl_stats);
                pl->pl_stats = NULL;
        }
-       if (pl->pl_debugfs_entry != NULL) {
+       if (pl->pl_debugfs_entry) {
                ldebugfs_remove(&pl->pl_debugfs_entry);
                pl->pl_debugfs_entry = NULL;
        }
                        continue;
                }
 
-               if (ns_old == NULL)
+               if (!ns_old)
                        ns_old = ns;
 
                ldlm_namespace_get(ns);
                        continue;
                }
 
-               if (ns_old == NULL)
+               if (!ns_old)
                        ns_old = ns;
 
                spin_lock(&ns->ns_lock);
        struct l_wait_info lwi = { 0 };
        struct task_struct *task;
 
-       if (ldlm_pools_thread != NULL)
+       if (ldlm_pools_thread)
                return -EALREADY;
 
        ldlm_pools_thread = kzalloc(sizeof(*ldlm_pools_thread), GFP_NOFS);
 
 static void ldlm_pools_thread_stop(void)
 {
-       if (ldlm_pools_thread == NULL)
+       if (!ldlm_pools_thread)
                return;
 
        thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
 
        struct obd_import *imp;
        struct obd_device *obd;
 
-       if (lock->l_conn_export == NULL) {
+       if (!lock->l_conn_export) {
                static unsigned long next_dump, last_dump;
 
                LCONSOLE_WARN("lock timed out (enqueued at %lld, %llds ago)\n",
        obd = class_exp2obd(lock->l_conn_export);
 
        /* if this is a local lock, then there is no import */
-       if (obd != NULL)
+       if (obd)
                imp = obd->u.cli.cl_import;
 
        /* Wait a long time for enqueue - server may have to callback a
                                       interrupted_completion_wait, &lwd);
        }
 
-       if (imp != NULL) {
+       if (imp) {
                spin_lock(&imp->imp_lock);
                lwd.lwd_conn_cnt = imp->imp_conn_cnt;
                spin_unlock(&imp->imp_lock);
 
        /* Before we return, swab the reply */
        reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
-       if (reply == NULL) {
+       if (!reply) {
                rc = -EPROTO;
                goto cleanup;
        }
 
        if (lvb_len != 0) {
-               LASSERT(lvb != NULL);
+               LASSERT(lvb);
 
                size = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB,
                                            RCL_SERVER);
 
                        rc = ldlm_lock_change_resource(ns, lock,
                                        &reply->lock_desc.l_resource.lr_name);
-                       if (rc || lock->l_resource == NULL) {
+                       if (rc || !lock->l_resource) {
                                rc = -ENOMEM;
                                goto cleanup;
                        }
 
        if (!is_replay) {
                rc = ldlm_lock_enqueue(ns, &lock, NULL, flags);
-               if (lock->l_completion_ast != NULL) {
+               if (lock->l_completion_ast) {
                        int err = lock->l_completion_ast(lock, *flags, NULL);
 
                        if (!rc)
                }
        }
 
-       if (lvb_len && lvb != NULL) {
+       if (lvb_len && lvb) {
                /* Copy the LVB here, and not earlier, because the completion
                 * AST (if any) can override what we got in the reply */
                memcpy(lvb, lock->l_lvb_data, lvb_len);
        LIST_HEAD(head);
        int rc;
 
-       if (cancels == NULL)
+       if (!cancels)
                cancels = &head;
        if (ns_connect_cancelset(ns)) {
                /* Estimate the amount of available space in the request. */
        int                 rc, err;
        struct ptlrpc_request *req;
 
-       LASSERT(exp != NULL);
-
        ns = exp->exp_obd->obd_namespace;
 
        /* If we're replaying this lock, just check some invariants.
         * If we're creating a new lock, get everything all setup nice. */
        if (is_replay) {
                lock = ldlm_handle2lock_long(lockh, 0);
-               LASSERT(lock != NULL);
+               LASSERT(lock);
                LDLM_DEBUG(lock, "client-side enqueue START");
                LASSERT(exp == lock->l_conn_export);
        } else {
                lock = ldlm_lock_create(ns, res_id, einfo->ei_type,
                                        einfo->ei_mode, &cbs, einfo->ei_cbdata,
                                        lvb_len, lvb_type);
-               if (lock == NULL)
+               if (!lock)
                        return -ENOMEM;
                /* for the local lock, add the reference */
                ldlm_lock_addref_internal(lock, einfo->ei_mode);
                ldlm_lock2handle(lock, lockh);
-               if (policy != NULL)
-                               lock->l_policy_data = *policy;
+               if (policy)
+                       lock->l_policy_data = *policy;
 
                if (einfo->ei_type == LDLM_EXTENT)
                        lock->l_req_extent = policy->l_extent;
 
        /* lock not sent to server yet */
 
-       if (reqp == NULL || *reqp == NULL) {
+       if (!reqp || !*reqp) {
                req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
                                                &RQF_LDLM_ENQUEUE,
                                                LUSTRE_DLM_VERSION,
                                                LDLM_ENQUEUE);
-               if (req == NULL) {
+               if (!req) {
                        failed_lock_cleanup(ns, lock, einfo->ei_mode);
                        LDLM_LOCK_RELEASE(lock);
                        return -ENOMEM;
                     policy->l_extent.end == OBD_OBJECT_EOF));
 
        if (async) {
-               LASSERT(reqp != NULL);
+               LASSERT(reqp);
                return 0;
        }
 
        else
                rc = err;
 
-       if (!req_passed_in && req != NULL) {
+       if (!req_passed_in && req) {
                ptlrpc_req_finished(req);
                if (reqp)
                        *reqp = NULL;
        int max, packed = 0;
 
        dlm = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
-       LASSERT(dlm != NULL);
+       LASSERT(dlm);
 
        /* Check the room in the request buffer. */
        max = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT) -
        int free, sent = 0;
        int rc = 0;
 
-       LASSERT(exp != NULL);
+       LASSERT(exp);
        LASSERT(count > 0);
 
        CFS_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL, cfs_fail_val);
 
        while (1) {
                imp = class_exp2cliimp(exp);
-               if (imp == NULL || imp->imp_invalid) {
+               if (!imp || imp->imp_invalid) {
                        CDEBUG(D_DLMTRACE,
                               "skipping cancel on invalid import %p\n", imp);
                        return count;
                }
 
                req = ptlrpc_request_alloc(imp, &RQF_LDLM_CANCEL);
-               if (req == NULL) {
+               if (!req) {
                        rc = -ENOMEM;
                        goto out;
                }
 
 static inline struct ldlm_pool *ldlm_imp2pl(struct obd_import *imp)
 {
-       LASSERT(imp != NULL);
        return &imp->imp_obd->obd_namespace->ns_pool;
 }
 
 
        /* concurrent cancels on the same handle can happen */
        lock = ldlm_handle2lock_long(lockh, LDLM_FL_CANCELING);
-       if (lock == NULL) {
+       if (!lock) {
                LDLM_DEBUG_NOLOCK("lock is already being destroyed\n");
                return 0;
        }
                count += unused - ns->ns_max_unused;
 
        pf = ldlm_cancel_lru_policy(ns, flags);
-       LASSERT(pf != NULL);
+       LASSERT(pf);
 
        while (!list_empty(&ns->ns_unused_list)) {
                ldlm_policy_res_t result;
 
        lock_res(res);
        list_for_each_entry(lock, &res->lr_granted, l_res_link) {
-               if (opaque != NULL && lock->l_ast_data != opaque) {
+               if (opaque && lock->l_ast_data != opaque) {
                        LDLM_ERROR(lock, "data %p doesn't match opaque %p",
                                   lock->l_ast_data, opaque);
                        continue;
        int rc;
 
        res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
-       if (res == NULL) {
+       if (!res) {
                /* This is not a problem. */
                CDEBUG(D_INFO, "No resource %llu\n", res_id->name[0]);
                return 0;
                .lc_opaque      = opaque,
        };
 
-       if (ns == NULL)
+       if (!ns)
                return ELDLM_OK;
 
-       if (res_id != NULL) {
+       if (res_id) {
                return ldlm_cli_cancel_unused_resource(ns, res_id, NULL,
                                                       LCK_MINMODE, flags,
                                                       opaque);
        struct ldlm_resource *res;
        int rc;
 
-       if (ns == NULL) {
+       if (!ns) {
                CERROR("must pass in namespace\n");
                LBUG();
        }
 
        res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
-       if (res == NULL)
+       if (!res)
                return 0;
 
        LDLM_RESOURCE_ADDREF(res);
                goto out;
 
        reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
-       if (reply == NULL) {
+       if (!reply) {
                rc = -EPROTO;
                goto out;
        }
 
        req = ptlrpc_request_alloc_pack(imp, &RQF_LDLM_ENQUEUE,
                                        LUSTRE_DLM_VERSION, LDLM_ENQUEUE);
-       if (req == NULL)
+       if (!req)
                return -ENOMEM;
 
        /* We're part of recovery, so don't wait for it. */
 
        else
                ldebugfs_remove(&ns->ns_debugfs_entry);
 
-       if (ns->ns_stats != NULL)
+       if (ns->ns_stats)
                lprocfs_free_stats(&ns->ns_stats);
 }
 
                                   "%s", ldlm_ns_name(ns));
 
        ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0);
-       if (ns->ns_stats == NULL) {
+       if (!ns->ns_stats) {
                kobject_put(&ns->ns_kobj);
                return -ENOMEM;
        }
        } else {
                ns_entry = debugfs_create_dir(ldlm_ns_name(ns),
                                              ldlm_ns_debugfs_dir);
-               if (ns_entry == NULL)
+               if (!ns_entry)
                        return -ENOMEM;
                ns->ns_debugfs_entry = ns_entry;
        }
        int                 idx;
        int                 rc;
 
-       LASSERT(obd != NULL);
+       LASSERT(obd);
 
        rc = ldlm_get_ref();
        if (rc) {
                                         CFS_HASH_BIGNAME |
                                         CFS_HASH_SPIN_BKTLOCK |
                                         CFS_HASH_NO_ITEMREF);
-       if (ns->ns_rs_hash == NULL)
+       if (!ns->ns_rs_hash)
                goto out_ns;
 
        cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, idx) {
                        break;
                }
 
-               if (lock == NULL) {
+               if (!lock) {
                        unlock_res(res);
                        break;
                }
  */
 int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
 {
-       if (ns == NULL) {
+       if (!ns) {
                CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
                return ELDLM_OK;
        }
        int idx;
 
        res = kmem_cache_alloc(ldlm_resource_slab, GFP_NOFS | __GFP_ZERO);
-       if (res == NULL)
+       if (!res)
                return NULL;
 
        INIT_LIST_HEAD(&res->lr_granted);
        __u64            version;
        int                   ns_refcount = 0;
 
-       LASSERT(ns != NULL);
-       LASSERT(parent == NULL);
-       LASSERT(ns->ns_rs_hash != NULL);
+       LASSERT(!parent);
+       LASSERT(ns->ns_rs_hash);
        LASSERT(name->name[0] != 0);
 
        cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0);
        hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
-       if (hnode != NULL) {
+       if (hnode) {
                cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
                res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
                /* Synchronize with regard to resource creation. */
        hnode = (version == cfs_hash_bd_version_get(&bd)) ?  NULL :
                cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
 
-       if (hnode != NULL) {
+       if (hnode) {
                /* Someone won the race and already added the resource. */
                cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
                /* Clean lu_ref for failed resource. */