struct completion        pl_kobj_unregister;
 };
 
-typedef int (*ldlm_cancel_for_recovery)(struct ldlm_lock *lock);
+typedef int (*ldlm_cancel_cbt)(struct ldlm_lock *lock);
 
 /**
  * LVB operations.
        /** Limit of parallel AST RPC count. */
        unsigned                ns_max_parallel_ast;
 
-       /** Callback to cancel locks before replaying it during recovery. */
-       ldlm_cancel_for_recovery ns_cancel_for_recovery;
+       /**
+        * Callback to check whether a lock is eligible to be canceled by
+        * ELC or during recovery.
+        */
+       ldlm_cancel_cbt         ns_cancel;
 
        /** LDLM lock stats */
        struct lprocfs_stats    *ns_stats;
 };
 
 static inline void ns_register_cancel(struct ldlm_namespace *ns,
-                                     ldlm_cancel_for_recovery arg)
+                                     ldlm_cancel_cbt arg)
 {
-       ns->ns_cancel_for_recovery = arg;
+       ns->ns_cancel = arg;
 }
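
A usage sketch of the renamed hook: a client obd registers its weight callback once its namespace exists. demofs_cancel_weight and its placement are hypothetical and not part of this patch; only ns_register_cancel() above is real.

/* Hypothetical callback: report unused IBITS locks as cancelable
 * (non-zero) and keep everything else (zero).
 */
static int demofs_cancel_weight(struct ldlm_lock *lock)
{
	return lock->l_resource->lr_type == LDLM_IBITS;
}

/* in the client setup path, after the namespace is allocated: */
ns_register_cancel(obd->obd_namespace, demofs_cancel_weight);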
 
 struct ldlm_lock;
 
                                                    int count)
 {
        ldlm_policy_res_t result = LDLM_POLICY_CANCEL_LOCK;
-       ldlm_cancel_for_recovery cb = ns->ns_cancel_for_recovery;
 
        /* don't check added & count since we want to process all locks
         * from the unused list.
         */
        switch (lock->l_resource->lr_type) {
        case LDLM_EXTENT:
        case LDLM_IBITS:
-               if (cb && cb(lock))
+               if (ns->ns_cancel && ns->ns_cancel(lock) != 0)
                        break;
        default:
                result = LDLM_POLICY_SKIP_LOCK;
        /* Stop when the SLV has not yet come from the server, or lv is
         * smaller than it.
         */
-       return (slv == 0 || lv < slv) ?
-               LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
+       if (slv == 0 || lv < slv)
+               return LDLM_POLICY_KEEP_LOCK;
+
+       if (ns->ns_cancel && ns->ns_cancel(lock) == 0)
+               return LDLM_POLICY_KEEP_LOCK;
+
+       return LDLM_POLICY_CANCEL_LOCK;
 }
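
For context on how these verdicts are consumed: the LRU scan stops outright on LDLM_POLICY_KEEP_LOCK, leaves the lock in place on LDLM_POLICY_SKIP_LOCK, and collects it on LDLM_POLICY_CANCEL_LOCK. A simplified sketch of that loop, modeled on ldlm_prepare_lru_list (pf is the policy function chosen for the scan; locking and refcounting are omitted):

	list_for_each_entry_safe(lock, next, &ns->ns_unused_list, l_lru) {
		switch (pf(ns, lock, unused, added, count)) {
		case LDLM_POLICY_KEEP_LOCK:
			/* a lock worth keeping ends the whole scan */
			return added;
		case LDLM_POLICY_SKIP_LOCK:
			/* leave it on the LRU, move to the next lock */
			continue;
		case LDLM_POLICY_CANCEL_LOCK:
		default:
			/* collect the lock for batched cancellation */
			list_move(&lock->l_lru, cancels);
			added++;
			break;
		}
	}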
 
 /**
                                                 int unused, int added,
                                                 int count)
 {
-       /* Stop LRU processing if young lock is found and we reach past count */
-       return ((added >= count) &&
-               time_before(cfs_time_current(),
-                           cfs_time_add(lock->l_last_used, ns->ns_max_age))) ?
-               LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
+       if (added >= count)
+               return LDLM_POLICY_KEEP_LOCK;
+
+       if (time_before(cfs_time_current(),
+                       cfs_time_add(lock->l_last_used, ns->ns_max_age)))
+               return LDLM_POLICY_KEEP_LOCK;
+
+       if (ns->ns_cancel && ns->ns_cancel(lock) == 0)
+               return LDLM_POLICY_KEEP_LOCK;
+
+       return LDLM_POLICY_CANCEL_LOCK;
 }
 
 /**
 
  * recovery; a non-zero value will be returned if the lock can be
  * canceled, or zero if not.
  */
-static int mdc_cancel_for_recovery(struct ldlm_lock *lock)
+static int mdc_cancel_weight(struct ldlm_lock *lock)
 {
        if (lock->l_resource->lr_type != LDLM_IBITS)
                return 0;
        sptlrpc_lprocfs_cliobd_attach(obd);
        ptlrpc_lprocfs_register_obd(obd);
 
-       ns_register_cancel(obd->obd_namespace, mdc_cancel_for_recovery);
+       ns_register_cancel(obd->obd_namespace, mdc_cancel_weight);
 
        obd->obd_namespace->ns_lvbo = &inode_lvbo;
 
 
 {
        struct cl_page *page = ops->ops_cl.cpl_page;
 
-       if (cl_page_is_vmlocked(env, page)) {
+       if (cl_page_is_vmlocked(env, page) ||
+           PageDirty(page->cp_vmpage) || PageWriteback(page->cp_vmpage)) {
                (*(unsigned long *)cbdata)++;
                return CLP_GANG_ABORT;
        }
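
The added checks make the weigh callback count dirty and writeback pages as busy, not just VM-locked ones, so a lock covering in-flight page cache state keeps a non-zero weight. The same predicate in isolation (page_is_busy is a hypothetical name; the three calls are the ones used above):

static bool page_is_busy(const struct lu_env *env, struct cl_page *page)
{
	/* a page pinned by the VM, dirty, or under writeback means the
	 * covering lock still has work outstanding and must be kept
	 */
	return cl_page_is_vmlocked(env, page) ||
	       PageDirty(page->cp_vmpage) ||
	       PageWriteback(page->cp_vmpage);
}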
 
        if (*flags & LDLM_FL_TEST_LOCK)
                return -ENOLCK;
        if (intent) {
-               LIST_HEAD(cancels);
-
                req = ptlrpc_request_alloc(class_exp2cliimp(exp),
                                           &RQF_LDLM_ENQUEUE_LVB);
                if (!req)
                        return -ENOMEM;
 
-               rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0);
-               if (rc) {
+               rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_ENQUEUE);
+               if (rc < 0) {
                        ptlrpc_request_free(req);
                        return rc;
                }
  * \retval zero the lock can't be canceled
  * \retval other ok to cancel
  */
-static int osc_cancel_for_recovery(struct ldlm_lock *lock)
+static int osc_cancel_weight(struct ldlm_lock *lock)
 {
        /*
-        * Cancel all unused extent lock in granted mode LCK_PR or LCK_CR.
-        *
-        * XXX as a future improvement, we can also cancel unused write lock
-        * if it doesn't have dirty data and active mmaps.
+        * Cancel all unused and granted extent locks.
         */
        if (lock->l_resource->lr_type == LDLM_EXTENT &&
-           (lock->l_granted_mode == LCK_PR ||
-            lock->l_granted_mode == LCK_CR) && osc_ldlm_weigh_ast(lock) == 0)
+           lock->l_granted_mode == lock->l_req_mode &&
+           osc_ldlm_weigh_ast(lock) == 0)
                return 1;
 
        return 0;
        }
 
        INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
-       ns_register_cancel(obd->obd_namespace, osc_cancel_for_recovery);
+       ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
        return rc;
 
 out_ptlrpcd_work: