Rename struct ccc_grouplock to struct ll_grouplock and move its
definition from vvp_internal.h to llite_internal.h.
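
For reference, a condensed sketch of the caller pattern after the rename
(the function names below are illustrative only, not taken from the tree;
the real logic lives in ll_get_grouplock()/ll_put_grouplock() in
llite/file.c, which also take lli_lock and update fd_flags under it):

    static int example_take_grouplock(struct inode *inode,
                                      struct ll_file_data *fd,
                                      unsigned long gid, int nonblock)
    {
            struct ll_grouplock grouplock;  /* was struct ccc_grouplock */
            int rc;

            rc = cl_get_grouplock(ll_i2info(inode)->lli_clob, gid,
                                  nonblock, &grouplock);
            if (rc)
                    return rc;

            /* cl_get_grouplock() fills lg_env, lg_io, lg_lock, lg_gid */
            fd->fd_grouplock = grouplock;
            fd->fd_flags |= LL_FILE_GROUP_LOCKED;
            return 0;
    }

    static void example_drop_grouplock(struct ll_file_data *fd)
    {
            struct ll_grouplock grouplock = fd->fd_grouplock;

            fd->fd_flags &= ~LL_FILE_GROUP_LOCKED;
            memset(&fd->fd_grouplock, 0, sizeof(fd->fd_grouplock));
            cl_put_grouplock(&grouplock);   /* drops lg_lock, puts lg_env */
    }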
Signed-off-by: John L. Hammond <john.hammond@intel.com>
Signed-off-by: Jinshan Xiong <jinshan.xiong@intel.com>
Reviewed-on: http://review.whamcloud.com/13714
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-5971
Reviewed-by: Bobi Jam <bobijam@hotmail.com>
Reviewed-by: James Simmons <uja.ornl@gmail.com>
Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 
        /* clear group lock, if present */
        if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED))
-               ll_put_grouplock(inode, file, fd->fd_grouplock.cg_gid);
+               ll_put_grouplock(inode, file, fd->fd_grouplock.lg_gid);
 
        if (fd->fd_lease_och) {
                bool lease_broken;
 {
        struct ll_inode_info   *lli = ll_i2info(inode);
        struct ll_file_data    *fd = LUSTRE_FPRIVATE(file);
-       struct ccc_grouplock    grouplock;
+       struct ll_grouplock    grouplock;
        int                  rc;
 
        if (arg == 0) {
        spin_lock(&lli->lli_lock);
        if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
                CWARN("group lock already existed with gid %lu\n",
-                     fd->fd_grouplock.cg_gid);
+                     fd->fd_grouplock.lg_gid);
                spin_unlock(&lli->lli_lock);
                return -EINVAL;
        }
-       LASSERT(!fd->fd_grouplock.cg_lock);
+       LASSERT(!fd->fd_grouplock.lg_lock);
        spin_unlock(&lli->lli_lock);
 
        rc = cl_get_grouplock(ll_i2info(inode)->lli_clob,
 {
        struct ll_inode_info   *lli = ll_i2info(inode);
        struct ll_file_data    *fd = LUSTRE_FPRIVATE(file);
-       struct ccc_grouplock    grouplock;
+       struct ll_grouplock    grouplock;
 
        spin_lock(&lli->lli_lock);
        if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
                spin_unlock(&lli->lli_lock);
                CWARN("no group lock held\n");
                return -EINVAL;
        }
-       LASSERT(fd->fd_grouplock.cg_lock);
+       LASSERT(fd->fd_grouplock.lg_lock);
 
-       if (fd->fd_grouplock.cg_gid != arg) {
+       if (fd->fd_grouplock.lg_gid != arg) {
                CWARN("group lock %lu doesn't match current id %lu\n",
-                     arg, fd->fd_grouplock.cg_gid);
+                     arg, fd->fd_grouplock.lg_gid);
                spin_unlock(&lli->lli_lock);
                return -EINVAL;
        }
 
 #include "../include/obd.h"
 #include "../include/cl_object.h"
 
-#include "vvp_internal.h"
 #include "../include/lustre_lite.h"
+#include "llite_internal.h"
 
 /* Initialize the default and maximum LOV EA and cookie sizes.  This allows
  * us to make MDS RPCs with large enough reply buffers to hold the
 #define GROUPLOCK_SCOPE "grouplock"
 
 int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
-                    struct ccc_grouplock *cg)
+                    struct ll_grouplock *cg)
 {
        struct lu_env     *env;
        struct cl_io       *io;
                return rc;
        }
 
-       cg->cg_env  = cl_env_get(&refcheck);
-       cg->cg_io   = io;
-       cg->cg_lock = lock;
-       cg->cg_gid  = gid;
-       LASSERT(cg->cg_env == env);
+       cg->lg_env  = cl_env_get(&refcheck);
+       cg->lg_io   = io;
+       cg->lg_lock = lock;
+       cg->lg_gid  = gid;
+       LASSERT(cg->lg_env == env);
 
        cl_env_unplant(env, &refcheck);
        return 0;
 }
 
-void cl_put_grouplock(struct ccc_grouplock *cg)
+void cl_put_grouplock(struct ll_grouplock *cg)
 {
-       struct lu_env  *env  = cg->cg_env;
-       struct cl_io   *io   = cg->cg_io;
-       struct cl_lock *lock = cg->cg_lock;
+       struct lu_env  *env  = cg->lg_env;
+       struct cl_io   *io   = cg->lg_io;
+       struct cl_lock *lock = cg->lg_lock;
        int          refcheck;
 
-       LASSERT(cg->cg_env);
-       LASSERT(cg->cg_gid);
+       LASSERT(cg->lg_env);
+       LASSERT(cg->lg_gid);
 
        cl_env_implant(env, &refcheck);
        cl_env_put(env, &refcheck);
 
                                             */
 };
 
+struct ll_grouplock {
+       struct lu_env   *lg_env;
+       struct cl_io    *lg_io;
+       struct cl_lock  *lg_lock;
+       unsigned long    lg_gid;
+};
+
 enum lli_flags {
        /* MDS has an authority for the Size-on-MDS attributes. */
        LLIF_MDS_SIZE_LOCK      = (1 << 0),
 struct lustre_handle;
 struct ll_file_data {
        struct ll_readahead_state fd_ras;
-       struct ccc_grouplock fd_grouplock;
+       struct ll_grouplock fd_grouplock;
        __u64 lfd_pos;
        __u32 fd_flags;
        fmode_t fd_omode;
 
 void ll_ras_enter(struct file *f);
 
+/* llite/lcommon_misc.c */
+int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
+                    struct ll_grouplock *cg);
+void cl_put_grouplock(struct ll_grouplock *cg);
+
 /* llite/lproc_llite.c */
 int ldebugfs_register_mountpoint(struct dentry *parent,
                                 struct super_block *sb, char *osc, char *mdc);
 
                  struct obd_device *watched,
                  enum obd_notify_event ev, void *owner, void *data);
 
-struct ccc_grouplock {
-       struct lu_env   *cg_env;
-       struct cl_io    *cg_io;
-       struct cl_lock  *cg_lock;
-       unsigned long    cg_gid;
-};
-
-int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
-                    struct ccc_grouplock *cg);
-void cl_put_grouplock(struct ccc_grouplock *cg);
-
 /**
  * New interfaces to get and put lov_stripe_md from lov layer. This violates
  * layering because lov_stripe_md is supposed to be a private data in lov.
 
 
        if (vio->vui_fd && (vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
                descr->cld_mode = CLM_GROUP;
-               descr->cld_gid  = vio->vui_fd->fd_grouplock.cg_gid;
+               descr->cld_gid  = vio->vui_fd->fd_grouplock.lg_gid;
        } else {
                descr->cld_mode  = mode;
        }