if (acl && acl->a_count > GFS2_ACL_MAX_ENTRIES(GFS2_SB(inode)))
return -E2BIG;
- ret = gfs2_qa_alloc(ip);
+ ret = gfs2_qa_get(ip);
if (ret)
return ret;
if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
if (ret)
- return ret;
+ goto out;
need_unlock = true;
}
unlock:
if (need_unlock)
gfs2_glock_dq_uninit(&gh);
+out:
+ gfs2_qa_put(ip);
return ret;
}
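
The hunk above sets the pattern for the rest of the patch: once gfs2_qa_get() has succeeded, every later failure path must reach the matching gfs2_qa_put(), so bare "return ret" statements become "goto out". A minimal standalone sketch of this unwind idiom (the helper names are illustrative, not GFS2 functions):

#include <stdio.h>

static int resource_get(void)  { return 0; }    /* stands in for gfs2_qa_get() */
static void resource_put(void) { puts("put"); } /* stands in for gfs2_qa_put() */
static int locked_work(void)   { return -1; }   /* simulated failure */

static int operation(void)
{
	int ret;

	ret = resource_get();
	if (ret)
		return ret;	/* nothing held yet: a plain return is safe */

	ret = locked_work();
	if (ret)
		goto out;	/* reference held: unwind through the label */

	/* ... further work that may also "goto out" ... */
out:
	resource_put();
	return ret;
}

int main(void)
{
	return operation() ? 1 : 0;
}
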
inode_dio_wait(inode);
- ret = gfs2_qa_alloc(ip);
+ ret = gfs2_qa_get(ip);
if (ret)
goto out;
sb_start_pagefault(inode->i_sb);
- ret = gfs2_qa_alloc(ip);
+ ret = gfs2_qa_get(ip);
if (ret)
goto out;
out_unlock:
gfs2_glock_dq(&gh);
out_uninit:
+ gfs2_qa_put(ip);
gfs2_holder_uninit(&gh);
if (ret == 0) {
set_page_dirty(page);
gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
file->private_data = fp;
+ if (file->f_mode & FMODE_WRITE) {
+ ret = gfs2_qa_get(GFS2_I(inode));
+ if (ret)
+ goto fail;
+ }
return 0;
+
+fail:
+ kfree(file->private_data);
+ file->private_data = NULL;
+ return ret;
}
/**
kfree(file->private_data);
file->private_data = NULL;
- if (!(file->f_mode & FMODE_WRITE))
- return 0;
-
- gfs2_rsqa_delete(ip, &inode->i_writecount);
+ if (file->f_mode & FMODE_WRITE)
+ gfs2_rsqa_delete(ip, &inode->i_writecount);
return 0;
}
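
gfs2_open_common() and gfs2_release() form a matched pair: the quota-data reference is taken only for FMODE_WRITE opens, so the release side must test the same condition before dropping it through gfs2_rsqa_delete(). A toy model of that conditional pairing (all names invented for illustration):

#include <stdbool.h>
#include <stdio.h>

static int qa_ref;	/* models ip->i_qadata->qa_ref */

static void toy_open(bool writable)
{
	if (writable)
		qa_ref++;	/* gfs2_qa_get() in gfs2_open_common() */
}

static void toy_release(bool writable)
{
	if (writable)
		qa_ref--;	/* gfs2_qa_put() via gfs2_rsqa_delete() */
}

int main(void)
{
	toy_open(true);		/* writer: takes a reference */
	toy_open(false);	/* reader: takes none */
	toy_release(false);
	toy_release(true);
	printf("qa_ref = %d\n", qa_ref);	/* must end at 0 */
	return qa_ref != 0;
}
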
struct gfs2_inode *ip = GFS2_I(inode);
ssize_t ret;
- ret = gfs2_qa_alloc(ip);
+ ret = gfs2_qa_get(ip);
if (ret)
return ret;
ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
if (ret)
- return ret;
+ goto out;
gfs2_glock_dq_uninit(&gh);
}
out_unlock:
inode_unlock(inode);
+out:
+ gfs2_qa_put(ip);
return ret;
}
if (mode & FALLOC_FL_PUNCH_HOLE) {
ret = __gfs2_punch_hole(file, offset, len);
} else {
- ret = gfs2_qa_alloc(ip);
+ ret = gfs2_qa_get(ip);
if (ret)
goto out_putw;
ret = __gfs2_fallocate(file, mode, offset, len);
if (ret)
gfs2_rs_deltree(&ip->i_res);
+ gfs2_qa_put(ip);
}
out_putw:
{
int error;
struct gfs2_inode *ip = GFS2_I(out->f_mapping->host);
+ ssize_t ret;
- error = gfs2_qa_alloc(ip);
+ error = gfs2_qa_get(ip);
if (error)
return (ssize_t)error;
gfs2_size_hint(out, *ppos, len);
- return iter_file_splice_write(pipe, out, ppos, len, flags);
+ ret = iter_file_splice_write(pipe, out, ppos, len, flags);
+ gfs2_qa_put(ip);
+ return ret;
}
#ifdef CONFIG_GFS2_FS_LOCKING_DLM
struct gfs2_quota_data *qa_qd[2 * GFS2_MAXQUOTAS];
struct gfs2_holder qa_qd_ghs[2 * GFS2_MAXQUOTAS];
unsigned int qa_qd_num;
+ int qa_ref;
};
/* Resource group multi-block reservation, in order of appearance:
if (!name->len || name->len > GFS2_FNAMESIZE)
return -ENAMETOOLONG;
- error = gfs2_qa_alloc(dip);
+ error = gfs2_qa_get(dip);
if (error)
return error;
error = gfs2_rindex_update(sdp);
if (error)
- return error;
+ goto fail;
error = gfs2_glock_nq_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
if (error)
goto fail_gunlock;
ip = GFS2_I(inode);
- error = gfs2_qa_alloc(ip);
+ error = gfs2_qa_get(ip);
if (error)
goto fail_free_acls;
clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
gfs2_glock_put(io_gl);
fail_free_inode:
+ gfs2_qa_put(ip);
if (ip->i_gl) {
glock_clear_object(ip->i_gl, ip);
gfs2_glock_put(ip->i_gl);
if (gfs2_holder_initialized(ghs + 1))
gfs2_glock_dq_uninit(ghs + 1);
fail:
+ gfs2_qa_put(dip);
return error;
}
if (S_ISDIR(inode->i_mode))
return -EPERM;
- error = gfs2_qa_alloc(dip);
+ error = gfs2_qa_get(dip);
if (error)
return error;
out_child:
gfs2_glock_dq(ghs);
out_parent:
+ gfs2_qa_put(dip);
gfs2_holder_uninit(ghs);
gfs2_holder_uninit(ghs + 1);
return error;
if (error)
return error;
- error = gfs2_qa_alloc(ndip);
+ error = gfs2_qa_get(ndip);
if (error)
return error;
if (gfs2_holder_initialized(&r_gh))
gfs2_glock_dq_uninit(&r_gh);
out:
+ gfs2_qa_put(ndip);
return error;
}
ouid = nuid = NO_UID_QUOTA_CHANGE;
if (!(attr->ia_valid & ATTR_GID) || gid_eq(ogid, ngid))
ogid = ngid = NO_GID_QUOTA_CHANGE;
-
- error = gfs2_qa_alloc(ip);
+ error = gfs2_qa_get(ip);
if (error)
- goto out;
+ return error;
error = gfs2_rindex_update(sdp);
if (error)
out_gunlock_q:
gfs2_quota_unlock(ip);
out:
+ gfs2_qa_put(ip);
return error;
}
struct gfs2_holder i_gh;
int error;
- error = gfs2_qa_alloc(ip);
+ error = gfs2_qa_get(ip);
if (error)
return error;
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
if (error)
- return error;
+ goto out;
error = -EPERM;
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
- goto out;
+ goto error;
error = setattr_prepare(dentry, attr);
if (error)
- goto out;
+ goto error;
if (attr->ia_valid & ATTR_SIZE)
error = gfs2_setattr_size(inode, attr->ia_size);
error = posix_acl_chmod(inode, inode->i_mode);
}
-out:
+error:
if (!error)
mark_inode_dirty(inode);
gfs2_glock_dq_uninit(&i_gh);
+out:
+ gfs2_qa_put(ip);
return error;
}
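
gfs2_setattr() now unwinds through two labels: "error:" releases the glock, which is only held once gfs2_glock_nq_init() has succeeded, and falls through to "out:", which always drops the qadata reference; failures before the lock is taken jump straight to "out:". A compilable sketch of this layered unwind, with invented helpers:

#include <stdio.h>

static int get_ref(void)    { return 0; }
static void put_ref(void)   { puts("ref dropped"); }
static int take_lock(void)  { return 0; }
static void drop_lock(void) { puts("lock dropped"); }

static int setattr_like(int fail_locked)
{
	int error;

	error = get_ref();
	if (error)
		return error;		/* nothing else held yet */

	error = take_lock();
	if (error)
		goto out;		/* ref held, lock not: skip drop_lock() */

	if (fail_locked) {
		error = -1;
		goto error;		/* both held: unwind in reverse order */
	}

error:
	drop_lock();
out:
	put_ref();
	return error;
}

int main(void)
{
	return setattr_like(1) ? 1 : 0;
}
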
}
/**
- * gfs2_qa_alloc - make sure we have a quota allocations data structure,
- * if necessary
+ * gfs2_qa_get - make sure we have a quota allocation data structure,
+ * if necessary, and take a reference on it
* @ip: the inode for this reservation
*/
-int gfs2_qa_alloc(struct gfs2_inode *ip)
+int gfs2_qa_get(struct gfs2_inode *ip)
{
int error = 0;
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
return 0;
down_write(&ip->i_rw_mutex);
if (ip->i_qadata == NULL) {
ip->i_qadata = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
- if (!ip->i_qadata)
+ if (!ip->i_qadata) {
error = -ENOMEM;
+ goto out;
+ }
}
+ ip->i_qadata->qa_ref++;
+out:
up_write(&ip->i_rw_mutex);
return error;
}
-void gfs2_qa_delete(struct gfs2_inode *ip, atomic_t *wcount)
+void gfs2_qa_put(struct gfs2_inode *ip)
{
down_write(&ip->i_rw_mutex);
- if (ip->i_qadata && ((wcount == NULL) || (atomic_read(wcount) <= 1))) {
+ if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) {
kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
ip->i_qadata = NULL;
}
up_write(&ip->i_rw_mutex);
}
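
Together these two functions turn i_qadata into lazily allocated, reference-counted state: the first gfs2_qa_get() allocates the structure, later calls only bump qa_ref, and the last gfs2_qa_put() frees it when the count drops to zero. Because every manipulation happens under i_rw_mutex, a plain int suffices for qa_ref. The same discipline as a self-contained userspace sketch, with a pthread mutex standing in for the rwsem (all names illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct qadata {
	int qa_ref;		/* reference count, guarded by the mutex */
	/* ... quota bookkeeping would live here ... */
};

struct toy_inode {
	pthread_mutex_t rw_mutex;	/* stands in for ip->i_rw_mutex */
	struct qadata *qadata;		/* stands in for ip->i_qadata */
};

/* First caller allocates; every caller takes a reference. */
static int qa_get(struct toy_inode *ip)
{
	int error = 0;

	pthread_mutex_lock(&ip->rw_mutex);
	if (ip->qadata == NULL) {
		ip->qadata = calloc(1, sizeof(*ip->qadata));
		if (ip->qadata == NULL) {
			error = -1;
			goto out;
		}
	}
	ip->qadata->qa_ref++;
out:
	pthread_mutex_unlock(&ip->rw_mutex);
	return error;
}

/* The caller dropping the last reference frees the structure. */
static void qa_put(struct toy_inode *ip)
{
	pthread_mutex_lock(&ip->rw_mutex);
	if (ip->qadata && --ip->qadata->qa_ref == 0) {
		free(ip->qadata);
		ip->qadata = NULL;
	}
	pthread_mutex_unlock(&ip->rw_mutex);
}

int main(void)
{
	struct toy_inode ip = { PTHREAD_MUTEX_INITIALIZER, NULL };

	if (qa_get(&ip))	/* allocates, qa_ref == 1 */
		return 1;
	qa_get(&ip);		/* reuses,    qa_ref == 2 */
	qa_put(&ip);		/* qa_ref == 1, still allocated */
	printf("qa_ref = %d\n", ip.qadata->qa_ref);
	qa_put(&ip);		/* last put frees */
	printf("freed: %s\n", ip.qadata == NULL ? "yes" : "no");
	return 0;
}
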
if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
return 0;
- if (ip->i_qadata == NULL) {
- error = gfs2_qa_alloc(ip);
- if (error)
- return error;
- }
+ error = gfs2_qa_get(ip);
+ if (error)
+ return error;
qd = ip->i_qadata->qa_qd;
if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
- gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
- return -EIO;
+ gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) {
+ error = -EIO;
+ goto out;
+ }
error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
if (error)
- goto out;
+ goto out_unhold;
ip->i_qadata->qa_qd_num++;
qd++;
error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
if (error)
- goto out;
+ goto out_unhold;
ip->i_qadata->qa_qd_num++;
qd++;
!uid_eq(uid, ip->i_inode.i_uid)) {
error = qdsb_get(sdp, make_kqid_uid(uid), qd);
if (error)
- goto out;
+ goto out_unhold;
ip->i_qadata->qa_qd_num++;
qd++;
}
!gid_eq(gid, ip->i_inode.i_gid)) {
error = qdsb_get(sdp, make_kqid_gid(gid), qd);
if (error)
- goto out;
+ goto out_unhold;
ip->i_qadata->qa_qd_num++;
qd++;
}
-out:
+out_unhold:
if (error)
gfs2_quota_unhold(ip);
+out:
return error;
}
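
The out_unhold/out split matters here: gfs2_quota_unhold() both releases whichever quota descriptors qdsb_get() had already acquired (qa_qd_num is incremented after each success) and drops the qadata reference via its own gfs2_qa_put(), so only the qdsb_get() failure paths may run it. A sketch of this acquire-several, release-what-you-got pattern (names invented):

#include <stdio.h>

#define MAX_QD 4

static int acquired;

static int qd_get(int i)
{
	if (i == 2)
		return -1;	/* simulate failure on the third descriptor */
	acquired++;
	return 0;
}

static void unhold_all(void)
{
	while (acquired > 0) {
		acquired--;
		printf("released one descriptor, %d left\n", acquired);
	}
}

static int hold_all(void)
{
	int error = 0;
	int i;

	for (i = 0; i < MAX_QD; i++) {
		error = qd_get(i);
		if (error)
			goto out_unhold;
	}
	return 0;

out_unhold:
	unhold_all();	/* releases only what qd_get() actually took */
	return error;
}

int main(void)
{
	return hold_all() ? 1 : 0;
}
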
if (ip->i_qadata == NULL)
return;
+
gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
qdsb_put(ip->i_qadata->qa_qd[x]);
ip->i_qadata->qa_qd[x] = NULL;
}
ip->i_qadata->qa_qd_num = 0;
+ gfs2_qa_put(ip);
}
static int sort_qd(const void *a, const void *b)
unsigned int nalloc = 0, blocks;
int error;
- error = gfs2_qa_alloc(ip);
+ error = gfs2_qa_get(ip);
if (error)
return error;
&data_blocks, &ind_blocks);
ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
- if (!ghs)
- return -ENOMEM;
+ if (!ghs) {
+ error = -ENOMEM;
+ goto out;
+ }
sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
inode_lock(&ip->i_inode);
error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
GL_NOCACHE, &ghs[qx]);
if (error)
- goto out;
+ goto out_dq;
}
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
if (error)
- goto out;
+ goto out_dq;
for (x = 0; x < num_qd; x++) {
offset = qd2offset(qda[x]);
gfs2_inplace_release(ip);
out_alloc:
gfs2_glock_dq_uninit(&i_gh);
-out:
+out_dq:
while (qx--)
gfs2_glock_dq_uninit(&ghs[qx]);
inode_unlock(&ip->i_inode);
kfree(ghs);
gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
+out:
+ gfs2_qa_put(ip);
return error;
}
if (ip->i_diskflags & GFS2_DIF_SYSTEM)
return;
+ BUG_ON(ip->i_qadata->qa_ref <= 0);
for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
qd = ip->i_qadata->qa_qd[x];
if (error)
return error;
- error = gfs2_qa_alloc(ip);
+ error = gfs2_qa_get(ip);
if (error)
goto out_put;
out_q:
gfs2_glock_dq_uninit(&q_gh);
out_unlockput:
+ gfs2_qa_put(ip);
inode_unlock(&ip->i_inode);
out_put:
qd_put(qd);
#define NO_UID_QUOTA_CHANGE INVALID_UID
#define NO_GID_QUOTA_CHANGE INVALID_GID
-extern int gfs2_qa_alloc(struct gfs2_inode *ip);
-extern void gfs2_qa_delete(struct gfs2_inode *ip, atomic_t *wcount);
+extern int gfs2_qa_get(struct gfs2_inode *ip);
+extern void gfs2_qa_put(struct gfs2_inode *ip);
extern int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
extern void gfs2_quota_unhold(struct gfs2_inode *ip);
if ((wcount == NULL) || (atomic_read(wcount) <= 1))
gfs2_rs_deltree(&ip->i_res);
up_write(&ip->i_rw_mutex);
- gfs2_qa_delete(ip, wcount);
+ gfs2_qa_put(ip);
}
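
Note the asymmetry gfs2_rsqa_delete() keeps: the block reservation is torn down only for the last writer (wcount NULL or <= 1), while the quota-data reference is dropped on every call, pairing with the reference each writable open now takes. A toy model of that contract (illustrative names only):

#include <stdio.h>

static int qa_ref = 2;	/* two writers currently hold references */

static void rsqa_delete(const int *wcount)
{
	if (wcount == NULL || *wcount <= 1)
		puts("last user: deleting block reservation");	/* gfs2_rs_deltree() */
	qa_ref--;						/* gfs2_qa_put() */
}

int main(void)
{
	int writers = 2;

	rsqa_delete(&writers);	/* not the last writer: reservation survives */
	writers--;
	rsqa_delete(&writers);	/* last writer: reservation goes away */
	printf("qa_ref = %d\n", qa_ref);	/* expect 0 */
	return qa_ref != 0;
}
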
/**
fs_warn(sdp, "gfs2_evict_inode: %d\n", error);
out:
truncate_inode_pages_final(&inode->i_data);
+ if (ip->i_qadata)
+ gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
gfs2_rsqa_delete(ip, NULL);
gfs2_ordered_del_inode(ip);
clear_inode(inode);
struct gfs2_holder gh;
int ret;
- ret = gfs2_qa_alloc(ip);
+ ret = gfs2_qa_get(ip);
if (ret)
return ret;
if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
if (ret)
- return ret;
+ goto out;
} else {
- if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
- return -EIO;
+ if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE)) {
+ ret = -EIO;
+ goto out;
+ }
gfs2_holder_mark_uninitialized(&gh);
}
ret = __gfs2_xattr_set(inode, name, value, size, flags, handler->flags);
if (gfs2_holder_initialized(&gh))
gfs2_glock_dq_uninit(&gh);
+out:
+ gfs2_qa_put(ip);
return ret;
}