xfs_inode_ag_walk(
struct xfs_mount *mp,
struct xfs_perag *pag,
- int (*execute)(struct xfs_inode *ip, int flags,
- void *args),
- int flags,
+ int (*execute)(struct xfs_inode *ip, void *args),
void *args,
int tag,
int iter_flags)
if ((iter_flags & XFS_AGITER_INEW_WAIT) &&
xfs_iflags_test(batch[i], XFS_INEW))
xfs_inew_wait(batch[i]);
- error = execute(batch[i], flags, args);
+ error = execute(batch[i], args);
xfs_irele(batch[i]);
if (error == -EAGAIN) {
skipped++;
xfs_inode_ag_iterator(
struct xfs_mount *mp,
int iter_flags,
- int (*execute)(struct xfs_inode *ip, int flags,
- void *args),
- int flags,
+ int (*execute)(struct xfs_inode *ip, void *args),
void *args,
int tag)
{
ag = 0;
while ((pag = xfs_inode_walk_get_perag(mp, ag, tag))) {
ag = pag->pag_agno + 1;
- error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag,
+ error = xfs_inode_ag_walk(mp, pag, execute, args, tag,
iter_flags);
xfs_perag_put(pag);
if (error) {
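/*
 * Illustration (not part of the patch): after this change, all
 * per-callback state travels through the single void *args cookie
 * instead of a separate int flags argument.  A minimal, runnable
 * userspace sketch of the same pattern follows; walk_items, item and
 * walk_ctx are hypothetical stand-ins, not XFS code.
 */
#include <stddef.h>
#include <stdio.h>

struct item { int id; };

/* The callback takes only the object and an opaque context, mirroring
 * the reduced execute() signature above. */
typedef int (*execute_fn)(struct item *ip, void *args);

static int walk_items(struct item *items, size_t n,
		      execute_fn execute, void *args)
{
	size_t i;
	int error;

	for (i = 0; i < n; i++) {
		/* The caller's context passes through unchanged. */
		error = execute(&items[i], args);
		if (error)
			return error;
	}
	return 0;
}

/* Per-walk state lives in a structure reached via args, not in an
 * extra scalar argument. */
struct walk_ctx { int verbose; };

static int print_item(struct item *ip, void *args)
{
	struct walk_ctx *ctx = args;

	if (ctx->verbose)
		printf("item %d\n", ip->id);
	return 0;
}

int main(void)
{
	struct item items[] = { { 1 }, { 2 } };
	struct walk_ctx ctx = { .verbose = 1 };

	return walk_items(items, 2, print_item, &ctx);
}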
STATIC int
xfs_inode_free_eofblocks(
struct xfs_inode *ip,
- int flags,
void *args)
{
- int ret = 0;
- struct xfs_eofblocks *eofb = args;
- int match;
+ struct xfs_eofblocks *eofb = args;
+ bool wait;
+ int match;
+ int ret;
+
+ wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC);
if (!xfs_can_free_eofblocks(ip, false)) {
/* inode could be preallocated or append-only */
* If the mapping is dirty the operation can block and wait for some
* time. Unless we are waiting, skip it.
*/
- if (!(flags & SYNC_WAIT) &&
- mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
+ if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
return 0;
if (eofb) {
* scanner moving and revisit the inode in a subsequent pass.
*/
if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
- if (flags & SYNC_WAIT)
- ret = -EAGAIN;
- return ret;
+ if (wait)
+ return -EAGAIN;
+ return 0;
}
+
ret = xfs_free_eofblocks(ip);
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
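/*
 * Illustration (not part of the patch): with SYNC_WAIT gone, the
 * function derives "wait" directly from eofb->eof_flags and uses it to
 * choose between "skip quietly" and "ask the walk to retry".  Below is
 * a runnable userspace analog of that trylock logic, with a pthread
 * mutex standing in for the inode iolock; try_do_work is a
 * hypothetical name.  It assumes the default (non-recursive) mutex, so
 * trylock on a held lock fails with EBUSY.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int try_do_work(bool wait)
{
	if (pthread_mutex_trylock(&lock) != 0) {
		if (wait)
			return -EAGAIN;	/* sync pass: revisit later */
		return 0;		/* background pass: skip */
	}
	/* ... the real work would happen here, under the lock ... */
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	printf("uncontended: %d\n", try_do_work(true));

	/* Hold the lock to exercise both contended paths. */
	pthread_mutex_lock(&lock);
	printf("busy, wait=false: %d\n", try_do_work(false));
	printf("busy, wait=true:  %d\n", try_do_work(true));
	pthread_mutex_unlock(&lock);
	return 0;
}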
__xfs_icache_free_eofblocks(
struct xfs_mount *mp,
struct xfs_eofblocks *eofb,
- int (*execute)(struct xfs_inode *ip, int flags,
- void *args),
+ int (*execute)(struct xfs_inode *ip, void *args),
int tag)
{
- int flags = SYNC_TRYLOCK;
-
- if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
- flags = SYNC_WAIT;
-
- return xfs_inode_ag_iterator(mp, 0, execute, flags, eofb, tag);
+ return xfs_inode_ag_iterator(mp, 0, execute, eofb, tag);
}
int
STATIC int
xfs_inode_free_cowblocks(
struct xfs_inode *ip,
- int flags,
void *args)
{
struct xfs_eofblocks *eofb = args;
void xfs_queue_cowblocks(struct xfs_mount *);
int xfs_inode_ag_iterator(struct xfs_mount *mp, int iter_flags,
- int (*execute)(struct xfs_inode *ip, int flags, void *args),
- int flags, void *args, int tag);
+ int (*execute)(struct xfs_inode *ip, void *args),
+ void *args, int tag);
int xfs_icache_inode_is_allocated(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_ino_t ino, bool *inuse);
STATIC int
xfs_dqrele_inode(
struct xfs_inode *ip,
- int flags,
void *args)
{
+ uint *flags = args;
+
/* skip quota inodes */
if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
ip == ip->i_mount->m_quotainfo->qi_gquotaip ||
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
- if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
+ if ((*flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
xfs_qm_dqrele(ip->i_udquot);
ip->i_udquot = NULL;
}
- if ((flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) {
+ if ((*flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) {
xfs_qm_dqrele(ip->i_gdquot);
ip->i_gdquot = NULL;
}
- if ((flags & XFS_PQUOTA_ACCT) && ip->i_pdquot) {
+ if ((*flags & XFS_PQUOTA_ACCT) && ip->i_pdquot) {
xfs_qm_dqrele(ip->i_pdquot);
ip->i_pdquot = NULL;
}
*/
void
xfs_qm_dqrele_all_inodes(
- struct xfs_mount *mp,
- uint flags)
+ struct xfs_mount *mp,
+ uint flags)
{
ASSERT(mp->m_quotainfo);
xfs_inode_ag_iterator(mp, XFS_AGITER_INEW_WAIT, xfs_dqrele_inode,
- flags, NULL, XFS_ICI_NO_TAG);
+ &flags, XFS_ICI_NO_TAG);
}
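/*
 * Illustration (not part of the patch): since args is now the only
 * channel into the callback, xfs_qm_dqrele_all_inodes() passes its
 * flag word by address and xfs_dqrele_inode() recovers it with
 * "uint *flags = args;".  This is safe because the iterator runs the
 * callbacks synchronously, so the on-stack flags outlives every call.
 * A small userspace sketch of the same pattern; UQUOTA_ACCT,
 * GQUOTA_ACCT and dqrele_cb are hypothetical stand-ins for the XFS
 * names.
 */
#include <stdio.h>

#define UQUOTA_ACCT	0x1
#define GQUOTA_ACCT	0x2

static int dqrele_cb(void *args)
{
	unsigned int *flags = args;	/* recover the flag word */

	if (*flags & UQUOTA_ACCT)
		printf("release user dquot\n");
	if (*flags & GQUOTA_ACCT)
		printf("release group dquot\n");
	return 0;
}

int main(void)
{
	unsigned int flags = UQUOTA_ACCT | GQUOTA_ACCT;

	/* Pass the address, as xfs_qm_dqrele_all_inodes() now does. */
	return dqrele_cb(&flags);
}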