*/
/*
 * NOTE(review): this span is a unified-diff hunk ('-'/'+' markers, elided
 * context), not plain C.  Post-patch behavior, as far as the visible lines
 * show: convert the delalloc extent covering @offset_fsb in @whichfork of
 * @ip to a real allocation.  The transaction is now allocated and committed
 * (or cancelled) inside this function instead of being passed in by the
 * caller, and the fork sequence number is snapshotted into *@seq while the
 * ILOCK is still held so the caller can detect later fork changes.
 * Returns 0 on success, -EAGAIN if the extent was already converted/moved
 * by a racer, or a negative errno from transaction/allocation failure.
 */
int
xfs_bmapi_convert_delalloc(
-	struct xfs_trans *tp,
	struct xfs_inode *ip,
-	xfs_fileoff_t offset_fsb,
	int whichfork,
-	struct xfs_bmbt_irec *imap)
+	xfs_fileoff_t offset_fsb,
+	struct xfs_bmbt_irec *imap,
+	unsigned int *seq)
{
	struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+	struct xfs_mount *mp = ip->i_mount;
	struct xfs_bmalloca bma = { NULL };
+	struct xfs_trans *tp;
	int error;
+	/*
+	 * Space for the extent and indirect blocks was reserved when the
+	 * delalloc extent was created so there's no need to do so here.
+	 */
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0,
+			XFS_TRANS_RESERVE, &tp);
+	if (error)
+		return error;
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, ip, 0);
+
	/* No extent at (or covering) offset_fsb: a racer converted it first. */
	if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) ||
	    bma.got.br_startoff > offset_fsb) {
		/*
		 * might have moved the extent to the data fork in the meantime.
		 */
		WARN_ON_ONCE(whichfork != XFS_COW_FORK);
-		return -EAGAIN;
+		error = -EAGAIN;
+		goto out_trans_cancel;
	}
	/*
	 * NOTE(review): the body of this comment appears to have been elided
	 * by the diff context — confirm against the full patch.
	 */
	/*
	 */
	/* Extent is already real (not delalloc): report it and bail out. */
	if (!isnullstartblock(bma.got.br_startblock)) {
		*imap = bma.got;
-		return 0;
+		*seq = READ_ONCE(ifp->if_seq);
+		goto out_trans_cancel;
	}
	bma.tp = tp;
	ASSERT(!isnullstartblock(bma.got.br_startblock));
	*imap = bma.got;
	/* Snapshot the fork sequence under ILOCK so the caller can revalidate. */
+	*seq = READ_ONCE(ifp->if_seq);
	if (whichfork == XFS_COW_FORK) {
		error = xfs_refcount_alloc_cow_extent(tp, bma.blkno,
	/*
	 * NOTE(review): hunk context appears elided here — the refcount call
	 * above is cut mid-statement and intermediate lines (the actual
	 * xfs_bmapi_allocate call, presumably) are missing from this view.
	 */
	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
			whichfork);
+	if (error)
+		goto out_finish;
+
+	xfs_bmapi_finish(&bma, whichfork, 0);
+	error = xfs_trans_commit(tp);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	return error;
+
out_finish:
	xfs_bmapi_finish(&bma, whichfork, error);
+out_trans_cancel:
+	xfs_trans_cancel(tp);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
xfs_fileoff_t off, xfs_filblks_t len, xfs_filblks_t prealloc,
struct xfs_bmbt_irec *got, struct xfs_iext_cursor *cur,
int eof);
/*
 * Prototype updated to match the new implementation: the caller no longer
 * supplies a transaction (the function manages its own), the whichfork/
 * offset_fsb parameter order changed, and an out-parameter @seq returns the
 * fork sequence number sampled under ILOCK.
 */
-int xfs_bmapi_convert_delalloc(struct xfs_trans *, struct xfs_inode *,
-	xfs_fileoff_t, int, struct xfs_bmbt_irec *);
+int xfs_bmapi_convert_delalloc(struct xfs_inode *ip, int whichfork,
+		xfs_fileoff_t offset_fsb, struct xfs_bmbt_irec *imap,
+		unsigned int *seq);
static inline void
xfs_bmap_add_free(
	unsigned int *seq)
{
	/*
	 * NOTE(review): this function's opening line (name and earlier
	 * parameters) is elided by the diff context; from the call pattern it
	 * looks like the delalloc-writeback allocator (presumably
	 * xfs_iomap_write_allocate) — confirm against the full patch.  The
	 * visible change removes local transaction/ifp/seq handling, which
	 * now lives inside xfs_bmapi_convert_delalloc().
	 */
	struct xfs_mount *mp = ip->i_mount;
-	struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_fileoff_t offset_fsb;
	xfs_fileoff_t map_start_fsb;
	xfs_extlen_t map_count_fsb;
-	struct xfs_trans *tp;
	int error = 0;
	/*
	/*
	 * Allocate in a loop because it may take several attempts to
	 * allocate real blocks for a contiguous delalloc extent if free
-	 * space is sufficiently fragmented. Note that space for the
-	 * extent and indirect blocks was reserved when the delalloc
-	 * extent was created so there's no need to do so here.
+	 * space is sufficiently fragmented.
	 */
-	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0,
-			XFS_TRANS_RESERVE, &tp);
-	if (error)
-		return error;
-
-	xfs_ilock(ip, XFS_ILOCK_EXCL);
-	xfs_trans_ijoin(tp, ip, 0);
	/*
	 * ilock was dropped since imap was populated which means it
	 * caller. We'll trim it down to the caller's most recently
	 * validated range before we return.
	 */
	/* Conversion (and its transaction/locking) now happens in the callee. */
-	error = xfs_bmapi_convert_delalloc(tp, ip, offset_fsb,
-			whichfork, imap);
-	if (error)
-		goto trans_cancel;
-
-	error = xfs_trans_commit(tp);
+	error = xfs_bmapi_convert_delalloc(ip, whichfork, offset_fsb,
+			imap, seq);
	if (error)
-		goto error0;
-
-	*seq = READ_ONCE(ifp->if_seq);
-	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+		return error;
	/*
	 * See if we were able to allocate an extent that covers at
	/*
	 * NOTE(review): loop body context elided here by the diff; the
	 * success path below returns 0 from inside the (not fully visible)
	 * loop.
	 */
	return 0;
}
	}
-
	/* Old error labels removed: cleanup is now handled by the callee. */
-trans_cancel:
-	xfs_trans_cancel(tp);
-error0:
-	xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	return error;
}
int