};
struct bch_page_state {
+ spinlock_t lock;
atomic_t write_count;
struct bch_page_sector s[PAGE_SECTORS];
};
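This hunk adds the new spinlock to struct bch_page_state. Judging from the rest of the patch, the lock protects the per-sector array s[], so that per-sector state can be updated from contexts that do not, or must not, hold the page lock. The next hunk initializes it when the state is allocated: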
if (!s)
return NULL;
+ spin_lock_init(&s->lock);
/*
* migrate_page_move_mapping() assumes that pages with private data
* have their count elevated by 1.
*/
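From the "if (!s) return NULL;" context this is the page-state constructor (presumably __bch2_page_state_create() in fs-io.c); the migrate_page_move_mapping() comment is pre-existing context about the page refcount. The assertions in the next hunk move into the teardown path: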
if (!s)
return;
+ EBUG_ON(!PageLocked(page));
+ EBUG_ON(PageWriteback(page));
+
for (i = 0; i < ARRAY_SIZE(s->s); i++) {
disk_res.sectors += s->s[i].replicas_reserved;
s->s[i].replicas_reserved = 0;
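The PageLocked/PageWriteback assertions are added to the function that tears down page state and returns its disk reservations (presumably bch2_clear_page_bits(), given the replicas_reserved accounting in the surrounding context); the matching assertions are deleted from bch2_release_folio() below.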
WARN_ON((u64) page_offset(page) + offset + len >
round_up((u64) i_size_read(&inode->v), block_bytes(c)));
+ spin_lock(&s->lock);
+
for (i = round_down(offset, block_bytes(c)) >> 9;
i < round_up(offset + len, block_bytes(c)) >> 9;
i++) {
s->s[i].state = max_t(unsigned, s->s[i].state, SECTOR_DIRTY);
}
+ spin_unlock(&s->lock);
+
if (dirty_sectors)
i_sectors_acct(c, inode, &res->quota, dirty_sectors);
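In the dirtying path, the per-sector state updates now happen under the new lock. The >> 9 shifts convert byte offsets, rounded to the filesystem block size, into 512-byte sector indices into s->s[].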
bool bch2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
- /* XXX: this can't take locks that are held while we allocate memory */
- EBUG_ON(!PageLocked(&folio->page));
- EBUG_ON(folio_test_writeback(folio));
-
if (folio_test_dirty(folio) || folio_test_writeback(folio))
return false;
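bch2_release_folio() loses the XXX comment and its assertions, presumably because the checks now live in the teardown path above; release_folio can be called from memory reclaim, which is presumably why the deleted comment warned against taking locks that are held while allocating memory.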
SetPageError(bvec->bv_page);
mapping_set_error(bvec->bv_page->mapping, -EIO);
- lock_page(bvec->bv_page);
- s = bch2_page_state(bvec->bv_page);
+ s = __bch2_page_state(bvec->bv_page);
+ spin_lock(&s->lock);
for (i = 0; i < PAGE_SECTORS; i++)
s->s[i].nr_replicas = 0;
- unlock_page(bvec->bv_page);
+ spin_unlock(&s->lock);
}
}
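This is the writeback error path (note the SetPageError() and mapping_set_error() context). It used to take the page lock in order to zero each sector's nr_replicas; with the new spinlock it no longer needs to lock pages from I/O completion context. It also switches to __bch2_page_state(), presumably the variant of bch2_page_state() that does not assert that the page lock is held.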
folio_lock(folio);
s = bch2_page_state(&folio->page);
- if (s)
+ if (s) {
+ spin_lock(&s->lock);
for (j = 0; j < PAGE_SECTORS; j++)
s->s[j].nr_replicas = 0;
+ spin_unlock(&s->lock);
+ }
folio_unlock(folio);
}
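Finally, a caller that does hold the folio lock still takes the new spinlock around its update, which keeps the rule uniform: every writer of the per-sector array takes s->lock, whatever other locks it happens to hold. The following is a minimal user-space sketch of that pattern, using pthread spinlocks in place of the kernel's spinlock_t; all names and constants are invented for illustration and nothing here is bcachefs code:

#include <pthread.h>
#include <stdio.h>

#define PAGE_SECTORS 8

/* Stand-in for struct bch_page_sector. */
struct sector_state {
	unsigned state;
	unsigned nr_replicas;
};

/* Stand-in for struct bch_page_state: one spinlock per page. */
struct page_state {
	pthread_spinlock_t	lock;
	struct sector_state	s[PAGE_SECTORS];
};

/* Mark a range of sectors dirty, as the dirtying hunk above does. */
static void mark_dirty(struct page_state *ps, unsigned start, unsigned end)
{
	unsigned i;

	pthread_spin_lock(&ps->lock);
	for (i = start; i < end; i++)
		ps->s[i].state = 1;	/* SECTOR_DIRTY stand-in */
	pthread_spin_unlock(&ps->lock);
}

/* Zero replica counts on error, as the writeback error path does. */
static void clear_replicas(struct page_state *ps)
{
	unsigned i;

	pthread_spin_lock(&ps->lock);
	for (i = 0; i < PAGE_SECTORS; i++)
		ps->s[i].nr_replicas = 0;
	pthread_spin_unlock(&ps->lock);
}

int main(void)
{
	struct page_state ps = { 0 };

	pthread_spin_init(&ps.lock, PTHREAD_PROCESS_PRIVATE);
	mark_dirty(&ps, 2, 6);
	clear_replicas(&ps);
	pthread_spin_destroy(&ps.lock);
	printf("sector 2 state = %u\n", ps.s[2].state);
	return 0;
}

Build with cc -pthread. The point is only that the fine-grained lock lives in the per-page object itself, so no page-level or global lock is needed to mutate per-sector state.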