bio->bi_opf = opf;
bio->bi_flags = 0;
bio->bi_ioprio = 0;
- bio->bi_write_hint = 0;
bio->bi_status = 0;
bio->bi_iter.bi_sector = 0;
bio->bi_iter.bi_size = 0;
bio_flagged(bio_src, BIO_REMAPPED))
bio_set_flag(bio, BIO_REMAPPED);
bio->bi_ioprio = bio_src->bi_ioprio;
- bio->bi_write_hint = bio_src->bi_write_hint;
bio->bi_iter = bio_src->bi_iter;
bio_clone_blkg_association(bio, bio_src);
bio_set_flag(bio, BIO_REMAPPED);
bio->bi_opf = bio_src->bi_opf;
bio->bi_ioprio = bio_src->bi_ioprio;
- bio->bi_write_hint = bio_src->bi_write_hint;
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
!blk_write_same_mergeable(req->bio, next->bio))
return NULL;
- /*
- * Don't allow merge of different write hints, or for a hint with
- * non-hint IO.
- */
- if (req->write_hint != next->write_hint)
- return NULL;
-
if (req->ioprio != next->ioprio)
return NULL;
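With the hint comparison deleted, one fewer attribute can veto a request merge. For comparison, a standalone sketch of the old gating (hypothetical struct and function names, not the kernel code; the real checks are the ones visible in this hunk):

/* Illustrative only: how a write-hint mismatch used to veto a merge. */
#include <stdbool.h>

struct rq_meta {
	unsigned short write_hint;
	unsigned short ioprio;
};

static bool old_merge_allowed(const struct rq_meta *a, const struct rq_meta *b)
{
	if (a->write_hint != b->write_hint)	/* check removed by this patch */
		return false;
	return a->ioprio == b->ioprio;		/* check retained */
}

blk_rq_merge_ok() below gets the identical treatment, so bio-to-request and request-to-request merging stay consistent.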
!blk_write_same_mergeable(rq->bio, bio))
return false;
- /*
- * Don't allow merge of different write hints, or for a hint with
- * non-hint IO.
- */
- if (rq->write_hint != bio->bi_write_hint)
- return false;
-
if (rq->ioprio != bio_prio(bio))
return false;
return count;
}
-static int queue_write_hint_show(void *data, struct seq_file *m)
-{
- struct request_queue *q = data;
- int i;
-
- for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
- seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);
-
- return 0;
-}
-
-static ssize_t queue_write_hint_store(void *data, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct request_queue *q = data;
- int i;
-
- for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
- q->write_hints[i] = 0;
-
- return count;
-}
-
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
{ "poll_stat", 0400, queue_poll_stat_show },
{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
{ "pm_only", 0600, queue_pm_only_show, NULL },
{ "state", 0600, queue_state_show, queue_state_write },
- { "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
{ },
};
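The write_hints debugfs file dropped here printed one line per slot of the per-queue write_hints[] array (BLK_MAX_WRITE_HINTS == 5, see the blkdev.h hunk at the end of this patch), in the format produced by the seq_printf() above. Illustrative output with made-up values:

hint0: 0
hint1: 3
hint2: 0
hint3: 0
hint4: 12

Writing anything to the file merely zeroed all five slots, which is why the store callback above loops assigning 0 and then returns count unchanged.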
rq->cmd_flags |= REQ_FAILFAST_MASK;
rq->__sector = bio->bi_iter.bi_sector;
- rq->write_hint = bio->bi_write_hint;
blk_rq_bio_prep(rq, bio, nr_segs);
/* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
if (bio_flagged(bio_src, BIO_REMAPPED))
bio_set_flag(bio, BIO_REMAPPED);
bio->bi_ioprio = bio_src->bi_ioprio;
- bio->bi_write_hint = bio_src->bi_write_hint;
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
}
bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
- bio.bi_write_hint = iocb->ki_hint;
bio.bi_private = current;
bio.bi_end_io = blkdev_bio_end_io_simple;
bio.bi_ioprio = iocb->ki_ioprio;
for (;;) {
bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
- bio->bi_write_hint = iocb->ki_hint;
bio->bi_private = dio;
bio->bi_end_io = blkdev_bio_end_io;
bio->bi_ioprio = iocb->ki_ioprio;
dio->flags = 0;
dio->iocb = iocb;
bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
- bio->bi_write_hint = iocb->ki_hint;
bio->bi_end_io = blkdev_bio_end_io_async;
bio->bi_ioprio = iocb->ki_ioprio;
goto skip_copy;
}
- behind_bio->bi_write_hint = bio->bi_write_hint;
-
while (i < vcnt && size) {
struct page *page;
int len = min_t(int, PAGE_SIZE, size);
bio_set_dev(bio, log->rdev->bdev);
bio->bi_iter.bi_sector = log->next_io_sector;
bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
- bio->bi_write_hint = ppl_conf->write_hint;
pr_debug("%s: log->current_io_sector: %llu\n", __func__,
(unsigned long long)log->next_io_sector);
bio = bio_alloc_bioset(prev->bi_bdev, BIO_MAX_VECS,
prev->bi_opf, GFP_NOIO,
&ppl_conf->bs);
- bio->bi_write_hint = prev->bi_write_hint;
bio->bi_iter.bi_sector = bio_end_sector(prev);
bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);
atomic64_set(&ppl_conf->seq, 0);
INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
spin_lock_init(&ppl_conf->no_mem_stripes_lock);
- ppl_conf->write_hint = RWH_WRITE_LIFE_NOT_SET;
if (!mddev->external) {
ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
static ssize_t
ppl_write_hint_show(struct mddev *mddev, char *buf)
{
- size_t ret = 0;
- struct r5conf *conf;
- struct ppl_conf *ppl_conf = NULL;
-
- spin_lock(&mddev->lock);
- conf = mddev->private;
- if (conf && raid5_has_ppl(conf))
- ppl_conf = conf->log_private;
- ret = sprintf(buf, "%d\n", ppl_conf ? ppl_conf->write_hint : 0);
- spin_unlock(&mddev->lock);
-
- return ret;
+ return sprintf(buf, "%d\n", 0);
}
static ssize_t
ppl_write_hint_store(struct mddev *mddev, const char *page, size_t len)
{
struct r5conf *conf;
- struct ppl_conf *ppl_conf;
int err = 0;
unsigned short new;
return err;
conf = mddev->private;
- if (!conf) {
+ if (!conf)
err = -ENODEV;
- } else if (raid5_has_ppl(conf)) {
- ppl_conf = conf->log_private;
- if (!ppl_conf)
- err = -EINVAL;
- else
- ppl_conf->write_hint = new;
- } else {
+ else if (!raid5_has_ppl(conf) || !conf->log_private)
err = -EINVAL;
- }
mddev_unlock(mddev);
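ppl_write_hint thus survives as a sysfs attribute but becomes a validated no-op: show reports a constant 0, and store still fails on a missing array or a non-PPL array yet discards any value it accepts (the kstrto*() parse of the page happens in context elided from this hunk). A userspace-flavoured sketch of that parse-then-discard shape, with hypothetical names:

/* Hypothetical analogue: validate the input, then intentionally drop it. */
#include <errno.h>
#include <stdlib.h>

static int store_hint_sketch(const char *page)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(page, &end, 10);
	if (errno || end == page || val > 0xffff)
		return -EINVAL;	/* malformed input is still rejected */
	/* a valid value is parsed but no longer stored anywhere */
	return 0;
}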
bi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf);
bi->bi_io_vec[0].bv_offset = sh->dev[i].offset;
bi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf);
- bi->bi_write_hint = sh->dev[i].write_hint;
- if (!rrdev)
- sh->dev[i].write_hint = RWH_WRITE_LIFE_NOT_SET;
/*
* If this is discard request, set bi_vcnt 0. We don't
* want to confuse SCSI because SCSI will replace payload
rbi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf);
rbi->bi_io_vec[0].bv_offset = sh->dev[i].offset;
rbi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf);
- rbi->bi_write_hint = sh->dev[i].write_hint;
- sh->dev[i].write_hint = RWH_WRITE_LIFE_NOT_SET;
/*
* If this is discard request, set bi_vcnt 0. We don't
* want to confuse SCSI because SCSI will replace payload
(unsigned long long)sh->sector);
spin_lock_irq(&sh->stripe_lock);
- sh->dev[dd_idx].write_hint = bi->bi_write_hint;
/* Don't allow new IO added to stripes in batch list */
if (sh->batch_head)
goto overlap;
bio_ctrl->bio_flags = bio_flags;
bio->bi_end_io = end_io_func;
bio->bi_private = &inode->io_tree;
- bio->bi_write_hint = inode->vfs_inode.i_write_hint;
bio->bi_opf = opf;
ret = calc_bio_boundaries(bio_ctrl, inode, file_offset);
if (ret < 0)
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
- enum rw_hint hint, struct writeback_control *wbc);
+ struct writeback_control *wbc);
#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
do {
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
- submit_bh_wbc(REQ_OP_WRITE, write_flags, bh,
- inode->i_write_hint, wbc);
+ submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, wbc);
nr_underway++;
}
bh = next;
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
clear_buffer_dirty(bh);
- submit_bh_wbc(REQ_OP_WRITE, write_flags, bh,
- inode->i_write_hint, wbc);
+ submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, wbc);
nr_underway++;
}
bh = next;
}
static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
- enum rw_hint write_hint, struct writeback_control *wbc)
+ struct writeback_control *wbc)
{
struct bio *bio;
fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
- bio->bi_write_hint = write_hint;
bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
BUG_ON(bio->bi_iter.bi_size != bh->b_size);
int submit_bh(int op, int op_flags, struct buffer_head *bh)
{
- return submit_bh_wbc(op, op_flags, bh, 0, NULL);
+ return submit_bh_wbc(op, op_flags, bh, NULL);
}
EXPORT_SYMBOL(submit_bh);
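A note on the literal 0 that the old submit_bh() passed as its hint argument: it is RWH_WRITE_LIFE_NOT_SET from the uapi write-life constants, which remain defined for userspace even though the block layer stops carrying them. From <uapi/linux/fcntl.h> (values reproduced from memory, worth cross-checking against the tree):

#define RWH_WRITE_LIFE_NOT_SET	0
#define RWH_WRITE_LIFE_NONE	1
#define RWH_WRITE_LIFE_SHORT	2
#define RWH_WRITE_LIFE_MEDIUM	3
#define RWH_WRITE_LIFE_LONG	4
#define RWH_WRITE_LIFE_EXTREME	5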
bio->bi_end_io = dio_bio_end_aio;
else
bio->bi_end_io = dio_bio_end_io;
-
- bio->bi_write_hint = dio->iocb->ki_hint;
-
sdio->bio = bio;
sdio->logical_offset_in_bio = sdio->cur_page_fs_offset;
}
if (bio) {
if (io->io_wbc->sync_mode == WB_SYNC_ALL)
io->io_bio->bi_opf |= REQ_SYNC;
- io->io_bio->bi_write_hint = io->io_end->inode->i_write_hint;
submit_bio(io->io_bio);
}
io->io_bio = NULL;
submit_and_retry:
ext4_io_submit(io);
}
- if (io->io_bio == NULL) {
+ if (io->io_bio == NULL)
io_submit_init_bio(io, bh);
- io->io_bio->bi_write_hint = inode->i_write_hint;
- }
ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
if (ret != bh->b_size)
goto submit_and_retry;
} else {
bio->bi_end_io = f2fs_write_end_io;
bio->bi_private = sbi;
- bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
- fio->type, fio->temp);
}
iostat_alloc_and_bind_ctx(sbi, bio, NULL);
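Unlike the filesystems above, f2fs did not forward the user's hint here: f2fs_io_type_to_rw_hint() derived a lifetime from f2fs's own IO type and page temperature classification, subject to the whint_mode mount option. A hypothetical sketch of that kind of mapping, purely illustrative and not the removed function:

/* Illustrative temp-to-lifetime mapping; enum and cases are hypothetical. */
enum temp_sketch { TEMP_HOT, TEMP_WARM, TEMP_COLD };

static unsigned short temp_to_hint_sketch(enum temp_sketch temp)
{
	switch (temp) {
	case TEMP_HOT:	return RWH_WRITE_LIFE_SHORT;	/* rewritten soon */
	case TEMP_WARM:	return RWH_WRITE_LIFE_MEDIUM;
	default:	return RWH_WRITE_LIFE_NOT_SET;	/* no prediction */
	}
}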
new = bio_alloc(prev->bi_bdev, nr_iovecs, prev->bi_opf, GFP_NOIO);
bio_clone_blkg_association(new, prev);
new->bi_iter.bi_sector = bio_end_sector(prev);
- new->bi_write_hint = prev->bi_write_hint;
bio_chain(new, prev);
submit_bio(prev);
return new;
REQ_OP_WRITE | wbc_to_write_flags(wbc),
GFP_NOFS, &iomap_ioend_bioset);
bio->bi_iter.bi_sector = sector;
- bio->bi_write_hint = inode->i_write_hint;
wbc_init_bio(wbc, bio);
ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS);
bio_clone_blkg_association(new, prev);
new->bi_iter.bi_sector = bio_end_sector(prev);
- new->bi_write_hint = prev->bi_write_hint;
bio_chain(prev, new);
bio_get(prev); /* for iomap_finish_ioend */
bio = bio_alloc(iomap->bdev, nr_pages, bio_opf, GFP_KERNEL);
bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
- bio->bi_write_hint = dio->iocb->ki_hint;
bio->bi_ioprio = dio->iocb->ki_ioprio;
bio->bi_private = dio;
bio->bi_end_io = iomap_dio_bio_end_io;
GFP_NOFS);
bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
wbc_init_bio(wbc, bio);
- bio->bi_write_hint = inode->i_write_hint;
}
/*
bio = bio_alloc(bdev, nr_pages,
REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE, GFP_NOFS);
bio->bi_iter.bi_sector = zi->i_zsector;
- bio->bi_write_hint = iocb->ki_hint;
bio->bi_ioprio = iocb->ki_ioprio;
if (iocb->ki_flags & IOCB_DSYNC)
bio->bi_opf |= REQ_FUA;
*/
unsigned short bi_flags; /* BIO_* below */
unsigned short bi_ioprio;
- unsigned short bi_write_hint;
blk_status_t bi_status;
atomic_t __bi_remaining;
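Removing bi_write_hint from struct bio (and the matching write_hint from struct request) severs only the in-kernel plumbing; the per-file interface that feeds inode->i_write_hint is untouched by this patch and still accepts hints. A minimal userspace sketch, assuming a Linux where glibc exposes F_SET_RW_HINT under _GNU_SOURCE:

/* Set a write-life hint on a file; after this patch the value still lands
 * in inode->i_write_hint, it just no longer propagates into bios. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t hint = RWH_WRITE_LIFE_SHORT;
	int fd = open("data.tmp", O_CREAT | O_WRONLY, 0644);

	if (fd < 0 || fcntl(fd, F_SET_RW_HINT, &hint) < 0)
		perror("F_SET_RW_HINT");
	return 0;
}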
bool mq_sysfs_init_done;
-#define BLK_MAX_WRITE_HINTS 5
- u64 write_hints[BLK_MAX_WRITE_HINTS];
-
/*
* Independent sector access ranges. This is always NULL for
* devices that do not have multiple independent access ranges.