Rename submit_metadata_vio() to vdo_submit_metadata_vio(). Also rename
vdo_submit_metadata_io(), which backs the inline wrappers, to
__submit_metadata_vio().
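
The change at each call site is mechanical; a minimal sketch of the rename
(some_endio and some_error_handler are placeholder names here, not
identifiers from this patch):

  /* before: the unprefixed helper */
  submit_metadata_vio(vio, pbn, some_endio, some_error_handler,
                      REQ_OP_READ | REQ_PRIO);

  /* after: same arguments, vdo_-prefixed name; behavior is unchanged */
  vdo_submit_metadata_vio(vio, pbn, some_endio, some_error_handler,
                          REQ_OP_READ | REQ_PRIO);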
Reviewed-by: Susan LeGendre-McGhee <slegendr@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
Signed-off-by: Matthew Sakai <msakai@redhat.com>
cache->outstanding_reads++;
ADD_ONCE(cache->stats.pages_loaded, 1);
callback = (cache->rebuilding ? handle_rebuild_read_error : handle_load_error);
- submit_metadata_vio(info->vio, pbn, load_cache_page_endio,
- callback, REQ_OP_READ | REQ_PRIO);
+ vdo_submit_metadata_vio(info->vio, pbn, load_cache_page_endio,
+ callback, REQ_OP_READ | REQ_PRIO);
return VDO_SUCCESS;
}
if (!page->header.initialized) {
page->header.initialized = true;
- submit_metadata_vio(info->vio, info->pbn,
- write_cache_page_endio,
- handle_page_write_error,
- (REQ_OP_WRITE | REQ_PRIO | REQ_PREFLUSH));
+ vdo_submit_metadata_vio(info->vio, info->pbn,
+ write_cache_page_endio,
+ handle_page_write_error,
+ REQ_OP_WRITE | REQ_PRIO | REQ_PREFLUSH);
return;
}
continue;
}
ADD_ONCE(info->cache->stats.pages_saved, 1);
- submit_metadata_vio(info->vio, info->pbn, write_cache_page_endio,
- handle_page_write_error, REQ_OP_WRITE | REQ_PRIO);
+ vdo_submit_metadata_vio(info->vio, info->pbn, write_cache_page_endio,
+ handle_page_write_error, REQ_OP_WRITE | REQ_PRIO);
}
if (has_unflushed_pages) {
if (zone->flusher == tree_page)
operation |= REQ_PREFLUSH;
- submit_metadata_vio(vio, vdo_get_block_map_page_pbn(page),
- write_page_endio, handle_write_error,
- operation);
+ vdo_submit_metadata_vio(vio, vdo_get_block_map_page_pbn(page),
+ write_page_endio, handle_write_error,
+ operation);
}
static void write_page_endio(struct bio *bio)
}
page->header.initialized = true;
- submit_metadata_vio(&vio->vio, vdo_get_block_map_page_pbn(page),
- write_page_endio, handle_write_error,
- REQ_OP_WRITE | REQ_PRIO);
+ vdo_submit_metadata_vio(&vio->vio, vdo_get_block_map_page_pbn(page),
+ write_page_endio, handle_write_error,
+ REQ_OP_WRITE | REQ_PRIO);
}
/* Release a lock on a page which was being loaded or allocated. */
physical_block_number_t pbn = lock->tree_slots[lock->height - 1].block_map_slot.pbn;
pooled->vio.completion.parent = data_vio;
- submit_metadata_vio(&pooled->vio, pbn, load_page_endio,
- handle_io_error, REQ_OP_READ | REQ_PRIO);
+ vdo_submit_metadata_vio(&pooled->vio, pbn, load_page_endio,
+ handle_io_error, REQ_OP_READ | REQ_PRIO);
}
/*
next_level->page_index = entry_index;
next_level->slot = 0;
level->slot++;
- submit_metadata_vio(&cursor->vio->vio, location.pbn,
- traversal_endio, continue_traversal,
- REQ_OP_READ | REQ_PRIO);
+ vdo_submit_metadata_vio(&cursor->vio->vio, location.pbn,
+ traversal_endio, continue_traversal,
+ REQ_OP_READ | REQ_PRIO);
return;
}
}
}
/**
- * vdo_submit_metadata_io() - Submit I/O for a metadata vio.
+ * __submit_metadata_vio() - Submit I/O for a metadata vio.
* @vio: the vio for which to issue I/O
* @physical: the physical block number to read or write
* @callback: the bio endio function which will be called after the I/O completes
* no error can occur on the bio queue. Currently this is true for all callers, but additional care
* will be needed if this ever changes.
*/
-void vdo_submit_metadata_io(struct vio *vio, physical_block_number_t physical,
- bio_end_io_t callback, vdo_action_fn error_handler,
- unsigned int operation, char *data)
+void __submit_metadata_vio(struct vio *vio, physical_block_number_t physical,
+ bio_end_io_t callback, vdo_action_fn error_handler,
+ unsigned int operation, char *data)
{
- struct vdo_completion *completion = &vio->completion;
int result;
+ struct vdo_completion *completion = &vio->completion;
const struct admin_state_code *code = vdo_get_admin_state(completion->vdo);
void submit_data_vio_io(struct data_vio *data_vio);
-void vdo_submit_metadata_io(struct vio *vio, physical_block_number_t physical,
- bio_end_io_t callback, vdo_action_fn error_handler,
- unsigned int operation, char *data);
+void __submit_metadata_vio(struct vio *vio, physical_block_number_t physical,
+ bio_end_io_t callback, vdo_action_fn error_handler,
+ unsigned int operation, char *data);
-static inline void submit_metadata_vio(struct vio *vio, physical_block_number_t physical,
- bio_end_io_t callback, vdo_action_fn error_handler,
- unsigned int operation)
+static inline void vdo_submit_metadata_vio(struct vio *vio, physical_block_number_t physical,
+ bio_end_io_t callback, vdo_action_fn error_handler,
+ unsigned int operation)
{
- vdo_submit_metadata_io(vio, physical, callback, error_handler,
- operation, vio->data);
+ __submit_metadata_vio(vio, physical, callback, error_handler,
+ operation, vio->data);
}
static inline void submit_flush_vio(struct vio *vio, bio_end_io_t callback,
vdo_action_fn error_handler)
{
/* FIXME: Can we just use REQ_OP_FLUSH? */
- vdo_submit_metadata_io(vio, 0, callback, error_handler,
- REQ_OP_WRITE | REQ_PREFLUSH, NULL);
+ __submit_metadata_vio(vio, 0, callback, error_handler,
+ REQ_OP_WRITE | REQ_PREFLUSH, NULL);
}
#endif /* VDO_IO_SUBMITTER_H */
* the data being referenced is stable. The FUA is necessary to ensure that the journal
* block itself is stable before allowing overwrites of the lbn's previous data.
*/
- submit_metadata_vio(&block->vio, journal->origin + block->block_number,
- complete_write_endio, handle_write_error, WRITE_FLAGS);
+ vdo_submit_metadata_vio(&block->vio, journal->origin + block->block_number,
+ complete_write_endio, handle_write_error, WRITE_FLAGS);
}
remaining -= blocks;
}
- for (vio_count = 0;
- vio_count < repair->vio_count;
+ for (vio_count = 0; vio_count < repair->vio_count;
vio_count++, pbn += MAX_BLOCKS_PER_VIO) {
- submit_metadata_vio(&repair->vios[vio_count], pbn, read_journal_endio,
- handle_journal_load_error, REQ_OP_READ);
+ vdo_submit_metadata_vio(&repair->vios[vio_count], pbn, read_journal_endio,
+ handle_journal_load_error, REQ_OP_READ);
}
}
pbn = (depot->summary_origin +
(VDO_SLAB_SUMMARY_BLOCKS_PER_ZONE * allocator->zone_number) +
block->index);
- submit_metadata_vio(&block->vio, pbn, write_slab_summary_endio,
- handle_write_error, REQ_OP_WRITE | REQ_PREFLUSH);
+ vdo_submit_metadata_vio(&block->vio, pbn, write_slab_summary_endio,
+ handle_write_error, REQ_OP_WRITE | REQ_PREFLUSH);
}
/**
* This block won't be read in recovery until the slab summary is updated to refer to it.
* The slab summary update does a flush which is sufficient to protect us from VDO-2331.
*/
- submit_metadata_vio(uds_forget(vio), block_number, write_slab_journal_endio,
- complete_write, REQ_OP_WRITE);
+ vdo_submit_metadata_vio(uds_forget(vio), block_number, write_slab_journal_endio,
+ complete_write, REQ_OP_WRITE);
/* Since the write is submitted, the tail block structure can be reused. */
journal->tail++;
block->slab->allocator->ref_counts_statistics.blocks_written + 1);
completion->callback_thread_id = ((struct block_allocator *) pooled->context)->thread_id;
- submit_metadata_vio(&pooled->vio, pbn, write_reference_block_endio,
- handle_io_error, REQ_OP_WRITE | REQ_PREFLUSH);
+ vdo_submit_metadata_vio(&pooled->vio, pbn, write_reference_block_endio,
+ handle_io_error, REQ_OP_WRITE | REQ_PREFLUSH);
}
static void reclaim_journal_space(struct slab_journal *journal)
size_t block_offset = (block - block->slab->reference_blocks);
vio->completion.parent = block;
- submit_metadata_vio(vio, block->slab->ref_counts_origin + block_offset,
- load_reference_block_endio, handle_io_error,
- REQ_OP_READ);
+ vdo_submit_metadata_vio(vio, block->slab->ref_counts_origin + block_offset,
+ load_reference_block_endio, handle_io_error,
+ REQ_OP_READ);
}
/**
vio->completion.parent = journal;
vio->completion.callback_thread_id = slab->allocator->thread_id;
- submit_metadata_vio(vio, slab->journal_origin + tail_block,
- read_slab_journal_tail_endio, handle_load_error,
- REQ_OP_READ);
+ vdo_submit_metadata_vio(vio, slab->journal_origin + tail_block,
+ read_slab_journal_tail_endio, handle_load_error,
+ REQ_OP_READ);
}
/**
return;
}
- submit_metadata_vio(&scrubber->vio, slab->journal_origin,
- read_slab_journal_endio, handle_scrubber_error,
- REQ_OP_READ);
+ vdo_submit_metadata_vio(&scrubber->vio, slab->journal_origin,
+ read_slab_journal_endio, handle_scrubber_error,
+ REQ_OP_READ);
}
/**
combine_summaries(depot);
/* Write the combined summary back out. */
- submit_metadata_vio(as_vio(completion), depot->summary_origin,
- write_summary_endio, handle_combining_error,
- REQ_OP_WRITE);
+ vdo_submit_metadata_vio(as_vio(completion), depot->summary_origin,
+ write_summary_endio, handle_combining_error,
+ REQ_OP_WRITE);
}
static void load_summary_endio(struct bio *bio)
return;
}
- submit_metadata_vio(vio, depot->summary_origin, load_summary_endio,
- handle_combining_error, REQ_OP_READ);
+ vdo_submit_metadata_vio(vio, depot->summary_origin, load_summary_endio,
+ handle_combining_error, REQ_OP_READ);
}
/* Implements vdo_zone_action_fn. */
}
vdo->super_block.vio.completion.parent = parent;
- submit_metadata_vio(&vdo->super_block.vio,
- vdo_get_data_region_start(vdo->geometry),
- read_super_block_endio,
- handle_super_block_read_error,
- REQ_OP_READ);
+ vdo_submit_metadata_vio(&vdo->super_block.vio,
+ vdo_get_data_region_start(vdo->geometry),
+ read_super_block_endio,
+ handle_super_block_read_error,
+ REQ_OP_READ);
}
/**
vdo_encode_super_block(super_block->buffer, &vdo->states);
super_block->vio.completion.parent = parent;
super_block->vio.completion.callback_thread_id = parent->callback_thread_id;
- submit_metadata_vio(&super_block->vio,
- vdo_get_data_region_start(vdo->geometry),
- super_block_write_endio, handle_save_error,
- REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA);
+ vdo_submit_metadata_vio(&super_block->vio,
+ vdo_get_data_region_start(vdo->geometry),
+ super_block_write_endio, handle_save_error,
+ REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA);
}
/**