/* BTREE CACHE */
struct bio_set btree_bio;
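+ /* for btree node read/write completions and journal writes (see queue_work() callers below): */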
+ struct workqueue_struct *io_complete_wq;
struct btree_root btree_roots[BTREE_ID_NR];
struct mutex btree_root_lock;
struct btree_key_cache btree_key_cache;
- struct workqueue_struct *wq;
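+ /* for btree updates issued from the write path (index updates, btree write error handling): */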
+ struct workqueue_struct *btree_update_wq;
/* copygc needs its own workqueue for index updates.. */
struct workqueue_struct *copygc_wq;
bch2_latency_acct(ca, rb->start_time, READ);
}
- queue_work(system_unbound_wq, &rb->work);
+ queue_work(c->io_complete_wq, &rb->work);
}
struct btree_node_read_all {
closure_sync(&ra->cl);
btree_node_read_all_replicas_done(&ra->cl);
} else {
- continue_at(&ra->cl, btree_node_read_all_replicas_done, system_unbound_wq);
+ continue_at(&ra->cl, btree_node_read_all_replicas_done,
+ c->io_complete_wq);
}
return 0;
if (sync)
btree_node_read_work(&rb->work);
else
- queue_work(system_unbound_wq, &rb->work);
+ queue_work(c->io_complete_wq, &rb->work);
}
}
bio_list_add(&c->btree_write_error_list, &wbio->wbio.bio);
spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
- queue_work(c->wq, &c->btree_write_error_work);
+ queue_work(c->btree_update_wq, &c->btree_write_error_work);
return;
}
container_of(orig, struct btree_write_bio, wbio);
INIT_WORK(&wb->work, btree_node_write_work);
- queue_work(system_unbound_wq, &wb->work);
+ queue_work(c->io_complete_wq, &wb->work);
}
}
atomic64_add(sectors_to_write, &c->btree_writes_sectors);
INIT_WORK(&wbio->work, btree_write_submit);
- schedule_work(&wbio->work);
+ queue_work(c->io_complete_wq, &wbio->work);
return;
err:
set_btree_node_noevict(b);
bch2_btree_reserve_put(as);
- continue_at(&as->cl, btree_update_set_nodes_written, system_freezable_wq);
+ continue_at(&as->cl, btree_update_set_nodes_written,
+ as->c->btree_interior_update_worker);
}
struct btree_update *
a->seq = b->data->keys.seq;
INIT_WORK(&a->work, async_btree_node_rewrite_work);
- queue_work(system_long_wq, &a->work);
+ queue_work(c->btree_interior_update_worker, &a->work);
}
static void __bch2_btree_node_update_key(struct bch_fs *c,
bch2_migrate_read_done(&op->write, rbio);
closure_init(cl, NULL);
- closure_call(&op->write.op.cl, bch2_write, c->wq, cl);
+ closure_call(&op->write.op.cl, bch2_write, c->btree_update_wq, cl);
closure_return_with_destructor(cl, promote_done);
}
if (bch2_crc_cmp(csum, rbio->pick.crc.csum) && !c->opts.no_data_io)
goto csum_err;
+ /*
+ * XXX
+ * We need to rework the narrow_crcs path to deliver the read completion
+ * first, and then punt to a different workqueue, otherwise we're
+ * holding up reads while doing btree updates which is bad for memory
+ * reclaim.
+ */
if (unlikely(rbio->narrow_crcs))
bch2_rbio_narrow_crcs(rbio);
{
return op->alloc_reserve == RESERVE_MOVINGGC
? op->c->copygc_wq
- : op->c->wq;
+ : op->c->btree_update_wq;
}
int bch2_sum_sector_overwrites(struct btree_trans *, struct btree_iter *,
void __bch2_journal_buf_put(struct journal *j)
{
- closure_call(&j->io, bch2_journal_write, system_highpri_wq, NULL);
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+
+ closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
}
/*
j->res_get_blocked_start);
j->res_get_blocked_start = 0;
- mod_delayed_work(system_freezable_wq,
+ mod_delayed_work(c->io_complete_wq,
&j->write_work,
msecs_to_jiffies(j->write_delay_ms));
journal_wake(j);
journal_wake(j);
if (test_bit(JOURNAL_NEED_WRITE, &j->flags))
- mod_delayed_work(system_freezable_wq, &j->write_work, 0);
+ mod_delayed_work(c->io_complete_wq, &j->write_work, 0);
spin_unlock(&j->lock);
if (new.unwritten_idx != new.idx &&
!journal_state_count(new, new.unwritten_idx))
- closure_call(&j->io, bch2_journal_write, system_highpri_wq, NULL);
+ closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
}
static void journal_write_endio(struct bio *bio)
le64_to_cpu(w->data->seq);
}
- continue_at(cl, journal_write_done, system_highpri_wq);
+ continue_at(cl, journal_write_done, c->io_complete_wq);
return;
}
journal_debug_buf);
kfree(journal_debug_buf);
bch2_fatal_error(c);
- continue_at(cl, journal_write_done, system_highpri_wq);
+ continue_at(cl, journal_write_done, c->io_complete_wq);
return;
}
bch2_bucket_seq_cleanup(c);
- continue_at(cl, do_journal_write, system_highpri_wq);
+ continue_at(cl, do_journal_write, c->io_complete_wq);
return;
no_io:
bch2_bucket_seq_cleanup(c);
- continue_at(cl, journal_write_done, system_highpri_wq);
+ continue_at(cl, journal_write_done, c->io_complete_wq);
return;
err:
bch2_inconsistent_error(c);
- continue_at(cl, journal_write_done, system_highpri_wq);
+ continue_at(cl, journal_write_done, c->io_complete_wq);
}
kfree(c->unused_inode_hints);
free_heap(&c->copygc_heap);
+ if (c->io_complete_wq)
+ destroy_workqueue(c->io_complete_wq);
if (c->copygc_wq)
destroy_workqueue(c->copygc_wq);
- if (c->wq)
- destroy_workqueue(c->wq);
+ if (c->btree_update_wq)
+ destroy_workqueue(c->btree_update_wq);
bch2_free_super(&c->disk_sb);
kvpfree(c, sizeof(*c));
c->inode_shard_bits = ilog2(roundup_pow_of_two(num_possible_cpus()));
- if (!(c->wq = alloc_workqueue("bcachefs",
+ if (!(c->btree_update_wq = alloc_workqueue("bcachefs",
WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
!(c->copygc_wq = alloc_workqueue("bcachefs_copygc",
WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
+ !(c->io_complete_wq = alloc_workqueue("bcachefs_io",
+ WQ_FREEZABLE|WQ_HIGHPRI|WQ_MEM_RECLAIM, 1)) ||
percpu_ref_init(&c->writes, bch2_writes_disabled,
PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||