 	return ret;
 }
 
-void bch2_increment_clock(struct bch_fs *c, unsigned sectors, int rw)
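+/*
+ * Slow path, out of line: called from the inline bch2_increment_clock()
+ * fast path once the percpu counter has buffered IO_CLOCK_PCPU_SECTORS
+ * worth of IO; flushes the buffered sectors into the clock and fires
+ * any io timers that have expired as a result.
+ */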
+void __bch2_increment_clock(struct io_clock *clock)
 {
-	struct io_clock *clock = &c->io_clock[rw];
 	struct io_timer *timer;
 	unsigned long now;
+	unsigned sectors;
 
 	/* Buffer up one megabyte worth of IO in the percpu counter */
 	preempt_disable();
 
-	if (likely(this_cpu_add_return(*clock->pcpu_buf, sectors) <
-		   IO_CLOCK_PCPU_SECTORS)) {
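+	/*
+	 * Recheck under preempt_disable(): the caller's check runs with
+	 * preemption enabled, so we may have migrated CPUs since then, or
+	 * another task may have flushed this CPU's buffer already.
+	 */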
+	if (this_cpu_read(*clock->pcpu_buf) < IO_CLOCK_PCPU_SECTORS) {
 		preempt_enable();
 		return;
 	}

 void bch2_io_timer_del(struct io_clock *, struct io_timer *);
 void bch2_kthread_io_clock_wait(struct io_clock *, unsigned long,
 				unsigned long);
-void bch2_increment_clock(struct bch_fs *, unsigned, int);
+
+void __bch2_increment_clock(struct io_clock *);
+
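+/*
+ * Inline fast path: buffer up one megabyte worth of IO
+ * (IO_CLOCK_PCPU_SECTORS) in the percpu counter, and only call into
+ * the slow path when the buffer fills up.
+ */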
+static inline void bch2_increment_clock(struct bch_fs *c, unsigned sectors,
+					int rw)
+{
+	struct io_clock *clock = &c->io_clock[rw];
+
+	if (unlikely(this_cpu_add_return(*clock->pcpu_buf, sectors) >=
+		     IO_CLOCK_PCPU_SECTORS))
+		__bch2_increment_clock(clock);
+}
+
 void bch2_io_clock_schedule_timeout(struct io_clock *, unsigned long);