 static u64 bch2_recalc_sectors_available(struct bch_fs *c)
 {
-	int cpu;
-
-	for_each_possible_cpu(cpu)
-		per_cpu_ptr(c->pcpu, cpu)->sectors_available = 0;
+	percpu_u64_set(&c->pcpu->sectors_available, 0);
 
 	return avail_factor(bch2_fs_sectors_free(c));
 }
 		struct bch_replicas_entry *e =
 			cpu_replicas_entry(&c->replicas, i);
 		struct bch_replicas_cpu n;
-		u64 v = 0;
-		int cpu;
+		u64 v;
 
 		if (__replicas_has_entry(&c->replicas_gc, e))
 			continue;
 
-		for_each_possible_cpu(cpu)
-			v += *per_cpu_ptr(&c->usage[0]->data[i], cpu);
+		v = percpu_u64_get(&c->usage[0]->data[i]);
 		if (!v)
 			continue;
 static ssize_t show_dev_iodone(struct bch_dev *ca, char *buf)
 {
 	struct printbuf out = _PBUF(buf, PAGE_SIZE);
-	int rw, i, cpu;
+	int rw, i;
 
 	for (rw = 0; rw < 2; rw++) {
 		pr_buf(&out, "%s:\n", bch2_rw[rw]);
 
-		for (i = 1; i < BCH_DATA_NR; i++) {
-			u64 n = 0;
-
-			for_each_possible_cpu(cpu)
-				n += per_cpu_ptr(ca->io_done, cpu)->sectors[rw][i];
-
+		for (i = 1; i < BCH_DATA_NR; i++)
 			pr_buf(&out, "%-12s:%12llu\n",
-			       bch2_data_types[i], n << 9);
-		}
+			       bch2_data_types[i],
+			       percpu_u64_get(&ca->io_done->sectors[rw][i]) << 9);
 	}
 
 	return out.pos - buf;
 #include <linux/llist.h>
 #include <linux/log2.h>
 #include <linux/percpu.h>
+#include <linux/preempt.h>
 #include <linux/ratelimit.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 	}							\
 } while (0)
 
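+/*
+ * Sum a percpu u64 counter over all possible CPUs: the total is only a
+ * snapshot, and may miss updates that race with the read.
+ */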
+static inline u64 percpu_u64_get(u64 __percpu *src)
+{
+	u64 ret = 0;
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		ret += *per_cpu_ptr(src, cpu);
+	return ret;
+}
+
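+/*
+ * Set a distributed percpu u64 counter: zero every CPU's slot, then
+ * account the entire value to the local CPU, so that the sum over all
+ * CPUs reads back as @src. A concurrent percpu_u64_get() can observe
+ * an intermediate state.
+ */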
+static inline void percpu_u64_set(u64 __percpu *dst, u64 src)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		*per_cpu_ptr(dst, cpu) = 0;
+
+	preempt_disable();
+	*this_cpu_ptr(dst) = src;
+	preempt_enable();
+}
+
 static inline void acc_u64s(u64 *acc, const u64 *src, unsigned nr)
 {
 	unsigned i;