{
struct md_rdev *rdev;
int idle;
- long long curr_events;
+ int curr_events;
idle = 1;
rcu_read_lock();
if (!init && !blk_queue_io_stat(disk->queue))
continue;
- curr_events =
- (long long)part_stat_read_accum(disk->part0, sectors) -
- atomic64_read(&disk->sync_io);
+ curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
+ atomic_read(&disk->sync_io);
/* sync IO will cause sync_io to increase before the disk_stats
* as sync_io is counted when a request starts, and
* disk_stats is counted when it completes.
sector_t sectors; /* Device size (in 512bytes sectors) */
struct mddev *mddev; /* RAID array if running */
- long long last_events; /* IO event timestamp */
+ int last_events; /* IO event timestamp */
/*
* If meta_bdev is non-NULL, it means that a separate device is
/*
 * md_sync_acct - account @nr_sectors of resync/recovery I/O issued to @bdev.
 *
 * Counted only when I/O accounting (blk_queue_io_stat) is enabled on the
 * disk's queue. Elsewhere in this change, sync_io is subtracted from
 * part_stat_read_accum(disk->part0, sectors) to compute curr_events, so
 * this counter must stay in lockstep with the disk sector statistics.
 *
 * NOTE(review): this excerpt is in patch form — the '-' line is the old
 * atomic64_t accounting, the '+' line the new atomic_t variant. An int
 * sector counter can wrap; presumably callers only compare deltas against
 * last_events so wraparound is tolerated — TODO confirm against the
 * idle-detection logic that consumes curr_events.
 */
static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
	if (blk_queue_io_stat(bdev->bd_disk->queue))
-	atomic64_add(nr_sectors, &bdev->bd_disk->sync_io);
+	atomic_add(nr_sectors, &bdev->bd_disk->sync_io);
}
static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
struct list_head slave_bdevs;
#endif
struct timer_rand_state *random;
- atomic64_t sync_io; /* RAID */
+ atomic_t sync_io; /* RAID */
struct disk_events *ev;
#ifdef CONFIG_BLK_DEV_ZONED