goto err_free_blkg;
        }
 
-       wb_congested = wb_congested_get_create(&q->backing_dev_info,
+       wb_congested = wb_congested_get_create(q->backing_dev_info,
                                               blkcg->css.id,
                                               GFP_NOWAIT | __GFP_NOWARN);
        if (!wb_congested) {
 const char *blkg_dev_name(struct blkcg_gq *blkg)
 {
        /* some drivers (floppy) instantiate a queue w/o disk registered */
-       if (blkg->q->backing_dev_info.dev)
-               return dev_name(blkg->q->backing_dev_info.dev);
+       if (blkg->q->backing_dev_info->dev)
+               return dev_name(blkg->q->backing_dev_info->dev);
        return NULL;
 }
 EXPORT_SYMBOL_GPL(blkg_dev_name);
 
         * flip its congestion state for events on other blkcgs.
         */
        if (rl == &rl->q->root_rl)
-               clear_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+               clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
 #endif
 }
 
 #else
        /* see blk_clear_congested() */
        if (rl == &rl->q->root_rl)
-               set_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+               set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
 #endif
 }
 
 {
        struct request_queue *q = bdev_get_queue(bdev);
 
-       return &q->backing_dev_info;
+       return q->backing_dev_info;
 }
 EXPORT_SYMBOL(blk_get_backing_dev_info);
 
        blk_flush_integrity();
 
        /* @q won't process any more requests, flush async actions */
-       del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+       del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
        blk_sync_queue(q);
 
        if (q->mq_ops)
                q->queue_lock = &q->__queue_lock;
        spin_unlock_irq(lock);
 
-       bdi_unregister(&q->backing_dev_info);
+       bdi_unregister(q->backing_dev_info);
 
        /* @q is and will stay empty, shutdown and put */
        blk_put_queue(q);
        if (!q->bio_split)
                goto fail_id;
 
-       q->backing_dev_info.ra_pages =
+       q->backing_dev_info = &q->_backing_dev_info;
+       q->backing_dev_info->ra_pages =
                        (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
-       q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
-       q->backing_dev_info.name = "block";
+       q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
+       q->backing_dev_info->name = "block";
        q->node = node_id;
 
-       err = bdi_init(&q->backing_dev_info);
+       err = bdi_init(q->backing_dev_info);
        if (err)
                goto fail_split;
 
-       setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
+       setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
                    laptop_mode_timer_fn, (unsigned long) q);
        setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
        INIT_LIST_HEAD(&q->queue_head);
 fail_ref:
        percpu_ref_exit(&q->q_usage_counter);
 fail_bdi:
-       bdi_destroy(&q->backing_dev_info);
+       bdi_destroy(q->backing_dev_info);
 fail_split:
        bioset_free(q->bio_split);
 fail_id:
         * disturb iosched and blkcg but weird is better than dead.
         */
        printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
-                          __func__, dev_name(q->backing_dev_info.dev));
+                          __func__, dev_name(q->backing_dev_info->dev));
 
        rq->rq_flags &= ~RQF_ELVPRIV;
        rq->elv.icq = NULL;
        BUG_ON(blk_queued_rq(req));
 
        if (unlikely(laptop_mode) && !blk_rq_is_passthrough(req))
-               laptop_io_completion(&req->q->backing_dev_info);
+               laptop_io_completion(req->q->backing_dev_info);
 
        blk_delete_timer(req);
 
 
                return;
 
        if (bi->profile)
-               disk->queue->backing_dev_info.capabilities |=
+               disk->queue->backing_dev_info->capabilities |=
                        BDI_CAP_STABLE_WRITES;
        else
-               disk->queue->backing_dev_info.capabilities &=
+               disk->queue->backing_dev_info->capabilities &=
                        ~BDI_CAP_STABLE_WRITES;
 }
 
 
        max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
        max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
        limits->max_sectors = max_sectors;
-       q->backing_dev_info.io_pages = max_sectors >> (PAGE_SHIFT - 9);
+       q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
 
 
 static ssize_t queue_ra_show(struct request_queue *q, char *page)
 {
-       unsigned long ra_kb = q->backing_dev_info.ra_pages <<
+       unsigned long ra_kb = q->backing_dev_info->ra_pages <<
                                        (PAGE_SHIFT - 10);
 
        return queue_var_show(ra_kb, (page));
        if (ret < 0)
                return ret;
 
-       q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);
+       q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
 
        return ret;
 }
 
        spin_lock_irq(q->queue_lock);
        q->limits.max_sectors = max_sectors_kb << 1;
-       q->backing_dev_info.io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
+       q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
        spin_unlock_irq(q->queue_lock);
 
        return ret;
                container_of(kobj, struct request_queue, kobj);
 
        wbt_exit(q);
-       bdi_exit(&q->backing_dev_info);
+       bdi_exit(q->backing_dev_info);
        blkcg_exit_queue(q);
 
        if (q->elevator) {
 
  */
 static bool wb_recent_wait(struct rq_wb *rwb)
 {
-       struct bdi_writeback *wb = &rwb->queue->backing_dev_info.wb;
+       struct bdi_writeback *wb = &rwb->queue->backing_dev_info->wb;
 
        return time_before(jiffies, wb->dirty_sleep + HZ);
 }
 
 static int __latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
 {
-       struct backing_dev_info *bdi = &rwb->queue->backing_dev_info;
+       struct backing_dev_info *bdi = rwb->queue->backing_dev_info;
        u64 thislat;
 
        /*
 
 static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
 {
-       struct backing_dev_info *bdi = &rwb->queue->backing_dev_info;
+       struct backing_dev_info *bdi = rwb->queue->backing_dev_info;
 
        trace_wbt_step(bdi, msg, rwb->scale_step, rwb->cur_win_nsec,
                        rwb->wb_background, rwb->wb_normal, rwb->wb_max);
 
        status = latency_exceeded(rwb);
 
-       trace_wbt_timer(&rwb->queue->backing_dev_info, status, rwb->scale_step,
+       trace_wbt_timer(rwb->queue->backing_dev_info, status, rwb->scale_step,
                        inflight);
 
        /*
 
        disk_alloc_events(disk);
 
        /* Register BDI before referencing it from bdev */
-       bdi = &disk->queue->backing_dev_info;
+       bdi = disk->queue->backing_dev_info;
        bdi_register_owner(bdi, disk_to_dev(disk));
 
        blk_register_region(disk_devt(disk), disk->minors, NULL,
 
        WARN_ON(d->gd);
        WARN_ON(d->flags & DEVFL_UP);
        blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
-       q->backing_dev_info.name = "aoe";
-       q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_SIZE;
+       q->backing_dev_info->name = "aoe";
+       q->backing_dev_info->ra_pages = READ_AHEAD / PAGE_SIZE;
        d->bufpool = mp;
        d->blkq = gd->queue = q;
        q->queuedata = d;
 
 
        if (get_ldev(device)) {
                q = bdev_get_queue(device->ldev->backing_bdev);
-               r = bdi_congested(&q->backing_dev_info, bdi_bits);
+               r = bdi_congested(q->backing_dev_info, bdi_bits);
                put_ldev(device);
                if (r)
                        reason = 'b';
        /* we have no partitions. we contain only ourselves. */
        device->this_bdev->bd_contains = device->this_bdev;
 
-       q->backing_dev_info.congested_fn = drbd_congested;
-       q->backing_dev_info.congested_data = device;
+       q->backing_dev_info->congested_fn = drbd_congested;
+       q->backing_dev_info->congested_data = device;
 
        blk_queue_make_request(q, drbd_make_request);
        blk_queue_write_cache(q, true, true);
 
        if (b) {
                blk_queue_stack_limits(q, b);
 
-               if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
+               if (q->backing_dev_info->ra_pages !=
+                   b->backing_dev_info->ra_pages) {
                        drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
-                                q->backing_dev_info.ra_pages,
-                                b->backing_dev_info.ra_pages);
-                       q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
+                                q->backing_dev_info->ra_pages,
+                                b->backing_dev_info->ra_pages);
+                       q->backing_dev_info->ra_pages =
+                                               b->backing_dev_info->ra_pages;
                }
        }
        fixup_discard_if_not_supported(q);
                s->dev_disk_flags = md->flags;
                q = bdev_get_queue(device->ldev->backing_bdev);
                s->dev_lower_blocked =
-                       bdi_congested(&q->backing_dev_info,
+                       bdi_congested(q->backing_dev_info,
                                      (1 << WB_async_congested) |
                                      (1 << WB_sync_congested));
                put_ldev(device);
 
                        seq_printf(seq, "%2d: cs:Unconfigured\n", i);
                } else {
                        /* reset device->congestion_reason */
-                       bdi_rw_congested(&device->rq_queue->backing_dev_info);
+                       bdi_rw_congested(device->rq_queue->backing_dev_info);
 
                        nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
                        wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';
 
 
        switch (rbm) {
        case RB_CONGESTED_REMOTE:
-               bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
+               bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
                return bdi_read_congested(bdi);
        case RB_LEAST_PENDING:
                return atomic_read(&device->local_cnt) >
 
                        && pd->bio_queue_size <= pd->write_congestion_off);
        spin_unlock(&pd->lock);
        if (wakeup) {
-               clear_bdi_congested(&pd->disk->queue->backing_dev_info,
+               clear_bdi_congested(pd->disk->queue->backing_dev_info,
                                        BLK_RW_ASYNC);
        }
 
        spin_lock(&pd->lock);
        if (pd->write_congestion_on > 0
            && pd->bio_queue_size >= pd->write_congestion_on) {
-               set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
+               set_bdi_congested(q->backing_dev_info, BLK_RW_ASYNC);
                do {
                        spin_unlock(&pd->lock);
                        congestion_wait(BLK_RW_ASYNC, HZ);
 
        q->limits.discard_zeroes_data = 1;
 
        if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
-               q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
+               q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
 
        disk->queue = q;
 
 
        struct request_queue *q = bdev_get_queue(dc->bdev);
        int ret = 0;
 
-       if (bdi_congested(&q->backing_dev_info, bits))
+       if (bdi_congested(q->backing_dev_info, bits))
                return 1;
 
        if (cached_dev_get(dc)) {
 
                for_each_cache(ca, d->c, i) {
                        q = bdev_get_queue(ca->bdev);
-                       ret |= bdi_congested(&q->backing_dev_info, bits);
+                       ret |= bdi_congested(q->backing_dev_info, bits);
                }
 
                cached_dev_put(dc);
        struct gendisk *g = dc->disk.disk;
 
        g->queue->make_request_fn               = cached_dev_make_request;
-       g->queue->backing_dev_info.congested_fn = cached_dev_congested;
+       g->queue->backing_dev_info->congested_fn = cached_dev_congested;
        dc->disk.cache_miss                     = cached_dev_cache_miss;
        dc->disk.ioctl                          = cached_dev_ioctl;
 }
 
        for_each_cache(ca, d->c, i) {
                q = bdev_get_queue(ca->bdev);
-               ret |= bdi_congested(&q->backing_dev_info, bits);
+               ret |= bdi_congested(q->backing_dev_info, bits);
        }
 
        return ret;
        struct gendisk *g = d->disk;
 
        g->queue->make_request_fn               = flash_dev_make_request;
-       g->queue->backing_dev_info.congested_fn = flash_dev_congested;
+       g->queue->backing_dev_info->congested_fn = flash_dev_congested;
        d->cache_miss                           = flash_dev_cache_miss;
        d->ioctl                                = flash_dev_ioctl;
 }
 
        blk_queue_make_request(q, NULL);
        d->disk->queue                  = q;
        q->queuedata                    = d;
-       q->backing_dev_info.congested_data = d;
+       q->backing_dev_info->congested_data = d;
        q->limits.max_hw_sectors        = UINT_MAX;
        q->limits.max_sectors           = UINT_MAX;
        q->limits.max_segment_size      = UINT_MAX;
        set_capacity(dc->disk.disk,
                     dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
 
-       dc->disk.disk->queue->backing_dev_info.ra_pages =
-               max(dc->disk.disk->queue->backing_dev_info.ra_pages,
-                   q->backing_dev_info.ra_pages);
+       dc->disk.disk->queue->backing_dev_info->ra_pages =
+               max(dc->disk.disk->queue->backing_dev_info->ra_pages,
+                   q->backing_dev_info->ra_pages);
 
        bch_cached_dev_request_init(dc);
        bch_cached_dev_writeback_init(dc);
 
 static int is_congested(struct dm_dev *dev, int bdi_bits)
 {
        struct request_queue *q = bdev_get_queue(dev->bdev);
-       return bdi_congested(&q->backing_dev_info, bdi_bits);
+       return bdi_congested(q->backing_dev_info, bdi_bits);
 }
 
 static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
 
 static int dev_is_congested(struct dm_dev *dev, int bdi_bits)
 {
        struct request_queue *q = bdev_get_queue(dev->bdev);
-       return bdi_congested(&q->backing_dev_info, bdi_bits);
+       return bdi_congested(q->backing_dev_info, bdi_bits);
 }
 
 static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
 
                char b[BDEVNAME_SIZE];
 
                if (likely(q))
-                       r |= bdi_congested(&q->backing_dev_info, bdi_bits);
+                       r |= bdi_congested(q->backing_dev_info, bdi_bits);
                else
                        DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
                                     dm_device_name(t->md),
 
                return 1;
 
        q = bdev_get_queue(pt->data_dev->bdev);
-       return bdi_congested(&q->backing_dev_info, bdi_bits);
+       return bdi_congested(q->backing_dev_info, bdi_bits);
 }
 
 static void requeue_bios(struct pool *pool)
 
                         * With request-based DM we only need to check the
                         * top-level queue for congestion.
                         */
-                       r = md->queue->backing_dev_info.wb.state & bdi_bits;
+                       r = md->queue->backing_dev_info->wb.state & bdi_bits;
                } else {
                        map = dm_get_live_table_fast(md);
                        if (map)
         * - must do so here (in alloc_dev callchain) before queue is used
         */
        md->queue->queuedata = md;
-       md->queue->backing_dev_info.congested_data = md;
+       md->queue->backing_dev_info->congested_data = md;
 }
 
 void dm_init_normal_md_queue(struct mapped_device *md)
        /*
         * Initialize aspects of queue that aren't relevant for blk-mq
         */
-       md->queue->backing_dev_info.congested_fn = dm_any_congested;
+       md->queue->backing_dev_info->congested_fn = dm_any_congested;
        blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
 }
 
 
 
        for (i = 0; i < mddev->raid_disks && !ret ; i++) {
                struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
-               ret |= bdi_congested(&q->backing_dev_info, bits);
+               ret |= bdi_congested(q->backing_dev_info, bits);
        }
 
        return ret;
 
                        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
                else
                        queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
-               mddev->queue->backing_dev_info.congested_data = mddev;
-               mddev->queue->backing_dev_info.congested_fn = md_congested;
+               mddev->queue->backing_dev_info->congested_data = mddev;
+               mddev->queue->backing_dev_info->congested_fn = md_congested;
        }
        if (pers->sync_request) {
                if (mddev->kobj.sd &&
 
                __md_stop_writes(mddev);
                __md_stop(mddev);
-               mddev->queue->backing_dev_info.congested_fn = NULL;
+               mddev->queue->backing_dev_info->congested_fn = NULL;
 
                /* tell userspace to handle 'inactive' */
                sysfs_notify_dirent_safe(mddev->sysfs_state);
 
                if (rdev && !test_bit(Faulty, &rdev->flags)) {
                        struct request_queue *q = bdev_get_queue(rdev->bdev);
 
-                       ret |= bdi_congested(&q->backing_dev_info, bits);
+                       ret |= bdi_congested(q->backing_dev_info, bits);
                        /* Just like multipath_map, we just check the
                         * first available device
                         */
 
        for (i = 0; i < raid_disks && !ret ; i++) {
                struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
 
-               ret |= bdi_congested(&q->backing_dev_info, bits);
+               ret |= bdi_congested(q->backing_dev_info, bits);
        }
        return ret;
 }
                 */
                int stripe = mddev->raid_disks *
                        (mddev->chunk_sectors << 9) / PAGE_SIZE;
-               if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
-                       mddev->queue->backing_dev_info.ra_pages = 2* stripe;
+               if (mddev->queue->backing_dev_info->ra_pages < 2* stripe)
+                       mddev->queue->backing_dev_info->ra_pages = 2* stripe;
        }
 
        dump_zones(mddev);
 
                         * non-congested targets, it can be removed
                         */
                        if ((bits & (1 << WB_async_congested)) || 1)
-                               ret |= bdi_congested(&q->backing_dev_info, bits);
+                               ret |= bdi_congested(q->backing_dev_info, bits);
                        else
-                               ret &= bdi_congested(&q->backing_dev_info, bits);
+                               ret &= bdi_congested(q->backing_dev_info, bits);
                }
        }
        rcu_read_unlock();
 
                if (rdev && !test_bit(Faulty, &rdev->flags)) {
                        struct request_queue *q = bdev_get_queue(rdev->bdev);
 
-                       ret |= bdi_congested(&q->backing_dev_info, bits);
+                       ret |= bdi_congested(q->backing_dev_info, bits);
                }
        }
        rcu_read_unlock();
                 * maybe...
                 */
                stripe /= conf->geo.near_copies;
-               if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
-                       mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+               if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+                       mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
        }
 
        if (md_integrity_register(mddev))
                int stripe = conf->geo.raid_disks *
                        ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
                stripe /= conf->geo.near_copies;
-               if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
-                       conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+               if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+                       conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
        }
        conf->fullsync = 0;
 }
 
                mddev_suspend(mddev);
                conf->skip_copy = new;
                if (new)
-                       mddev->queue->backing_dev_info.capabilities |=
+                       mddev->queue->backing_dev_info->capabilities |=
                                BDI_CAP_STABLE_WRITES;
                else
-                       mddev->queue->backing_dev_info.capabilities &=
+                       mddev->queue->backing_dev_info->capabilities &=
                                ~BDI_CAP_STABLE_WRITES;
                mddev_resume(mddev);
        }
                int data_disks = conf->previous_raid_disks - conf->max_degraded;
                int stripe = data_disks *
                        ((mddev->chunk_sectors << 9) / PAGE_SIZE);
-               if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
-                       mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+               if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+                       mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
 
                chunk_size = mddev->chunk_sectors << 9;
                blk_queue_io_min(mddev->queue, chunk_size);
                        int data_disks = conf->raid_disks - conf->max_degraded;
                        int stripe = data_disks * ((conf->chunk_sectors << 9)
                                                   / PAGE_SIZE);
-                       if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
-                               conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+                       if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+                               conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
                }
        }
 }
 
         * We set the bdi here to the queue backing, file systems can
         * overwrite this in ->fill_super()
         */
-       s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
+       s->s_bdi = bdev_get_queue(s->s_bdev)->backing_dev_info;
        return 0;
 }
 
 
        sb->s_time_gran = 1;
        sb->s_max_links = NILFS_LINK_MAX;
 
-       sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info;
+       sb->s_bdi = bdev_get_queue(sb->s_bdev)->backing_dev_info;
 
        err = load_nilfs(nilfs, sb);
        if (err)
 
         * We set the bdi here to the queue backing, file systems can
         * overwrite this in ->fill_super()
         */
-       s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
+       s->s_bdi = bdev_get_queue(s->s_bdev)->backing_dev_info;
        return 0;
 }
 
 
         */
        struct delayed_work     delay_work;
 
-       struct backing_dev_info backing_dev_info;
+       struct backing_dev_info *backing_dev_info;
+       struct backing_dev_info _backing_dev_info;
 
        /*
         * The queue owner gets to use this for whatever they like.
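
The hunk above is the heart of the patch: request_queue now carries a
pointer to its backing_dev_info, with the old structure kept (temporarily,
it appears) as the embedded _backing_dev_info that the pointer is aimed at
during queue allocation. A minimal sketch of the resulting access-pattern
change, using an illustrative ra_pages assignment taken from the
blk_alloc_queue_node() hunk earlier in this patch:

	/* Before: the BDI is embedded, so users take its address. */
	struct backing_dev_info *bdi = &q->backing_dev_info;
	q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;

	/* After: the queue holds a pointer, aimed at the embedded copy
	 * in blk_alloc_queue_node() before any other field is touched. */
	q->backing_dev_info = &q->_backing_dev_info;
	struct backing_dev_info *bdi = q->backing_dev_info;
	q->backing_dev_info->ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
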
 
         * We want to write everything out, not just down to the dirty
         * threshold
         */
-       if (!bdi_has_dirty_io(&q->backing_dev_info))
+       if (!bdi_has_dirty_io(q->backing_dev_info))
                return;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(wb, &q->backing_dev_info.wb_list, bdi_node)
+       list_for_each_entry_rcu(wb, &q->backing_dev_info->wb_list, bdi_node)
                if (wb_has_dirty_io(wb))
                        wb_start_writeback(wb, nr_pages, true,
                                           WB_REASON_LAPTOP_TIMER);
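
Every remaining hunk applies the same mechanical rewrite at call sites
across block, drivers, md, and fs code: &q->backing_dev_info becomes
q->backing_dev_info, and the member access changes from "." to "->".
One side effect worth noting (an observation about the conversion, not
part of the patch text): because the field is now a pointer, a missed
call site no longer slips through silently, since &q->backing_dev_info
yields a struct backing_dev_info ** and draws an incompatible-pointer-type
warning from the compiler. For example:

	/* Old call site: passed the embedded struct's address. */
	r = bdi_congested(&q->backing_dev_info, bdi_bits);

	/* New call site: passes the pointer itself; the old form now
	 * has type struct backing_dev_info ** and is flagged. */
	r = bdi_congested(q->backing_dev_info, bdi_bits);
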