static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
 {
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+       struct pblk_gc *gc = &pblk->gc;
 
        spin_lock(&line->lock);
        WARN_ON(line->state != PBLK_LINESTATE_GC);
        pblk_line_free(pblk, line);
        spin_unlock(&line->lock);
 
+       /* Line has been fully recycled; it leaves the GC pipeline */
+       atomic_dec(&gc->pipeline_gc);
+
        spin_lock(&l_mg->free_lock);
        list_add_tail(&line->list, &l_mg->free_list);
        l_mg->nr_free_lines++;
 
        kfree(invalid_bitmap);
 
        kref_put(&line->ref, pblk_line_put);
-       atomic_dec(&gc->inflight_gc);
+       atomic_dec(&gc->read_inflight_gc);
 
        return;
 
 
        pblk_put_line_back(pblk, line);
        kref_put(&line->ref, pblk_line_put);
-       atomic_dec(&gc->inflight_gc);
+       atomic_dec(&gc->read_inflight_gc);
 
        pr_err("pblk: Failed to GC line %d\n", line->id);
 }
        line_ws->pblk = pblk;
        line_ws->line = line;
 
+       /* The line enters the GC pipeline before its read work is queued */
+       atomic_inc(&gc->pipeline_gc);
        INIT_WORK(&line_ws->ws, pblk_gc_line_prepare_ws);
        queue_work(gc->gc_reader_wq, &line_ws->ws);
 
 void pblk_gc_free_full_lines(struct pblk *pblk)
 {
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+       struct pblk_gc *gc = &pblk->gc;
        struct pblk_line *line;
 
        do {
                list_del(&line->list);
                spin_unlock(&l_mg->gc_lock);
 
+               /* Account the line in the GC pipeline; the final kref_put() frees it */
+               atomic_inc(&gc->pipeline_gc);
                kref_put(&line->ref, pblk_line_put);
        } while (1);
 }
        struct pblk_line *line;
        struct list_head *group_list;
        bool run_gc;
-       int inflight_gc, gc_group = 0, prev_group = 0;
+       int read_inflight_gc, gc_group = 0, prev_group = 0;
 
        pblk_gc_free_full_lines(pblk);
 
        run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
-       if (!run_gc || (atomic_read(&gc->inflight_gc) >= PBLK_GC_L_QD))
+       if (!run_gc || (atomic_read(&gc->read_inflight_gc) >= PBLK_GC_L_QD))
                return;
 
 next_gc_group:
                list_add_tail(&line->list, &gc->r_list);
                spin_unlock(&gc->r_lock);
 
-               inflight_gc = atomic_inc_return(&gc->inflight_gc);
+               read_inflight_gc = atomic_inc_return(&gc->read_inflight_gc);
                pblk_gc_reader_kick(gc);
 
                prev_group = 1;
 
                /* No need to queue up more GC lines than we can handle */
                run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
-               if (!run_gc || inflight_gc >= PBLK_GC_L_QD)
+               if (!run_gc || read_inflight_gc >= PBLK_GC_L_QD)
                        break;
        } while (1);
 
 static int pblk_gc_reader_ts(void *data)
 {
        struct pblk *pblk = data;
+       struct pblk_gc *gc = &pblk->gc;
 
        while (!kthread_should_stop()) {
                if (!pblk_gc_read(pblk))
                        io_schedule();
        }
 
+#ifdef CONFIG_NVM_DEBUG
+       pr_info("pblk: flushing gc pipeline, %d lines left\n",
+               atomic_read(&gc->pipeline_gc));
+#endif
+
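+       /* Wait for every line still in the GC pipeline to be fully freed
+        * before the reader thread exits.
+        */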
+       do {
+               if (!atomic_read(&gc->pipeline_gc))
+                       break;
+
+               schedule();
+       } while (1);
+
        return 0;
 }
 
        gc->gc_forced = 0;
        gc->gc_enabled = 1;
        gc->w_entries = 0;
-       atomic_set(&gc->inflight_gc, 0);
+       atomic_set(&gc->read_inflight_gc, 0);
+       atomic_set(&gc->pipeline_gc, 0);
 
        /* Workqueue that reads valid sectors from a line and submits them to the
         * GC writer to be recycled.
 
        sz += snprintf(page + sz, PAGE_SIZE - sz,
                "GC: full:%d, high:%d, mid:%d, low:%d, empty:%d, queue:%d\n",
                        gc_full, gc_high, gc_mid, gc_low, gc_empty,
-                       atomic_read(&pblk->gc.inflight_gc));
+                       atomic_read(&pblk->gc.read_inflight_gc));
 
        sz += snprintf(page + sz, PAGE_SIZE - sz,
                "data (%d) cur:%d, left:%d, vsc:%d, s:%d, map:%d/%d (%d)\n",