        struct nvm_rq *rqd;
        int rq_size;
 
-       if (rw == WRITE) {
+       if (rw == PBLK_WRITE) {
                pool = pblk->w_rq_pool;
                rq_size = pblk_w_rq_size;
        } else {
 {
        mempool_t *pool;
 
-       if (rw == WRITE)
+       if (rw == PBLK_WRITE)
                pool = pblk->w_rq_pool;
        else
                pool = pblk->r_rq_pool;
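
The two hunks above only rename the check: pblk_alloc_rqd() and
pblk_free_rqd() still dispatch between the same two mempools, with
PBLK_WRITE taking the write pool and every other type (PBLK_READ, and
after this patch PBLK_WRITE_INT and PBLK_ERASE) falling through to the
read pool. A minimal userspace sketch of that dispatch, with
hypothetical names standing in for the kernel types:

	#include <stdio.h>

	/* Stand-ins for the enum this patch adds to pblk.h; PBLK_READ and
	 * PBLK_WRITE keep the kernel's READ (0) and WRITE (1) values. */
	enum { PBLK_READ = 0, PBLK_WRITE = 1, PBLK_WRITE_INT, PBLK_ERASE };

	/* Models the pool selection above: only PBLK_WRITE uses w_rq_pool;
	 * internal writes and erases share r_rq_pool, exactly as the old
	 * READ-typed callers did. */
	static const char *pick_pool(int type)
	{
		return type == PBLK_WRITE ? "w_rq_pool" : "r_rq_pool";
	}

	int main(void)
	{
		printf("PBLK_WRITE     -> %s\n", pick_pool(PBLK_WRITE));
		printf("PBLK_WRITE_INT -> %s\n", pick_pool(PBLK_WRITE_INT));
		return 0;
	}
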
        int ret;
        DECLARE_COMPLETION_ONSTACK(wait);
 
-       if (dir == WRITE) {
+       if (dir == PBLK_WRITE) {
                bio_op = REQ_OP_WRITE;
                cmd_op = NVM_OP_PWRITE;
-       } else if (dir == READ) {
+       } else if (dir == PBLK_READ) {
                bio_op = REQ_OP_READ;
                cmd_op = NVM_OP_PREAD;
        } else
        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;
 
-       if (dir == WRITE) {
+       if (dir == PBLK_WRITE) {
                struct pblk_sec_meta *meta_list = rqd.meta_list;
 
-               rqd.flags = pblk_set_progr_mode(pblk, WRITE);
+               rqd.flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
                for (i = 0; i < rqd.nr_ppas; ) {
                        spin_lock(&line->lock);
                        paddr = __pblk_alloc_page(pblk, line, min);
        reinit_completion(&wait);
 
        if (rqd.error) {
-               if (dir == WRITE)
+               if (dir == PBLK_WRITE)
                        pblk_log_write_err(pblk, &rqd);
                else
                        pblk_log_read_err(pblk, &rqd);
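
In the synchronous smeta path (pblk_line_submit_smeta_io()), dir now
carries one of four types, so the trailing else that rejects the
request also catches PBLK_WRITE_INT and PBLK_ERASE, not just stray
values. A toy version of the mapping, with placeholder constants
standing in for the REQ_OP_ and NVM_OP_ opcodes:

	#include <errno.h>

	enum { PBLK_READ = 0, PBLK_WRITE = 1, PBLK_WRITE_INT, PBLK_ERASE };
	enum { OP_READ, OP_WRITE };	/* placeholder opcodes */

	/* Only the two user-visible directions are valid for metadata
	 * I/O; any other pblk type lands in the rejecting else branch. */
	static int dir_to_ops(int dir, int *bio_op, int *cmd_op)
	{
		if (dir == PBLK_WRITE) {
			*bio_op = OP_WRITE;
			*cmd_op = OP_WRITE;
		} else if (dir == PBLK_READ) {
			*bio_op = OP_READ;
			*cmd_op = OP_READ;
		} else {
			return -EINVAL;
		}

		return 0;
	}

	int main(void)
	{
		int bio_op, cmd_op;

		/* an erase must be rejected here */
		return dir_to_ops(PBLK_ERASE, &bio_op, &cmd_op) ? 0 : 1;
	}
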
        int flags;
        DECLARE_COMPLETION_ONSTACK(wait);
 
-       if (dir == WRITE) {
+       if (dir == PBLK_WRITE) {
                bio_op = REQ_OP_WRITE;
                cmd_op = NVM_OP_PWRITE;
-               flags = pblk_set_progr_mode(pblk, WRITE);
+               flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
                lba_list = emeta_to_lbas(pblk, line->emeta->buf);
-       } else if (dir == READ) {
+       } else if (dir == PBLK_READ) {
                bio_op = REQ_OP_READ;
                cmd_op = NVM_OP_PREAD;
                flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
 
                rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
 
-               if (dir == WRITE) {
+               if (dir == PBLK_WRITE) {
                        __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
 
                        meta_list[i].lba = lba_list[paddr] = addr_empty;
        atomic_dec(&pblk->inflight_io);
 
        if (rqd.error) {
-               if (dir == WRITE)
+               if (dir == PBLK_WRITE)
                        pblk_log_write_err(pblk, &rqd);
                else
                        pblk_log_read_err(pblk, &rqd);
 {
        u64 bpaddr = pblk_line_smeta_start(pblk, line);
 
-       return pblk_line_submit_smeta_io(pblk, line, bpaddr, READ);
+       return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ);
 }
 
 int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
                         void *emeta_buf)
 {
        return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
-                                               line->emeta_ssec, READ);
+                                               line->emeta_ssec, PBLK_READ);
 }
 
 static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
        rqd->opcode = NVM_OP_ERASE;
        rqd->ppa_addr = ppa;
        rqd->nr_ppas = 1;
-       rqd->flags = pblk_set_progr_mode(pblk, ERASE);
+       rqd->flags = pblk_set_progr_mode(pblk, PBLK_ERASE);
        rqd->bio = NULL;
 }
 
        line->smeta_ssec = off;
        line->cur_sec = off + lm->smeta_sec;
 
-       if (init && pblk_line_submit_smeta_io(pblk, line, off, WRITE)) {
+       if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
                pr_debug("pblk: line smeta I/O failed. Retry\n");
                return 1;
        }
 
        atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
 #endif
 
-       pblk_free_rqd(pblk, rqd, READ);
+       pblk_free_rqd(pblk, rqd, PBLK_READ);
        atomic_dec(&pblk->inflight_io);
 }
 
 
        bitmap_zero(&read_bitmap, nr_secs);
 
-       rqd = pblk_alloc_rqd(pblk, READ);
+       rqd = pblk_alloc_rqd(pblk, PBLK_READ);
 
        rqd->opcode = NVM_OP_PREAD;
        rqd->bio = bio;
        return NVM_IO_OK;
 
 fail_rqd_free:
-       pblk_free_rqd(pblk, rqd, READ);
+       pblk_free_rqd(pblk, rqd, PBLK_READ);
        return ret;
 }
 
 
 
 err:
        bio_put(bio);
-       pblk_free_rqd(pblk, rqd, WRITE);
+       pblk_free_rqd(pblk, rqd, PBLK_WRITE);
 }
 
 int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
        struct pblk_c_ctx *rec_ctx;
        int nr_entries = c_ctx->nr_valid + c_ctx->nr_padded;
 
-       rec_rqd = pblk_alloc_rqd(pblk, WRITE);
+       rec_rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
        rec_ctx = nvm_rq_to_pdu(rec_rqd);
 
        /* Copy completion bitmap, but exclude the first X completed entries */
        pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
 
        nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
-       pblk_free_rqd(pblk, rqd, WRITE);
+       pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
 
        atomic_dec(&pblk->inflight_io);
        kref_put(&pad_rq->ref, pblk_recov_complete);
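
The recovery hunks draw the new distinction explicitly:
pblk_recov_setup_rq() re-drives entries that came through the write
buffer, so its rqd stays PBLK_WRITE (its PDU is the pblk_c_ctx the
write path uses), while the pad requests built below never touch the
buffer and become PBLK_WRITE_INT. A hypothetical helper, not a kernel
function, capturing that rule:

	enum { PBLK_READ = 0, PBLK_WRITE = 1, PBLK_WRITE_INT, PBLK_ERASE };

	/* Toy helper: pick the write type by whether the request
	 * carries write-buffer entries. */
	static int recov_write_type(int has_wb_entries)
	{
		return has_wb_entries ? PBLK_WRITE : PBLK_WRITE_INT;
	}

	int main(void)
	{
		/* pblk_recov_setup_rq() vs. the pad path below */
		return recov_write_type(1) == PBLK_WRITE &&
		       recov_write_type(0) == PBLK_WRITE_INT ? 0 : 1;
	}
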
        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
-       rqd = pblk_alloc_rqd(pblk, WRITE);
+       rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
 
        rqd->bio = bio;
        rqd->opcode = NVM_OP_PWRITE;
-       rqd->flags = pblk_set_progr_mode(pblk, WRITE);
+       rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
        rqd->meta_list = meta_list;
        rqd->nr_ppas = rq_ppas;
        rqd->ppa_list = ppa_list;
                goto free_meta_list;
        }
 
-       rqd = pblk_alloc_rqd(pblk, READ);
+       rqd = pblk_alloc_rqd(pblk, PBLK_READ);
 
        p.ppa_list = ppa_list;
        p.meta_list = meta_list;
 
        nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
 
        bio_put(rqd->bio);
-       pblk_free_rqd(pblk, rqd, WRITE);
+       pblk_free_rqd(pblk, rqd, PBLK_WRITE);
 
        return ret;
 }
                                                GFP_ATOMIC, pblk->close_wq);
 
        nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
-       pblk_free_rqd(pblk, rqd, READ);
+       pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
 
        atomic_dec(&pblk->inflight_io);
 }
        /* Setup write request */
        rqd->opcode = NVM_OP_PWRITE;
        rqd->nr_ppas = nr_secs;
-       rqd->flags = pblk_set_progr_mode(pblk, WRITE);
+       rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
        rqd->private = pblk;
        rqd->end_io = end_io;
 
        pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, c_ctx->nr_valid, 0);
 
        rqd->ppa_status = (u64)0;
-       rqd->flags = pblk_set_progr_mode(pblk, WRITE);
+       rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
 
        return ret;
 }
        int i, j;
        int ret;
 
-       rqd = pblk_alloc_rqd(pblk, READ);
+       rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
 
        m_ctx = nvm_rq_to_pdu(rqd);
        m_ctx->private = meta_line;
 fail_free_bio:
        bio_put(bio);
 fail_free_rqd:
-       pblk_free_rqd(pblk, rqd, READ);
+       pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
        return ret;
 }
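
Note the pairing across hunks: pblk_submit_meta_io() allocates its rqd
as PBLK_WRITE_INT and pblk_end_io_write_meta() frees it with the same
type, which matters because the type selects the mempool the request
returns to. A small sketch of how a mismatched pair would drain one
pool and overfill the other, with toy counters standing in for
mempool_alloc()/mempool_free():

	#include <assert.h>

	enum { PBLK_READ = 0, PBLK_WRITE = 1, PBLK_WRITE_INT, PBLK_ERASE };

	static int w_pool = 8, r_pool = 8;	/* toy pool element counts */

	static int *pool_for(int type)
	{
		return type == PBLK_WRITE ? &w_pool : &r_pool;
	}

	static void alloc_rqd(int type) { --*pool_for(type); }
	static void free_rqd(int type)  { ++*pool_for(type); }

	int main(void)
	{
		alloc_rqd(PBLK_WRITE_INT);	/* pblk_submit_meta_io() */
		free_rqd(PBLK_WRITE_INT);	/* pblk_end_io_write_meta() */
		assert(w_pool == 8 && r_pool == 8);
		return 0;
	}
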
 
        bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
-       rqd = pblk_alloc_rqd(pblk, WRITE);
+       rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
        rqd->bio = bio;
 
        if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
        pblk_free_write_rqd(pblk, rqd);
 fail_put_bio:
        bio_put(bio);
-       pblk_free_rqd(pblk, rqd, WRITE);
+       pblk_free_rqd(pblk, rqd, PBLK_WRITE);
 
        return 1;
 }
 
                for ((i) = 0, rlun = &(pblk)->luns[0]; \
                        (i) < (pblk)->nr_luns; (i)++, rlun = &(pblk)->luns[(i)])
 
-#define ERASE 2 /* READ = 0, WRITE = 1 */
-
 /* Static pool sizes */
 #define PBLK_GEN_WS_POOL_SIZE (2)
 
+enum {
+       PBLK_READ               = READ,
+       PBLK_WRITE              = WRITE,        /* Write from write buffer */
+       PBLK_WRITE_INT,                 /* Internal write - no write buffer */
+       PBLK_ERASE,
+};
+
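Two properties of the enum above are worth spelling out. PBLK_READ and
PBLK_WRITE alias the kernel's READ (0) and WRITE (1), so the renamed
checks throughout this patch are binary-identical to the old ones; and
PBLK_WRITE_INT takes the value 2 that the deleted ERASE macro held,
with PBLK_ERASE moving to 3, which is why the ERASE users such as
pblk_setup_e_rq() are converted in the same patch. A compile-time check
of those values (C11; the enum is repeated here only for the sketch):

	#include <assert.h>

	enum { PBLK_READ = 0, PBLK_WRITE = 1, PBLK_WRITE_INT, PBLK_ERASE };

	static_assert(PBLK_WRITE_INT == 2, "takes the old ERASE slot");
	static_assert(PBLK_ERASE == 3, "erase moved off its old value");

	int main(void) { return 0; }
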
 enum {
        /* IO Types */
        PBLK_IOTYPE_USER        = 1 << 0,
 
        flags = geo->plane_mode >> 1;
 
-       if (type == WRITE)
+       if (type == PBLK_WRITE)
                flags |= NVM_IO_SCRAMBLE_ENABLE;
 
        return flags;
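
With this check, only PBLK_WRITE enables scrambling: the pool type and
the programming mode are now independent choices, which is why the
recovery pad path allocates its rqd as PBLK_WRITE_INT yet still calls
pblk_set_progr_mode(pblk, PBLK_WRITE). A toy model of the flag
computation (the constant's value is a placeholder, not the kernel's):

	#include <stdio.h>

	#define NVM_IO_SCRAMBLE_ENABLE (1 << 3)	/* placeholder value */

	enum { PBLK_READ = 0, PBLK_WRITE = 1, PBLK_WRITE_INT, PBLK_ERASE };

	/* Models pblk_set_progr_mode() after this patch: the bits derived
	 * from the plane mode are always set, scrambling only for
	 * buffered writes. */
	static int set_progr_mode(int plane_mode, int type)
	{
		int flags = plane_mode >> 1;

		if (type == PBLK_WRITE)
			flags |= NVM_IO_SCRAMBLE_ENABLE;

		return flags;
	}

	int main(void)
	{
		printf("write: 0x%x\n", set_progr_mode(4, PBLK_WRITE));
		printf("erase: 0x%x\n", set_progr_mode(4, PBLK_ERASE));
		return 0;
	}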