return nand_exec_op(chip, &op);
 }
 
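+/*
+ * Perform a READ PAGE as part of an ongoing sequential (continuous) cache
+ * read: the first page of the burst is opened with READ0/READSTART and
+ * fetched with READCACHESEQ (ONFI 31h), every following page with another
+ * READCACHESEQ, and the last page of the burst with READCACHEEND (ONFI 3Fh),
+ * which terminates the sequence.
+ */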
+static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int page,
+                                         unsigned int offset_in_page, void *buf,
+                                         unsigned int len, bool check_only)
+{
+       const struct nand_interface_config *conf =
+               nand_get_interface_config(chip);
+       u8 addrs[5];
+       struct nand_op_instr start_instrs[] = {
+               NAND_OP_CMD(NAND_CMD_READ0, 0),
+               NAND_OP_ADDR(4, addrs, 0),
+               NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)),
+               NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max), 0),
+               NAND_OP_CMD(NAND_CMD_READCACHESEQ, NAND_COMMON_TIMING_NS(conf, tWB_max)),
+               NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
+                                NAND_COMMON_TIMING_NS(conf, tRR_min)),
+               NAND_OP_DATA_IN(len, buf, 0),
+       };
+       struct nand_op_instr cont_instrs[] = {
+               NAND_OP_CMD(page == chip->cont_read.last_page ?
+                           NAND_CMD_READCACHEEND : NAND_CMD_READCACHESEQ,
+                           NAND_COMMON_TIMING_NS(conf, tWB_max)),
+               NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
+                                NAND_COMMON_TIMING_NS(conf, tRR_min)),
+               NAND_OP_DATA_IN(len, buf, 0),
+       };
+       struct nand_operation start_op = NAND_OPERATION(chip->cur_cs, start_instrs);
+       struct nand_operation cont_op = NAND_OPERATION(chip->cur_cs, cont_instrs);
+       int ret;
+
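+       /* Drop the trailing DATA_IN instruction when there is no payload */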
+       if (!len) {
+               start_op.ninstrs--;
+               cont_op.ninstrs--;
+       }
+
+       ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+       if (ret < 0)
+               return ret;
+
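+       /* addrs[0-1] carry the column cycles, addrs[2-4] the row (page) cycles */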
+       addrs[2] = page;
+       addrs[3] = page >> 8;
+
+       if (chip->options & NAND_ROW_ADDR_3) {
+               addrs[4] = page >> 16;
+               start_instrs[1].ctx.addr.naddrs++;
+       }
+
+       /* Only check that the sequential cache read sequences are supported */
+       if (check_only) {
+               if (nand_check_op(chip, &start_op) || nand_check_op(chip, &cont_op))
+                       return -EOPNOTSUPP;
+
+               return 0;
+       }
+
+       if (page == chip->cont_read.first_page)
+               return nand_exec_op(chip, &start_op);
+
+       return nand_exec_op(chip, &cont_op);
+}
+
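+/* Tell whether @page belongs to the continuous read operation in progress */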
+static bool rawnand_cont_read_ongoing(struct nand_chip *chip, unsigned int page)
+{
+       return chip->cont_read.ongoing &&
+               page >= chip->cont_read.first_page &&
+               page <= chip->cont_read.last_page;
+}
+
 /**
  * nand_read_page_op - Do a READ PAGE operation
  * @chip: The NAND chip
                return -EINVAL;
 
        if (nand_has_exec_op(chip)) {
-               if (mtd->writesize > 512)
-                       return nand_lp_exec_read_page_op(chip, page,
-                                                        offset_in_page, buf,
-                                                        len);
+               if (mtd->writesize > 512) {
+                       if (rawnand_cont_read_ongoing(chip, page))
+                               return nand_lp_exec_cont_read_page_op(chip, page,
+                                                                     offset_in_page,
+                                                                     buf, len, false);
+
+                       return nand_lp_exec_read_page_op(chip, page,
+                                                        offset_in_page, buf,
+                                                        len);
+               }
 
                return nand_sp_exec_read_page_op(chip, page, offset_in_page,
                                                 buf, len);
        return NULL;
 }
 
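+/*
+ * Decide whether the upcoming read can be turned into a continuous
+ * (sequential cache) read and, if so, record the first and last pages of
+ * the burst.
+ */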
+static void rawnand_enable_cont_reads(struct nand_chip *chip, unsigned int page,
+                                     u32 readlen, int col)
+{
+       struct mtd_info *mtd = nand_to_mtd(chip);
+
+       if (!chip->controller->supported_op.cont_read)
+               return;
+
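+       /*
+        * Continuous reads only pay off when at least two full pages can be
+        * retrieved in one burst; an unaligned start needs one extra page of
+        * coverage, as the leading partial page cannot be part of the burst.
+        */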
+       if ((col && col + readlen < (3 * mtd->writesize)) ||
+           (!col && readlen < (2 * mtd->writesize))) {
+               chip->cont_read.ongoing = false;
+               return;
+       }
+
+       chip->cont_read.ongoing = true;
+       chip->cont_read.first_page = page;
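+       /* Skip the leading partial page, it cannot be part of the burst */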
+       if (col)
+               chip->cont_read.first_page++;
+       chip->cont_read.last_page = page + ((readlen >> chip->page_shift) & chip->pagemask);
+}
+
 /**
  * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
  * @chip: NAND chip object
        oob = ops->oobbuf;
        oob_required = oob ? 1 : 0;
 
+       rawnand_enable_cont_reads(chip, page, readlen, col);
+
        while (1) {
                struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;
 
        rawnand_check_data_only_read_support(chip);
 }
 
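+/*
+ * Probe for sequential cache read support by submitting both operation
+ * patterns in check-only mode: nothing is executed, controllers only report
+ * whether they could handle the sequences.
+ */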
+static void rawnand_check_cont_read_support(struct nand_chip *chip)
+{
+       struct mtd_info *mtd = nand_to_mtd(chip);
+
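+       /* Read retries replay single page reads and do not mix with bursts */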
+       if (chip->read_retries)
+               return;
+
+       if (!nand_lp_exec_cont_read_page_op(chip, 0, 0, NULL,
+                                           mtd->writesize, true))
+               chip->controller->supported_op.cont_read = 1;
+}
+
 static void rawnand_late_check_supported_ops(struct nand_chip *chip)
 {
        /* The supported_op fields should not be set by individual drivers */
+       WARN_ON_ONCE(chip->controller->supported_op.cont_read);
 
        if (!nand_has_exec_op(chip))
                return;
+
+       rawnand_check_cont_read_support(chip);
 }
 
 /*