#define DAVINCI_MMC_DATADIR_WRITE 2
unsigned char data_dir;
- /* buffer is used during PIO of one scatterlist segment, and
- * is updated along with buffer_bytes_left. bytes_left applies
- * to all N blocks of the PIO transfer.
- */
- u8 *buffer;
- u32 buffer_bytes_left;
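+ /* bytes_left applies to all N blocks of the PIO transfer */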
u32 bytes_left;
struct dma_chan *dma_tx;
bool active_request;
/* For PIO we walk scatterlists one segment at a time. */
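+ /* The mapping iterator tracks our kmap and offset in the current segment */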
+ struct sg_mapping_iter sg_miter;
unsigned int sg_len;
- struct scatterlist *sg;
/* Version of the MMC/SD controller */
u8 version;
static irqreturn_t mmc_davinci_irq(int irq, void *dev_id);
/* PIO only */
-static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host)
-{
- host->buffer_bytes_left = sg_dma_len(host->sg);
- host->buffer = sg_virt(host->sg);
- if (host->buffer_bytes_left > host->bytes_left)
- host->buffer_bytes_left = host->bytes_left;
-}
-
static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
unsigned int n)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
+ size_t sglen;
u8 *p;
unsigned int i;
- if (host->buffer_bytes_left == 0) {
- host->sg = sg_next(host->data->sg);
- mmc_davinci_sg_to_buf(host);
+ /*
+ * sg_miter_next() resumes right after the bytes recorded in
+ * sgm->consumed on the previous pass, so sgm->addr always points
+ * at the current position in the scatterlist.
+ */
+ if (!sg_miter_next(sgm)) {
+ dev_err(mmc_dev(host->mmc), "ran out of sglist prematurely\n");
+ return;
}
-
- p = host->buffer;
- if (n > host->buffer_bytes_left)
- n = host->buffer_bytes_left;
- host->buffer_bytes_left -= n;
- host->bytes_left -= n;
+ p = sgm->addr;
+ sglen = sgm->length;
+
+ /* The miter may have mapped less than n bytes; don't run past it */
+ if (n > sglen)
+ n = sglen;
/* NOTE: we never transfer more than rw_threshold bytes
* to/from the fifo here; there's no I/O overlap.
p = p + (n & 3);
}
}
- host->buffer = p;
+
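+ /*
+ * Record how much we actually consumed; the next sg_miter_next()
+ * call resumes right after these bytes.
+ */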
+ sgm->consumed = n;
+ host->bytes_left -= n;
}
static void mmc_davinci_prepare_data(struct mmc_davinci_host *host,
int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0;
int timeout;
struct mmc_data *data = req->data;
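+ /*
+ * SG_MITER_ATOMIC maps each chunk with an atomic kmap, so the
+ * iterator is usable from the IRQ handler; we must not sleep
+ * between sg_miter_next() and sg_miter_stop().
+ */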
+ unsigned int flags = SG_MITER_ATOMIC; /* Used from IRQ */
if (host->version == MMC_CTLR_VERSION_2)
fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0;
/* Configure the FIFO */
if (data->flags & MMC_DATA_WRITE) {
+ flags |= SG_MITER_FROM_SG;
host->data_dir = DAVINCI_MMC_DATADIR_WRITE;
writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST,
host->base + DAVINCI_MMCFIFOCTL);
writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR,
host->base + DAVINCI_MMCFIFOCTL);
} else {
+ flags |= SG_MITER_TO_SG;
host->data_dir = DAVINCI_MMC_DATADIR_READ;
writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST,
host->base + DAVINCI_MMCFIFOCTL);
writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD,
host->base + DAVINCI_MMCFIFOCTL);
}
- host->buffer = NULL;
host->bytes_left = data->blocks * data->blksz;
/* For now we try to use DMA whenever we won't need partial FIFO
} else {
/* Revert to CPU Copy */
host->sg_len = data->sg_len;
- host->sg = host->data->sg;
- mmc_davinci_sg_to_buf(host);
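+ /*
+ * Set up the mapping iterator over the scatterlist. Nothing is
+ * mapped yet: the first sg_miter_next() in the FIFO handler
+ * maps the first chunk.
+ */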
+ sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
}
static void davinci_abort_data(struct mmc_davinci_host *host,
struct mmc_data *data)
{
mmc_davinci_reset_ctrl(host, 1);
mmc_davinci_reset_ctrl(host, 0);
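+ /*
+ * A PIO transfer aborted mid-scatterlist leaves the miter
+ * holding a mapping; sg_miter_stop() releases it.
+ */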
+ if (!host->do_dma)
+ sg_miter_stop(&host->sg_miter);
}
static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
if (qstatus & MMCST0_DATDNE) {
/* All blocks sent/received, and CRC checks passed */
if (data != NULL) {
- if ((host->do_dma == 0) && (host->bytes_left > 0)) {
- /* if datasize < rw_threshold
- * no RX ints are generated
- */
- davinci_fifo_data_trans(host, host->bytes_left);
+ if (!host->do_dma) {
+ /*
+ * If datasize < rw_threshold, no RX interrupts are
+ * generated; drain the remaining bytes here.
+ */
+ if (host->bytes_left > 0)
+ davinci_fifo_data_trans(host, host->bytes_left);
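+ /* The PIO transfer is done; release the last miter mapping */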
+ sg_miter_stop(&host->sg_miter);
}
end_transfer = 1;
data->bytes_xfered = data->blocks * data->blksz;