	mmc->ops->request(mmc, hsq->mrq);
}

+static void mmc_hsq_modify_threshold(struct mmc_hsq *hsq)
+{
+	struct mmc_host *mmc = hsq->mmc;
+	struct mmc_request *mrq;
+	unsigned int tag, need_change = 0;
+
+	/*
+	 * Default to the normal depth; bump it only when at least two
+	 * of the queued requests are 4KB writes.
+	 */
+	mmc->hsq_depth = HSQ_NORMAL_DEPTH;
+	for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
+		mrq = hsq->slot[tag].mrq;
+		if (mrq && mrq->data &&
+		    (mrq->data->blksz * mrq->data->blocks == 4096) &&
+		    (mrq->data->flags & MMC_DATA_WRITE) &&
+		    (++need_change == 2)) {
+			mmc->hsq_depth = HSQ_PERFORMANCE_DEPTH;
+			break;
+		}
+	}
+}
+
static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
	struct mmc_host *mmc = hsq->mmc;
[...]
		return;
	}

+	mmc_hsq_modify_threshold(hsq);
+
	slot = &hsq->slot[hsq->next_tag];
	hsq->mrq = slot->mrq;
	hsq->qcnt--;
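The scan above implements a simple heuristic: reset the depth to the conservative default, then raise it only once at least two queued slots hold 4KB writes. As a rough standalone sketch of the same logic (fake_req, pick_depth and NUM_SLOTS are illustrative stand-ins, not the kernel's struct mmc_request or HSQ_NUM_SLOTS):

#include <stdbool.h>
#include <stdio.h>

#define NUM_SLOTS          64
#define NORMAL_DEPTH        2
#define PERFORMANCE_DEPTH   5

/* Simplified stand-in for a queued request. */
struct fake_req {
	bool in_use;     /* slot holds a request */
	bool is_write;   /* MMC_DATA_WRITE equivalent */
	unsigned bytes;  /* blksz * blocks */
};

/* Depth the heuristic picks for a snapshot of the queue. */
static unsigned pick_depth(const struct fake_req *slots, unsigned nslots)
{
	unsigned tag, small_writes = 0;

	for (tag = 0; tag < nslots; tag++) {
		/* && short-circuits, so the counter only advances for 4KB writes */
		if (slots[tag].in_use && slots[tag].is_write &&
		    slots[tag].bytes == 4096 &&
		    ++small_writes == 2)
			return PERFORMANCE_DEPTH;
	}
	return NORMAL_DEPTH;
}

int main(void)
{
	struct fake_req slots[NUM_SLOTS] = {
		{ true, true,  4096 },	/* 4KB write */
		{ true, false, 4096 },	/* 4KB read: does not count */
		{ true, true,  4096 },	/* second 4KB write: triggers the bump */
	};

	printf("depth = %u\n", pick_depth(slots, NUM_SLOTS));	/* prints 5 */
	return 0;
}

The remaining hunk adds the matching constant in the header, next to the existing default: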
 * flight to avoid a long latency.
 */
#define HSQ_NORMAL_DEPTH	2
+/*
+ * For 4k random writes, we allow hsq_depth to increase to 5
+ * for better performance.
+ */
+#define HSQ_PERFORMANCE_DEPTH	5
struct hsq_slot {
	struct mmc_request *mrq;
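The two depth constants take effect through mmc->hsq_depth; the code that consumes the value lives outside this excerpt. Purely as an illustration of how such a cap gates dispatch (fake_queue and can_dispatch are invented names, not the actual mmc core logic):

#include <stdbool.h>
#include <stdio.h>

struct fake_queue {
	unsigned in_flight;  /* requests already handed to the host */
	unsigned depth_cap;  /* HSQ_NORMAL_DEPTH or HSQ_PERFORMANCE_DEPTH */
};

/* Stop feeding the hardware once the cap is reached. */
static bool can_dispatch(const struct fake_queue *q)
{
	return q->in_flight < q->depth_cap;
}

int main(void)
{
	struct fake_queue q = { .in_flight = 2, .depth_cap = 2 };

	printf("normal cap: %s\n",
	       can_dispatch(&q) ? "dispatch" : "hold");	/* hold */

	q.depth_cap = 5;	/* heuristic saw two or more 4KB writes */
	printf("performance cap: %s\n",
	       can_dispatch(&q) ? "dispatch" : "hold");	/* dispatch */
	return 0;
}

Keeping the default at 2 bounds the latency any queued request can accumulate behind in-flight neighbors, while the temporary bump to 5 lets a burst of small random writes stay pipelined in the controller.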