author	Apelete Seketeli <apelete@seketeli.net>	2014-05-04 18:42:36 (GMT)
committer	Apelete Seketeli <apelete@seketeli.net>	2014-05-04 18:42:36 (GMT)
commit	674a8eba035ab6df48d07add6875ddfa591079a1 (patch)
tree	6d5b2ee9ebd8d8d3992657cb54d3996b088516d9
parent	869c1758ed83f5b336c874db3c0fa4d02ea41459 (diff)
jz4740_mmc: flush and invalidate data cache for dma transfers
In order to preserve cache coherency during DMA transfers, flush the cache before a write and invalidate it after a read. To make the invalidation possible, a DMA descriptor callback is added to signal DMA transfer completion; the cache is invalidated in that callback once the read has finished.

Signed-off-by: Apelete Seketeli <apelete@seketeli.net>
-rw-r--r--	drivers/mmc/host/jz4740_mmc.c	40
1 file changed, 37 insertions, 3 deletions
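The hunks below add only the producer side of the synchronisation: the DMA descriptor callback invalidates the cache on reads and then calls complete() on the new dma_complete field. For orientation, here is a minimal sketch of the consumer side such a field implies, i.e. initialising the completion and waiting on it before read data is handed back. The helper names and the 5-second timeout are illustrative assumptions, not part of this patch.

/* Sketch only -- not part of this patch. Shows how the new
 * dma_complete field would typically be initialised and consumed. */
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static void jz4740_mmc_dma_completion_init(struct jz4740_mmc_host *host)
{
	/* called once at probe time, alongside the DMA channel setup */
	init_completion(&host->dma_complete);
}

static int jz4740_mmc_dma_wait(struct jz4740_mmc_host *host)
{
	/* called from the request path before read data is handed back;
	 * the timeout value is an arbitrary choice for this sketch */
	if (!wait_for_completion_timeout(&host->dma_complete,
					 msecs_to_jiffies(5000)))
		return -ETIMEDOUT;

	/* re-arm for the next transfer */
	reinit_completion(&host->dma_complete);
	return 0;
}
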
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 1b7bd20..98b2b0e 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -140,9 +140,11 @@ struct jz4740_mmc_host {
struct sg_mapping_iter miter;
enum jz4740_mmc_state state;
- struct dma_chan *dma_rx;
- struct dma_chan *dma_tx;
- bool use_dma;
+ /* DMA support */
+ struct dma_chan *dma_rx;
+ struct dma_chan *dma_tx;
+ struct completion dma_complete;
+ bool use_dma;
#define JZ4740_MMC_DATADIR_NONE 0
#define JZ4740_MMC_DATADIR_READ 1
@@ -154,6 +156,28 @@ struct jz4740_mmc_host {
/*----------------------------------------------------------------------------*/
/* DMA infrastructure */
+static void jz4740_dma_complete(void *arg)
+{
+ struct jz4740_mmc_host *host = arg;
+ struct mmc_request *req = host->req;
+ struct mmc_data *data = host->req->cmd->data;
+
+ dev_warn(&host->pdev->dev, "DMA command completed\n");
+
+ if (WARN(!req || !req->data, "%s: NULL data in DMA completion!\n",
+ dev_name(&host->pdev->dev)))
+ return;
+
+ /* invalidate cache after read */
+ if (host->data_dir == JZ4740_MMC_DATADIR_READ) {
+ dma_cache_inv((u32) data->sg->dma_address,
+ sizeof(data->sg->dma_address));
+ dev_warn(mmc_dev(host->mmc), "Invalidate cache after read OK\n");
+ }
+
+ complete(&host->dma_complete);
+}
+
static int jz4740_send_dma_request(struct jz4740_mmc_host *host,
struct mmc_data *data)
{
@@ -205,6 +229,16 @@ static int jz4740_send_dma_request(struct jz4740_mmc_host *host,
}
}
+ /* setup descriptor callback which signals dma completion */
+ desc->callback = jz4740_dma_complete;
+ desc->callback_param = host;
+
+ /* flush cache before write */
+ if (host->data_dir == JZ4740_MMC_DATADIR_WRITE) {
+ flush_cache_all();
+ dev_warn(mmc_dev(host->mmc), "Flush cache before write OK\n");
+ }
+
dmaengine_submit(desc);
dma_async_issue_pending(chan);
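
A note on the cache maintenance above: dma_cache_inv() is passed sizeof(data->sg->dma_address), i.e. the size of the address field rather than the length of the transferred buffer, and flush_cache_all() touches far more than the buffer being written. The generic DMA API expresses the same flush-before-write / invalidate-after-read intent per scatterlist; the following is a hedged sketch of what that could look like, assuming the list was mapped with dma_map_sg() and that sg_count (an illustrative name, not in this patch) holds its return value.

/* Sketch only -- equivalent cache maintenance through the generic DMA
 * API instead of dma_cache_inv()/flush_cache_all(). */
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/core.h>

static void jz4740_mmc_dma_sync(struct jz4740_mmc_host *host,
				struct mmc_data *data, int sg_count)
{
	struct dma_chan *chan = (data->flags & MMC_DATA_READ) ?
				host->dma_rx : host->dma_tx;
	struct device *dev = chan->device->dev;

	if (data->flags & MMC_DATA_READ)
		/* after a read: invalidate so the CPU sees the DMA'd data */
		dma_sync_sg_for_cpu(dev, data->sg, sg_count, DMA_FROM_DEVICE);
	else
		/* before a write: flush dirty lines covering the buffer */
		dma_sync_sg_for_device(dev, data->sg, sg_count, DMA_TO_DEVICE);
}

This keeps the maintenance scoped to the buffers actually involved in the transfer, which is what the rule stated in the commit message is after.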