Lines Matching +full:clkdiv (drivers/mmc/host/sh_mmcif.c)

1 // SPDX-License-Identifier: GPL-2.0
23 * request- and stage-specific handler methods.
39 #include <linux/dma-mapping.h>
46 #include <linux/mmc/slot-gpio.h>
240 u32 clkdiv_map; /* see CE_CLK_CTRL::CLKDIV */
250 { .compatible = "renesas,sh-mmcif" },
255 #define sh_mmcif_host_to_dev(host) (&host->pd->dev)
260 writel(val | readl(host->addr + reg), host->addr + reg); in sh_mmcif_bitset()
266 writel(~val & readl(host->addr + reg), host->addr + reg); in sh_mmcif_bitclr()
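The two hits above are the driver's read-modify-write register helpers. Reassembled as complete functions (the signatures are inferred from the call sites elsewhere in this listing, so treat this as a sketch rather than a verbatim quote):

static void sh_mmcif_bitset(struct sh_mmcif_host *host,
			    unsigned int reg, u32 val)
{
	/* OR the requested bits into the register */
	writel(val | readl(host->addr + reg), host->addr + reg);
}

static void sh_mmcif_bitclr(struct sh_mmcif_host *host,
			    unsigned int reg, u32 val)
{
	/* clear the requested bits, leaving the rest untouched */
	writel(~val & readl(host->addr + reg), host->addr + reg);
}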
272 struct mmc_request *mrq = host->mrq; in sh_mmcif_dma_complete()
277 if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n", in sh_mmcif_dma_complete()
281 complete(&host->dma_complete); in sh_mmcif_dma_complete()
286 struct mmc_data *data = host->mrq->data; in sh_mmcif_start_dma_rx()
287 struct scatterlist *sg = data->sg; in sh_mmcif_start_dma_rx()
289 struct dma_chan *chan = host->chan_rx; in sh_mmcif_start_dma_rx()
291 dma_cookie_t cookie = -EINVAL; in sh_mmcif_start_dma_rx()
294 ret = dma_map_sg(chan->device->dev, sg, data->sg_len, in sh_mmcif_start_dma_rx()
297 host->dma_active = true; in sh_mmcif_start_dma_rx()
303 desc->callback = sh_mmcif_dma_complete; in sh_mmcif_start_dma_rx()
304 desc->callback_param = host; in sh_mmcif_start_dma_rx()
309 dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n", in sh_mmcif_start_dma_rx()
310 __func__, data->sg_len, ret, cookie); in sh_mmcif_start_dma_rx()
315 ret = -EIO; in sh_mmcif_start_dma_rx()
316 host->chan_rx = NULL; in sh_mmcif_start_dma_rx()
317 host->dma_active = false; in sh_mmcif_start_dma_rx()
320 chan = host->chan_tx; in sh_mmcif_start_dma_rx()
322 host->chan_tx = NULL; in sh_mmcif_start_dma_rx()
331 desc, cookie, data->sg_len); in sh_mmcif_start_dma_rx()
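Between the matched lines, sh_mmcif_start_dma_rx() follows the stock dmaengine slave-RX sequence. The prep/submit/issue calls below come from the generic dmaengine API and are filled in as an assumption, not quoted from the driver:

	struct dma_async_tx_descriptor *desc = NULL;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_FROM_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_DEV_TO_MEM,
					       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = sh_mmcif_dma_complete;	/* completes host->dma_complete */
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);	/* queue the descriptor */
		dma_async_issue_pending(chan);		/* start the transfer */
	}

The TX path in the next hits mirrors this with DMA_TO_DEVICE / DMA_MEM_TO_DEV; on failure either path drops its channel pointer to NULL, and the request falls back to PIO.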
336 struct mmc_data *data = host->mrq->data; in sh_mmcif_start_dma_tx()
337 struct scatterlist *sg = data->sg; in sh_mmcif_start_dma_tx()
339 struct dma_chan *chan = host->chan_tx; in sh_mmcif_start_dma_tx()
341 dma_cookie_t cookie = -EINVAL; in sh_mmcif_start_dma_tx()
344 ret = dma_map_sg(chan->device->dev, sg, data->sg_len, in sh_mmcif_start_dma_tx()
347 host->dma_active = true; in sh_mmcif_start_dma_tx()
353 desc->callback = sh_mmcif_dma_complete; in sh_mmcif_start_dma_tx()
354 desc->callback_param = host; in sh_mmcif_start_dma_tx()
359 dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n", in sh_mmcif_start_dma_tx()
360 __func__, data->sg_len, ret, cookie); in sh_mmcif_start_dma_tx()
365 ret = -EIO; in sh_mmcif_start_dma_tx()
366 host->chan_tx = NULL; in sh_mmcif_start_dma_tx()
367 host->dma_active = false; in sh_mmcif_start_dma_tx()
370 chan = host->chan_rx; in sh_mmcif_start_dma_tx()
372 host->chan_rx = NULL; in sh_mmcif_start_dma_tx()
404 res = platform_get_resource(host->pd, IORESOURCE_MEM, 0); in sh_mmcif_dma_slave_config()
406 return -EINVAL; in sh_mmcif_dma_slave_config()
411 cfg.src_addr = res->start + MMCIF_CE_DATA; in sh_mmcif_dma_slave_config()
414 cfg.dst_addr = res->start + MMCIF_CE_DATA; in sh_mmcif_dma_slave_config()
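sh_mmcif_dma_slave_config() points both directions of a slave channel at the same data FIFO register. The whole helper as a sketch, with the struct dma_slave_config fields that the search elides filled in as assumptions (the 4-byte bus width in particular is inferred from the 32-bit FIFO accesses elsewhere in this listing):

static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host,
				     struct dma_chan *chan,
				     enum dma_transfer_direction direction)
{
	struct resource *res;
	struct dma_slave_config cfg = { 0, };

	res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	cfg.direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		/* device -> memory: DMA reads from the data FIFO */
		cfg.src_addr = res->start + MMCIF_CE_DATA;
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	} else {
		/* memory -> device: DMA writes into the data FIFO */
		cfg.dst_addr = res->start + MMCIF_CE_DATA;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	return dmaengine_slave_config(chan, &cfg);
}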
424 host->dma_active = false; in sh_mmcif_request_dma()
427 if (IS_ENABLED(CONFIG_SUPERH) && dev->platform_data) { in sh_mmcif_request_dma()
428 struct sh_mmcif_plat_data *pdata = dev->platform_data; in sh_mmcif_request_dma()
430 host->chan_tx = sh_mmcif_request_dma_pdata(host, in sh_mmcif_request_dma()
431 pdata->slave_id_tx); in sh_mmcif_request_dma()
432 host->chan_rx = sh_mmcif_request_dma_pdata(host, in sh_mmcif_request_dma()
433 pdata->slave_id_rx); in sh_mmcif_request_dma()
435 host->chan_tx = dma_request_chan(dev, "tx"); in sh_mmcif_request_dma()
436 if (IS_ERR(host->chan_tx)) in sh_mmcif_request_dma()
437 host->chan_tx = NULL; in sh_mmcif_request_dma()
438 host->chan_rx = dma_request_chan(dev, "rx"); in sh_mmcif_request_dma()
439 if (IS_ERR(host->chan_rx)) in sh_mmcif_request_dma()
440 host->chan_rx = NULL; in sh_mmcif_request_dma()
442 dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx, in sh_mmcif_request_dma()
443 host->chan_rx); in sh_mmcif_request_dma()
445 if (!host->chan_tx || !host->chan_rx || in sh_mmcif_request_dma()
446 sh_mmcif_dma_slave_config(host, host->chan_tx, DMA_MEM_TO_DEV) || in sh_mmcif_request_dma()
447 sh_mmcif_dma_slave_config(host, host->chan_rx, DMA_DEV_TO_MEM)) in sh_mmcif_request_dma()
453 if (host->chan_tx) in sh_mmcif_request_dma()
454 dma_release_channel(host->chan_tx); in sh_mmcif_request_dma()
455 if (host->chan_rx) in sh_mmcif_request_dma()
456 dma_release_channel(host->chan_rx); in sh_mmcif_request_dma()
457 host->chan_tx = host->chan_rx = NULL; in sh_mmcif_request_dma()
464 if (host->chan_tx) { in sh_mmcif_release_dma()
465 struct dma_chan *chan = host->chan_tx; in sh_mmcif_release_dma()
466 host->chan_tx = NULL; in sh_mmcif_release_dma()
469 if (host->chan_rx) { in sh_mmcif_release_dma()
470 struct dma_chan *chan = host->chan_rx; in sh_mmcif_release_dma()
471 host->chan_rx = NULL; in sh_mmcif_release_dma()
475 host->dma_active = false; in sh_mmcif_release_dma()
481 struct sh_mmcif_plat_data *p = dev->platform_data; in sh_mmcif_clock_control()
482 bool sup_pclk = p ? p->sup_pclk : false; in sh_mmcif_clock_control()
483 unsigned int current_clk = clk_get_rate(host->clk); in sh_mmcif_clock_control()
484 unsigned int clkdiv; in sh_mmcif_clock_control() local
492 if (host->clkdiv_map) { in sh_mmcif_clock_control()
496 clkdiv = 0; in sh_mmcif_clock_control()
499 for (i = 31; i >= 0; i--) { in sh_mmcif_clock_control()
500 if (!((1 << i) & host->clkdiv_map)) in sh_mmcif_clock_control()
505 * -> parent_freq = clk x div in sh_mmcif_clock_control()
509 freq = clk_round_rate(host->clk, clk * div); in sh_mmcif_clock_control()
511 diff = (myclk > clk) ? myclk - clk : clk - myclk; in sh_mmcif_clock_control()
515 clkdiv = i; in sh_mmcif_clock_control()
521 (best_freq >> (clkdiv + 1)), clk, best_freq, clkdiv); in sh_mmcif_clock_control()
523 clk_set_rate(host->clk, best_freq); in sh_mmcif_clock_control()
524 clkdiv = clkdiv << 16; in sh_mmcif_clock_control()
526 clkdiv = CLK_SUP_PCLK; in sh_mmcif_clock_control()
528 clkdiv = (fls(DIV_ROUND_UP(current_clk, clk) - 1) - 1) << 16; in sh_mmcif_clock_control()
531 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & clkdiv); in sh_mmcif_clock_control()
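The clkdiv_map branch above searches, among the divisors the SoC allows, for the parent rate and divisor whose quotient lands closest to the requested clock; a CLKDIV value of i divides the parent by 2^(i+1). A condensed sketch of that search (the local declarations and the initial mindiff are assumptions):

	unsigned long freq, myclk, diff, mindiff = ULONG_MAX, best_freq = 0;
	unsigned int div;
	int i;

	for (i = 31; i >= 0; i--) {
		if (!((1 << i) & host->clkdiv_map))
			continue;		/* divisor 2^(i+1) not usable */

		/* clk = parent / div, so ask for parent_freq = clk x div */
		div = 1 << (i + 1);
		freq = clk_round_rate(host->clk, clk * div);
		myclk = freq / div;
		diff = (myclk > clk) ? myclk - clk : clk - myclk;

		if (diff <= mindiff) {		/* keep the closest match */
			mindiff = diff;
			best_freq = freq;
			clkdiv = i;
		}
	}

	clk_set_rate(host->clk, best_freq);
	clkdiv = clkdiv << 16;	/* the CLKDIV field sits at bits 16+ of CE_CLK_CTRL */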
539 tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL); in sh_mmcif_sync_reset()
541 sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON); in sh_mmcif_sync_reset()
542 sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF); in sh_mmcif_sync_reset()
543 if (host->ccs_enable) in sh_mmcif_sync_reset()
545 if (host->clk_ctrl2_enable) in sh_mmcif_sync_reset()
546 sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000); in sh_mmcif_sync_reset()
559 host->sd_error = false; in sh_mmcif_error_manage()
561 state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1); in sh_mmcif_error_manage()
562 state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2); in sh_mmcif_error_manage()
569 for (timeout = 10000; timeout; timeout--) { in sh_mmcif_error_manage()
570 if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1) in sh_mmcif_error_manage()
578 return -EIO; in sh_mmcif_error_manage()
582 return -EIO; in sh_mmcif_error_manage()
587 host->state, host->wait_for); in sh_mmcif_error_manage()
588 ret = -EIO; in sh_mmcif_error_manage()
591 host->state, host->wait_for); in sh_mmcif_error_manage()
592 ret = -ETIMEDOUT; in sh_mmcif_error_manage()
595 host->state, host->wait_for); in sh_mmcif_error_manage()
596 ret = -EIO; in sh_mmcif_error_manage()
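The timeout loop in these error_manage() hits is the tail of a forced command-sequence abort: if HOST_STS1 still reports a running sequence, the driver breaks it and polls until the controller lets go. A hedged sketch; the STS1_CMDSEQ and CMD_CTRL_BREAK names follow the driver's register naming scheme but are assumptions here:

	if (state1 & STS1_CMDSEQ) {
		/* pulse the break bit to force the running sequence to stop */
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
		sh_mmcif_bitclr(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
		for (timeout = 10000; timeout; timeout--) {
			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
			      & STS1_CMDSEQ))
				break;		/* sequence has stopped */
			mdelay(1);
		}
		if (!timeout)
			return -EIO;		/* even the abort timed out */
		sh_mmcif_sync_reset(host);	/* clean slate for the next command */
		return -EIO;
	}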
604 struct mmc_data *data = mrq->data; in sh_mmcif_single_read()
606 host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & in sh_mmcif_single_read()
609 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, in sh_mmcif_single_read()
612 host->wait_for = MMCIF_WAIT_FOR_READ; in sh_mmcif_single_read()
620 struct sg_mapping_iter *sgm = &host->sg_miter; in sh_mmcif_read_block()
622 struct mmc_data *data = host->mrq->data; in sh_mmcif_read_block()
626 if (host->sd_error) { in sh_mmcif_read_block()
628 data->error = sh_mmcif_error_manage(host); in sh_mmcif_read_block()
629 dev_dbg(dev, "%s(): %d\n", __func__, data->error); in sh_mmcif_read_block()
639 p = sgm->addr; in sh_mmcif_read_block()
641 for (i = 0; i < host->blocksize / 4; i++) in sh_mmcif_read_block()
642 *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA); in sh_mmcif_read_block()
644 sg_miter_stop(&host->sg_miter); in sh_mmcif_read_block()
648 host->wait_for = MMCIF_WAIT_FOR_READ_END; in sh_mmcif_read_block()
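The step elided between the error check and the copy loop is the scatterlist mapping: sg_miter_next() maps the next chunk and exposes its CPU address through sgm->addr. One block is then drained from the FIFO 32 bits at a time, hence blocksize / 4 iterations. A sketch assembled from the hits plus the standard sg_miter API:

	if (!sg_miter_next(sgm))
		return false;			/* no buffer to land the block in */

	p = sgm->addr;				/* CPU address of the mapped chunk */
	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	sg_miter_stop(&host->sg_miter);		/* flush and unmap the chunk */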
656 struct sg_mapping_iter *sgm = &host->sg_miter; in sh_mmcif_multi_read()
657 struct mmc_data *data = mrq->data; in sh_mmcif_multi_read()
659 if (!data->sg_len || !data->sg->length) in sh_mmcif_multi_read()
662 host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & in sh_mmcif_multi_read()
665 sg_miter_start(sgm, data->sg, data->sg_len, in sh_mmcif_multi_read()
674 host->wait_for = MMCIF_WAIT_FOR_MREAD; in sh_mmcif_multi_read()
681 struct sg_mapping_iter *sgm = &host->sg_miter; in sh_mmcif_mread_block()
683 struct mmc_data *data = host->mrq->data; in sh_mmcif_mread_block()
687 if (host->sd_error) { in sh_mmcif_mread_block()
689 data->error = sh_mmcif_error_manage(host); in sh_mmcif_mread_block()
690 dev_dbg(dev, "%s(): %d\n", __func__, data->error); in sh_mmcif_mread_block()
694 p = sgm->addr; in sh_mmcif_mread_block()
696 for (i = 0; i < host->blocksize / 4; i++) in sh_mmcif_mread_block()
697 *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA); in sh_mmcif_mread_block()
699 sgm->consumed = host->blocksize; in sh_mmcif_mread_block()
714 struct mmc_data *data = mrq->data; in sh_mmcif_single_write()
716 host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & in sh_mmcif_single_write()
719 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, in sh_mmcif_single_write()
722 host->wait_for = MMCIF_WAIT_FOR_WRITE; in sh_mmcif_single_write()
730 struct sg_mapping_iter *sgm = &host->sg_miter; in sh_mmcif_write_block()
732 struct mmc_data *data = host->mrq->data; in sh_mmcif_write_block()
736 if (host->sd_error) { in sh_mmcif_write_block()
738 data->error = sh_mmcif_error_manage(host); in sh_mmcif_write_block()
739 dev_dbg(dev, "%s(): %d\n", __func__, data->error); in sh_mmcif_write_block()
749 p = sgm->addr; in sh_mmcif_write_block()
751 for (i = 0; i < host->blocksize / 4; i++) in sh_mmcif_write_block()
752 sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); in sh_mmcif_write_block()
754 sg_miter_stop(&host->sg_miter); in sh_mmcif_write_block()
758 host->wait_for = MMCIF_WAIT_FOR_WRITE_END; in sh_mmcif_write_block()
766 struct sg_mapping_iter *sgm = &host->sg_miter; in sh_mmcif_multi_write()
767 struct mmc_data *data = mrq->data; in sh_mmcif_multi_write()
769 if (!data->sg_len || !data->sg->length) in sh_mmcif_multi_write()
772 host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & in sh_mmcif_multi_write()
775 sg_miter_start(sgm, data->sg, data->sg_len, in sh_mmcif_multi_write()
784 host->wait_for = MMCIF_WAIT_FOR_MWRITE; in sh_mmcif_multi_write()
791 struct sg_mapping_iter *sgm = &host->sg_miter; in sh_mmcif_mwrite_block()
793 struct mmc_data *data = host->mrq->data; in sh_mmcif_mwrite_block()
797 if (host->sd_error) { in sh_mmcif_mwrite_block()
799 data->error = sh_mmcif_error_manage(host); in sh_mmcif_mwrite_block()
800 dev_dbg(dev, "%s(): %d\n", __func__, data->error); in sh_mmcif_mwrite_block()
804 p = sgm->addr; in sh_mmcif_mwrite_block()
806 for (i = 0; i < host->blocksize / 4; i++) in sh_mmcif_mwrite_block()
807 sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); in sh_mmcif_mwrite_block()
809 sgm->consumed = host->blocksize; in sh_mmcif_mwrite_block()
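In the multi-block variants the chunk is not unmapped after every block; instead sgm->consumed is set to one block so that the next sg_miter_next() call advances by exactly that much. One iteration of the write side as a sketch (the mask bit in the interrupt re-arm at the end is an assumption):

	if (!sg_miter_next(sgm))
		return false;			/* all blocks transferred */

	p = sgm->addr;
	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
	sgm->consumed = host->blocksize;	/* advance one block per pass */

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);	/* arm next block */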
824 if (cmd->flags & MMC_RSP_136) { in sh_mmcif_get_response()
825 cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3); in sh_mmcif_get_response()
826 cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2); in sh_mmcif_get_response()
827 cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1); in sh_mmcif_get_response()
828 cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0); in sh_mmcif_get_response()
830 cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0); in sh_mmcif_get_response()
836 cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12); in sh_mmcif_get_cmd12response()
843 struct mmc_data *data = mrq->data; in sh_mmcif_set_cmd()
844 struct mmc_command *cmd = mrq->cmd; in sh_mmcif_set_cmd()
845 u32 opc = cmd->opcode; in sh_mmcif_set_cmd()
871 switch (host->bus_width) { in sh_mmcif_set_cmd()
885 switch (host->timing) { in sh_mmcif_set_cmd()
905 data->blocks << 16); in sh_mmcif_set_cmd()
943 return -EINVAL; in sh_mmcif_data_trans()
950 struct mmc_command *cmd = mrq->cmd; in sh_mmcif_start_cmd()
955 if (cmd->flags & MMC_RSP_BUSY) in sh_mmcif_start_cmd()
960 if (host->ccs_enable) in sh_mmcif_start_cmd()
963 if (mrq->data) { in sh_mmcif_start_cmd()
964 sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0); in sh_mmcif_start_cmd()
965 sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, in sh_mmcif_start_cmd()
966 mrq->data->blksz); in sh_mmcif_start_cmd()
970 if (host->ccs_enable) in sh_mmcif_start_cmd()
971 sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0); in sh_mmcif_start_cmd()
973 sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS); in sh_mmcif_start_cmd()
974 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask); in sh_mmcif_start_cmd()
976 sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg); in sh_mmcif_start_cmd()
978 spin_lock_irqsave(&host->lock, flags); in sh_mmcif_start_cmd()
979 sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc); in sh_mmcif_start_cmd()
981 host->wait_for = MMCIF_WAIT_FOR_CMD; in sh_mmcif_start_cmd()
982 schedule_delayed_work(&host->timeout_work, host->timeout); in sh_mmcif_start_cmd()
983 spin_unlock_irqrestore(&host->lock, flags); in sh_mmcif_start_cmd()
991 switch (mrq->cmd->opcode) { in sh_mmcif_stop_cmd()
1000 mrq->stop->error = sh_mmcif_error_manage(host); in sh_mmcif_stop_cmd()
1004 host->wait_for = MMCIF_WAIT_FOR_STOP; in sh_mmcif_stop_cmd()
1013 spin_lock_irqsave(&host->lock, flags); in sh_mmcif_request()
1014 if (host->state != STATE_IDLE) { in sh_mmcif_request()
1016 __func__, host->state); in sh_mmcif_request()
1017 spin_unlock_irqrestore(&host->lock, flags); in sh_mmcif_request()
1018 mrq->cmd->error = -EAGAIN; in sh_mmcif_request()
1023 host->state = STATE_REQUEST; in sh_mmcif_request()
1024 spin_unlock_irqrestore(&host->lock, flags); in sh_mmcif_request()
1026 host->mrq = mrq; in sh_mmcif_request()
1035 if (host->mmc->f_max) { in sh_mmcif_clk_setup()
1038 f_max = host->mmc->f_max; in sh_mmcif_clk_setup()
1040 f_min = clk_round_rate(host->clk, f_min_old / 2); in sh_mmcif_clk_setup()
1047 * This driver assumes this SoC is R-Car Gen2 or later in sh_mmcif_clk_setup()
1049 host->clkdiv_map = 0x3ff; in sh_mmcif_clk_setup()
1051 host->mmc->f_max = f_max >> ffs(host->clkdiv_map); in sh_mmcif_clk_setup()
1052 host->mmc->f_min = f_min >> fls(host->clkdiv_map); in sh_mmcif_clk_setup()
1054 unsigned int clk = clk_get_rate(host->clk); in sh_mmcif_clk_setup()
1056 host->mmc->f_max = clk / 2; in sh_mmcif_clk_setup()
1057 host->mmc->f_min = clk / 512; in sh_mmcif_clk_setup()
1061 host->mmc->f_max, host->mmc->f_min); in sh_mmcif_clk_setup()
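A worked example of the f_max / f_min derivation above, assuming a hypothetical 97.5 MHz rounded parent rate:

	/*
	 * clkdiv_map = 0x3ff  ->  bits 0..9 set  ->  divisors 2^1 .. 2^10
	 *
	 *	mmc->f_max = 97500000 >> ffs(0x3ff) = 97500000 >> 1  = 48750000 Hz
	 *	mmc->f_min = 97500000 >> fls(0x3ff) = 97500000 >> 10 =    95214 Hz
	 */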
1070 spin_lock_irqsave(&host->lock, flags); in sh_mmcif_set_ios()
1071 if (host->state != STATE_IDLE) { in sh_mmcif_set_ios()
1073 __func__, host->state); in sh_mmcif_set_ios()
1074 spin_unlock_irqrestore(&host->lock, flags); in sh_mmcif_set_ios()
1078 host->state = STATE_IOS; in sh_mmcif_set_ios()
1079 spin_unlock_irqrestore(&host->lock, flags); in sh_mmcif_set_ios()
1081 switch (ios->power_mode) { in sh_mmcif_set_ios()
1083 if (!IS_ERR(mmc->supply.vmmc)) in sh_mmcif_set_ios()
1084 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); in sh_mmcif_set_ios()
1085 if (!host->power) { in sh_mmcif_set_ios()
1086 clk_prepare_enable(host->clk); in sh_mmcif_set_ios()
1090 host->power = true; in sh_mmcif_set_ios()
1094 if (!IS_ERR(mmc->supply.vmmc)) in sh_mmcif_set_ios()
1095 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); in sh_mmcif_set_ios()
1096 if (host->power) { in sh_mmcif_set_ios()
1100 clk_disable_unprepare(host->clk); in sh_mmcif_set_ios()
1101 host->power = false; in sh_mmcif_set_ios()
1105 sh_mmcif_clock_control(host, ios->clock); in sh_mmcif_set_ios()
1109 host->timing = ios->timing; in sh_mmcif_set_ios()
1110 host->bus_width = ios->bus_width; in sh_mmcif_set_ios()
1111 host->state = STATE_IDLE; in sh_mmcif_set_ios()
1122 struct mmc_command *cmd = host->mrq->cmd; in sh_mmcif_end_cmd()
1123 struct mmc_data *data = host->mrq->data; in sh_mmcif_end_cmd()
1127 if (host->sd_error) { in sh_mmcif_end_cmd()
1128 switch (cmd->opcode) { in sh_mmcif_end_cmd()
1132 cmd->error = -ETIMEDOUT; in sh_mmcif_end_cmd()
1135 cmd->error = sh_mmcif_error_manage(host); in sh_mmcif_end_cmd()
1139 cmd->opcode, cmd->error); in sh_mmcif_end_cmd()
1140 host->sd_error = false; in sh_mmcif_end_cmd()
1143 if (!(cmd->flags & MMC_RSP_PRESENT)) { in sh_mmcif_end_cmd()
1144 cmd->error = 0; in sh_mmcif_end_cmd()
1157 init_completion(&host->dma_complete); in sh_mmcif_end_cmd()
1159 if (data->flags & MMC_DATA_READ) { in sh_mmcif_end_cmd()
1160 if (host->chan_rx) in sh_mmcif_end_cmd()
1163 if (host->chan_tx) in sh_mmcif_end_cmd()
1167 if (!host->dma_active) { in sh_mmcif_end_cmd()
1168 data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode); in sh_mmcif_end_cmd()
1169 return !data->error; in sh_mmcif_end_cmd()
1173 time = wait_for_completion_interruptible_timeout(&host->dma_complete, in sh_mmcif_end_cmd()
1174 host->timeout); in sh_mmcif_end_cmd()
1176 if (data->flags & MMC_DATA_READ) in sh_mmcif_end_cmd()
1177 dma_unmap_sg(host->chan_rx->device->dev, in sh_mmcif_end_cmd()
1178 data->sg, data->sg_len, in sh_mmcif_end_cmd()
1181 dma_unmap_sg(host->chan_tx->device->dev, in sh_mmcif_end_cmd()
1182 data->sg, data->sg_len, in sh_mmcif_end_cmd()
1185 if (host->sd_error) { in sh_mmcif_end_cmd()
1186 dev_err(host->mmc->parent, in sh_mmcif_end_cmd()
1189 data->error = sh_mmcif_error_manage(host); in sh_mmcif_end_cmd()
1191 dev_err(host->mmc->parent, "DMA timeout!\n"); in sh_mmcif_end_cmd()
1192 data->error = -ETIMEDOUT; in sh_mmcif_end_cmd()
1194 dev_err(host->mmc->parent, in sh_mmcif_end_cmd()
1196 data->error = time; in sh_mmcif_end_cmd()
1200 host->dma_active = false; in sh_mmcif_end_cmd()
1202 if (data->error) { in sh_mmcif_end_cmd()
1203 data->bytes_xfered = 0; in sh_mmcif_end_cmd()
1205 if (data->flags & MMC_DATA_READ) in sh_mmcif_end_cmd()
1206 dmaengine_terminate_sync(host->chan_rx); in sh_mmcif_end_cmd()
1208 dmaengine_terminate_sync(host->chan_tx); in sh_mmcif_end_cmd()
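The branches above decode the three-way return value of wait_for_completion_interruptible_timeout(); this is the generic completion API contract:

	/*
	 * wait_for_completion_interruptible_timeout() returns:
	 *   > 0  completed; the value is the remaining timeout in jiffies
	 *   = 0  timed out                  -> data->error = -ETIMEDOUT
	 *   < 0  -ERESTARTSYS (got signal)  -> data->error = time
	 * host->sd_error is checked first, since a controller-reported
	 * error trumps a successful-looking DMA completion.
	 */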
1223 spin_lock_irqsave(&host->lock, flags); in sh_mmcif_irqt()
1224 wait_work = host->wait_for; in sh_mmcif_irqt()
1225 spin_unlock_irqrestore(&host->lock, flags); in sh_mmcif_irqt()
1227 cancel_delayed_work_sync(&host->timeout_work); in sh_mmcif_irqt()
1229 mutex_lock(&host->thread_lock); in sh_mmcif_irqt()
1231 mrq = host->mrq; in sh_mmcif_irqt()
1234 host->state, host->wait_for); in sh_mmcif_irqt()
1235 mutex_unlock(&host->thread_lock); in sh_mmcif_irqt()
1241 * request has to be completed - successfully or not in sh_mmcif_irqt()
1246 mutex_unlock(&host->thread_lock); in sh_mmcif_irqt()
1269 if (host->sd_error) { in sh_mmcif_irqt()
1270 mrq->stop->error = sh_mmcif_error_manage(host); in sh_mmcif_irqt()
1271 dev_dbg(dev, "%s(): %d\n", __func__, mrq->stop->error); in sh_mmcif_irqt()
1274 sh_mmcif_get_cmd12response(host, mrq->stop); in sh_mmcif_irqt()
1275 mrq->stop->error = 0; in sh_mmcif_irqt()
1279 if (host->sd_error) { in sh_mmcif_irqt()
1280 mrq->data->error = sh_mmcif_error_manage(host); in sh_mmcif_irqt()
1281 dev_dbg(dev, "%s(): %d\n", __func__, mrq->data->error); in sh_mmcif_irqt()
1289 schedule_delayed_work(&host->timeout_work, host->timeout); in sh_mmcif_irqt()
1291 mutex_unlock(&host->thread_lock); in sh_mmcif_irqt()
1295 if (host->wait_for != MMCIF_WAIT_FOR_STOP) { in sh_mmcif_irqt()
1296 struct mmc_data *data = mrq->data; in sh_mmcif_irqt()
1297 if (!mrq->cmd->error && data && !data->error) in sh_mmcif_irqt()
1298 data->bytes_xfered = in sh_mmcif_irqt()
1299 data->blocks * data->blksz; in sh_mmcif_irqt()
1301 if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) { in sh_mmcif_irqt()
1303 if (!mrq->stop->error) { in sh_mmcif_irqt()
1304 schedule_delayed_work(&host->timeout_work, host->timeout); in sh_mmcif_irqt()
1305 mutex_unlock(&host->thread_lock); in sh_mmcif_irqt()
1311 host->wait_for = MMCIF_WAIT_FOR_REQUEST; in sh_mmcif_irqt()
1312 host->state = STATE_IDLE; in sh_mmcif_irqt()
1313 host->mrq = NULL; in sh_mmcif_irqt()
1314 mmc_request_done(host->mmc, mrq); in sh_mmcif_irqt()
1316 mutex_unlock(&host->thread_lock); in sh_mmcif_irqt()
1327 state = sh_mmcif_readl(host->addr, MMCIF_CE_INT); in sh_mmcif_intr()
1328 mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK); in sh_mmcif_intr()
1329 if (host->ccs_enable) in sh_mmcif_intr()
1330 sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask)); in sh_mmcif_intr()
1332 sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask)); in sh_mmcif_intr()
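	/*
	 * Acknowledge logic for the two writes above: the CE_INT status
	 * bits follow a write-0-to-clear convention (implied by the ~
	 * pattern), so writing ~(state & mask) clears exactly the
	 * asserted-and-enabled interrupts and leaves every other bit
	 * set, i.e. untouched. With CCS handling disabled, INT_CCS is
	 * forced to 1 so that bit is never cleared here.
	 */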
1340 host->sd_error = true; in sh_mmcif_intr()
1344 if (!host->mrq) in sh_mmcif_intr()
1346 if (!host->dma_active) in sh_mmcif_intr()
1348 else if (host->sd_error) in sh_mmcif_intr()
1361 struct mmc_request *mrq = host->mrq; in sh_mmcif_timeout_work()
1365 if (host->dying) in sh_mmcif_timeout_work()
1369 spin_lock_irqsave(&host->lock, flags); in sh_mmcif_timeout_work()
1370 if (host->state == STATE_IDLE) { in sh_mmcif_timeout_work()
1371 spin_unlock_irqrestore(&host->lock, flags); in sh_mmcif_timeout_work()
1376 host->wait_for, mrq->cmd->opcode); in sh_mmcif_timeout_work()
1378 host->state = STATE_TIMEOUT; in sh_mmcif_timeout_work()
1379 spin_unlock_irqrestore(&host->lock, flags); in sh_mmcif_timeout_work()
1385 switch (host->wait_for) { in sh_mmcif_timeout_work()
1387 mrq->cmd->error = sh_mmcif_error_manage(host); in sh_mmcif_timeout_work()
1390 mrq->stop->error = sh_mmcif_error_manage(host); in sh_mmcif_timeout_work()
1398 mrq->data->error = sh_mmcif_error_manage(host); in sh_mmcif_timeout_work()
1404 host->state = STATE_IDLE; in sh_mmcif_timeout_work()
1405 host->wait_for = MMCIF_WAIT_FOR_REQUEST; in sh_mmcif_timeout_work()
1406 host->mrq = NULL; in sh_mmcif_timeout_work()
1407 mmc_request_done(host->mmc, mrq); in sh_mmcif_timeout_work()
1413 struct sh_mmcif_plat_data *pd = dev->platform_data; in sh_mmcif_init_ocr()
1414 struct mmc_host *mmc = host->mmc; in sh_mmcif_init_ocr()
1421 if (!mmc->ocr_avail) in sh_mmcif_init_ocr()
1422 mmc->ocr_avail = pd->ocr; in sh_mmcif_init_ocr()
1423 else if (pd->ocr) in sh_mmcif_init_ocr()
1432 struct device *dev = &pdev->dev; in sh_mmcif_probe()
1433 struct sh_mmcif_plat_data *pd = dev->platform_data; in sh_mmcif_probe()
1448 return -ENOMEM; in sh_mmcif_probe()
1455 host->mmc = mmc; in sh_mmcif_probe()
1456 host->addr = reg; in sh_mmcif_probe()
1457 host->timeout = msecs_to_jiffies(10000); in sh_mmcif_probe()
1458 host->ccs_enable = true; in sh_mmcif_probe()
1459 host->clk_ctrl2_enable = false; in sh_mmcif_probe()
1461 host->pd = pdev; in sh_mmcif_probe()
1463 spin_lock_init(&host->lock); in sh_mmcif_probe()
1465 mmc->ops = &sh_mmcif_ops; in sh_mmcif_probe()
1468 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY; in sh_mmcif_probe()
1469 mmc->caps2 |= MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO; in sh_mmcif_probe()
1470 mmc->max_busy_timeout = 10000; in sh_mmcif_probe()
1472 if (pd && pd->caps) in sh_mmcif_probe()
1473 mmc->caps |= pd->caps; in sh_mmcif_probe()
1474 mmc->max_segs = 32; in sh_mmcif_probe()
1475 mmc->max_blk_size = 512; in sh_mmcif_probe()
1476 mmc->max_req_size = PAGE_SIZE * mmc->max_segs; in sh_mmcif_probe()
1477 mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size; in sh_mmcif_probe()
1478 mmc->max_seg_size = mmc->max_req_size; in sh_mmcif_probe()
1482 host->clk = devm_clk_get(dev, NULL); in sh_mmcif_probe()
1483 if (IS_ERR(host->clk)) { in sh_mmcif_probe()
1484 ret = PTR_ERR(host->clk); in sh_mmcif_probe()
1489 ret = clk_prepare_enable(host->clk); in sh_mmcif_probe()
1496 host->power = false; in sh_mmcif_probe()
1502 INIT_DELAYED_WORK(&host->timeout_work, sh_mmcif_timeout_work); in sh_mmcif_probe()
1505 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); in sh_mmcif_probe()
1524 mutex_init(&host->thread_lock); in sh_mmcif_probe()
1533 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff, in sh_mmcif_probe()
1534 clk_get_rate(host->clk) / 1000000UL); in sh_mmcif_probe()
1537 clk_disable_unprepare(host->clk); in sh_mmcif_probe()
1541 clk_disable_unprepare(host->clk); in sh_mmcif_probe()
1553 host->dying = true; in sh_mmcif_remove()
1554 clk_prepare_enable(host->clk); in sh_mmcif_remove()
1555 pm_runtime_get_sync(&pdev->dev); in sh_mmcif_remove()
1557 dev_pm_qos_hide_latency_limit(&pdev->dev); in sh_mmcif_remove()
1559 mmc_remove_host(host->mmc); in sh_mmcif_remove()
1560 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); in sh_mmcif_remove()
1565 * (a query on the linux-mmc mailing list didn't bring any replies). in sh_mmcif_remove()
1567 cancel_delayed_work_sync(&host->timeout_work); in sh_mmcif_remove()
1569 clk_disable_unprepare(host->clk); in sh_mmcif_remove()
1570 mmc_free_host(host->mmc); in sh_mmcif_remove()
1571 pm_runtime_put_sync(&pdev->dev); in sh_mmcif_remove()
1572 pm_runtime_disable(&pdev->dev); in sh_mmcif_remove()
1581 sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); in sh_mmcif_suspend()
1610 MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");