Lines Matching +full:rx +full:- +full:only

1 // SPDX-License-Identifier: GPL-2.0-or-later
10 * spi-ath79.c, Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
11 * spi-atmel.c, Copyright (C) 2006 Atmel Corporation
19 #include <linux/dma-mapping.h>
76 #define DRV_NAME "spi-bcm2835"
85 * struct bcm2835_spi - BCM2835 SPI controller
88 * @cs_gpio: chip-select GPIO descriptor
90 * @irq: interrupt, signals TX FIFO empty or RX FIFO ¾ full
99 * @rx_prologue: bytes received without DMA if first RX sglist entry's
102 * @debugfs_dir: the debugfs directory - needed to remove debugfs when
114 * @rx_dma_active: whether a RX DMA descriptor is in progress
116 * @fill_tx_desc: preallocated TX DMA descriptor used for RX-only transfers
150 * struct bcm2835_spidev - BCM2835 SPI target
151 * @prepare_cs: precalculated CS register value for ->prepare_message()
152 * (uses target-specific clock polarity and phase settings)
153 * @clear_rx_desc: preallocated RX DMA descriptor used for TX-only transfers
154 * (cyclically clears RX FIFO by writing @clear_rx_cs to CS register)
156 * @clear_rx_cs: precalculated CS register value to clear RX FIFO
157 * (uses target-specific clock polarity and phase settings)
174 snprintf(name, sizeof(name), "spi-bcm2835-%s", dname); in bcm2835_debugfs_create()
178 bs->debugfs_dir = dir; in bcm2835_debugfs_create()
182 &bs->count_transfer_polling); in bcm2835_debugfs_create()
184 &bs->count_transfer_irq); in bcm2835_debugfs_create()
186 &bs->count_transfer_irq_after_polling); in bcm2835_debugfs_create()
188 &bs->count_transfer_dma); in bcm2835_debugfs_create()
193 debugfs_remove_recursive(bs->debugfs_dir); in bcm2835_debugfs_remove()
194 bs->debugfs_dir = NULL; in bcm2835_debugfs_remove()
209 return readl(bs->regs + reg); in bcm2835_rd()
214 writel(val, bs->regs + reg); in bcm2835_wr()
221 while ((bs->rx_len) && in bcm2835_rd_fifo()
224 if (bs->rx_buf) in bcm2835_rd_fifo()
225 *bs->rx_buf++ = byte; in bcm2835_rd_fifo()
226 bs->rx_len--; in bcm2835_rd_fifo()
234 while ((bs->tx_len) && in bcm2835_wr_fifo()
236 byte = bs->tx_buf ? *bs->tx_buf++ : 0; in bcm2835_wr_fifo()
238 bs->tx_len--; in bcm2835_wr_fifo()
243 * bcm2835_rd_fifo_count() - blindly read exactly @count bytes from RX FIFO
245 * @count: bytes to read from RX FIFO
247 * The caller must ensure that @bs->rx_len is greater than or equal to @count,
248 * that the RX FIFO contains at least @count bytes and that the DMA Enable flag
250 * 32-bit instead of just 8-bit). Moreover @bs->rx_buf must not be %NULL.
257 bs->rx_len -= count; in bcm2835_rd_fifo_count()
262 memcpy(bs->rx_buf, &val, len); in bcm2835_rd_fifo_count()
263 bs->rx_buf += len; in bcm2835_rd_fifo_count()
264 count -= 4; in bcm2835_rd_fifo_count()
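
Only the matching lines of this helper survive the search filter; a self-contained userspace rendition of the 32-bit drain loop (fifo_read() is a hypothetical stand-in for the FIFO register read, and the demo assumes a little-endian host, as the BCM2835 is) may make the byte accounting easier to follow:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for the 32-bit-wide FIFO register read. */
static uint32_t fifo_read(void)
{
	return 0x04030201;	/* canned data for the demo below */
}

/* Drain exactly @count bytes in 32-bit reads; only the final read may
 * carry fewer than 4 valid bytes, matching the DLEN rule cited above. */
static void rd_fifo_count(uint8_t *rx_buf, int count)
{
	while (count > 0) {
		uint32_t val = fifo_read();
		int len = count < 4 ? count : 4;

		memcpy(rx_buf, &val, len);	/* bytes land LSB first */
		rx_buf += len;
		count -= 4;
	}
}

int main(void)
{
	uint8_t buf[6];

	rd_fifo_count(buf, 6);	/* two reads: 4 bytes, then 2 */
	assert(buf[4] == 0x01 && buf[5] == 0x02);
	return 0;
}
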
269 * bcm2835_wr_fifo_count() - blindly write exactly @count bytes to TX FIFO
273 * The caller must ensure that @bs->tx_len is greater than or equal to @count,
276 * 32-bit instead of just 8-bit).
283 bs->tx_len -= count; in bcm2835_wr_fifo_count()
286 if (bs->tx_buf) { in bcm2835_wr_fifo_count()
288 memcpy(&val, bs->tx_buf, len); in bcm2835_wr_fifo_count()
289 bs->tx_buf += len; in bcm2835_wr_fifo_count()
294 count -= 4; in bcm2835_wr_fifo_count()
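
The write side mirrors this, with one extra rule visible in the fragment at 286-289: when @bs->tx_buf is %NULL (an RX-only transfer), zeroes are clocked out instead. A matching userspace sketch, again with a hypothetical fifo_write() in place of the register access:

#include <assert.h>
#include <stdint.h>
#include <string.h>

static uint32_t last_word;		/* captures the "register write" */
static void fifo_write(uint32_t val) { last_word = val; }

/* Push exactly @count bytes in 32-bit writes, zero-filling when no tx
 * buffer was supplied. */
static void wr_fifo_count(const uint8_t *tx_buf, int count)
{
	while (count > 0) {
		uint32_t val = 0;
		int len = count < 4 ? count : 4;

		if (tx_buf) {
			memcpy(&val, tx_buf, len);
			tx_buf += len;
		}
		fifo_write(val);
		count -= 4;
	}
}

int main(void)
{
	const uint8_t msg[3] = { 0xaa, 0xbb, 0xcc };

	wr_fifo_count(msg, 3);			/* one short, final write */
	assert((last_word & 0xff) == 0xaa);	/* little-endian host */
	wr_fifo_count(NULL, 4);			/* RX-only: dummy zeroes */
	assert(last_word == 0);
	return 0;
}
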
299 * bcm2835_wait_tx_fifo_empty() - busy-wait for TX FIFO to empty
302 * The caller must ensure that the RX FIFO can accommodate as many bytes
304 * RX FIFO is full, causing this function to spin forever.
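
The helper's body contains no line matching the search, but given the description it is, in substance, a spin on the DONE flag; a hedged reconstruction using the register accessors defined earlier in the file:

static inline void bcm2835_wait_tx_fifo_empty(struct bcm2835_spi *bs)
{
	/* DONE sets once the TX FIFO has drained; if the RX FIFO is
	 * full, the controller stalls and DONE never sets - hence the
	 * caller-side guarantee described above. */
	while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
		cpu_relax();
}
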
313 * bcm2835_rd_fifo_blind() - blindly read up to @count bytes from RX FIFO
315 * @count: bytes available for reading in RX FIFO
321 count = min(count, bs->rx_len); in bcm2835_rd_fifo_blind()
322 bs->rx_len -= count; in bcm2835_rd_fifo_blind()
326 if (bs->rx_buf) in bcm2835_rd_fifo_blind()
327 *bs->rx_buf++ = val; in bcm2835_rd_fifo_blind()
328 } while (--count); in bcm2835_rd_fifo_blind()
332 * bcm2835_wr_fifo_blind() - blindly write up to @count bytes to TX FIFO
340 count = min(count, bs->tx_len); in bcm2835_wr_fifo_blind()
341 bs->tx_len -= count; in bcm2835_wr_fifo_blind()
344 val = bs->tx_buf ? *bs->tx_buf++ : 0; in bcm2835_wr_fifo_blind()
346 } while (--count); in bcm2835_wr_fifo_blind()
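
Unlike the *_fifo_count() helpers, these blind variants run with the DMA Enable flag cleared, so each FIFO register access moves a single byte, and @count is clamped to the bytes the transfer still expects. A userspace model of the read side (names hypothetical):

#include <assert.h>
#include <stdint.h>

/* Hypothetical 8-bit-mode FIFO read: one byte per register access. */
static uint8_t fifo_read8(void) { return 0x5a; }

/* Read up to @count bytes, clamped to @rx_len; discard the data if no
 * rx buffer was supplied. Returns the bytes still outstanding. */
static int rd_fifo_blind(uint8_t *rx_buf, int rx_len, int count)
{
	count = count < rx_len ? count : rx_len;
	rx_len -= count;
	while (count--) {
		uint8_t val = fifo_read8();

		if (rx_buf)
			*rx_buf++ = val;
	}
	return rx_len;
}

int main(void)
{
	uint8_t buf[4];

	/* 16 bytes sit in the FIFO, but only 4 are still expected. */
	assert(rd_fifo_blind(buf, 4, 16) == 0);
	assert(buf[3] == 0x5a);
	return 0;
}
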
365 /* and reset RX/TX FIFOs */ in bcm2835_spi_reset_hw()
385 * or if RXR is set (RX FIFO >= ¾ full). in bcm2835_spi_interrupt()
392 if (bs->tx_len && cs & BCM2835_SPI_CS_DONE) in bcm2835_spi_interrupt()
400 if (!bs->rx_len) { in bcm2835_spi_interrupt()
401 /* Transfer complete - reset SPI HW */ in bcm2835_spi_interrupt()
404 spi_finalize_current_transfer(bs->ctlr); in bcm2835_spi_interrupt()
418 bs->count_transfer_irq++; in bcm2835_spi_transfer_one_irq()
440 * bcm2835_spi_transfer_prologue() - transfer first few bytes without DMA
447 * Only the final write access is permitted to transmit less than 4 bytes, the
450 * If a TX or RX sglist contains multiple entries, one per page, and the first
462 * rounded up to a multiple of 4 bytes by transmitting surplus bytes, an RX
466 * E.g. if the first TX sglist entry's length is 23 and the first RX's is 42,
467 * write 3 bytes to the TX FIFO but read only 2 bytes from the RX FIFO.
468 * The residue of 1 byte in the RX FIFO is picked up by DMA. Together with
469 * the rest of the first RX sglist entry it makes up a multiple of 4 bytes.
471 * Should the RX prologue be larger, say, 3 vis-à-vis a TX prologue of 1,
472 * write 1 + 4 = 5 bytes to the TX FIFO and read 3 bytes from the RX FIFO.
479 * The FIFO is normally accessed with 8-bit width by the CPU and 32-bit width
482 * be transmitted in 32-bit width to ensure that the following DMA transfer can
483 * pick up the residue in the RX FIFO in ungarbled form.
492 bs->tfr = tfr; in bcm2835_spi_transfer_prologue()
493 bs->tx_prologue = 0; in bcm2835_spi_transfer_prologue()
494 bs->rx_prologue = 0; in bcm2835_spi_transfer_prologue()
495 bs->tx_spillover = false; in bcm2835_spi_transfer_prologue()
497 if (bs->tx_buf && !sg_is_last(&tfr->tx_sg.sgl[0])) in bcm2835_spi_transfer_prologue()
498 bs->tx_prologue = sg_dma_len(&tfr->tx_sg.sgl[0]) & 3; in bcm2835_spi_transfer_prologue()
500 if (bs->rx_buf && !sg_is_last(&tfr->rx_sg.sgl[0])) { in bcm2835_spi_transfer_prologue()
501 bs->rx_prologue = sg_dma_len(&tfr->rx_sg.sgl[0]) & 3; in bcm2835_spi_transfer_prologue()
503 if (bs->rx_prologue > bs->tx_prologue) { in bcm2835_spi_transfer_prologue()
504 if (!bs->tx_buf || sg_is_last(&tfr->tx_sg.sgl[0])) { in bcm2835_spi_transfer_prologue()
505 bs->tx_prologue = bs->rx_prologue; in bcm2835_spi_transfer_prologue()
507 bs->tx_prologue += 4; in bcm2835_spi_transfer_prologue()
508 bs->tx_spillover = in bcm2835_spi_transfer_prologue()
509 !(sg_dma_len(&tfr->tx_sg.sgl[0]) & ~3); in bcm2835_spi_transfer_prologue()
514 /* rx_prologue > 0 implies tx_prologue > 0, so check only the latter */ in bcm2835_spi_transfer_prologue()
515 if (!bs->tx_prologue) in bcm2835_spi_transfer_prologue()
518 /* Write and read RX prologue. Adjust first entry in RX sglist. */ in bcm2835_spi_transfer_prologue()
519 if (bs->rx_prologue) { in bcm2835_spi_transfer_prologue()
520 bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->rx_prologue); in bcm2835_spi_transfer_prologue()
523 bcm2835_wr_fifo_count(bs, bs->rx_prologue); in bcm2835_spi_transfer_prologue()
525 bcm2835_rd_fifo_count(bs, bs->rx_prologue); in bcm2835_spi_transfer_prologue()
530 dma_sync_single_for_device(ctlr->dma_rx->device->dev, in bcm2835_spi_transfer_prologue()
531 sg_dma_address(&tfr->rx_sg.sgl[0]), in bcm2835_spi_transfer_prologue()
532 bs->rx_prologue, DMA_FROM_DEVICE); in bcm2835_spi_transfer_prologue()
534 sg_dma_address(&tfr->rx_sg.sgl[0]) += bs->rx_prologue; in bcm2835_spi_transfer_prologue()
535 sg_dma_len(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue; in bcm2835_spi_transfer_prologue()
538 if (!bs->tx_buf) in bcm2835_spi_transfer_prologue()
545 tx_remaining = bs->tx_prologue - bs->rx_prologue; in bcm2835_spi_transfer_prologue()
556 if (likely(!bs->tx_spillover)) { in bcm2835_spi_transfer_prologue()
557 sg_dma_address(&tfr->tx_sg.sgl[0]) += bs->tx_prologue; in bcm2835_spi_transfer_prologue()
558 sg_dma_len(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue; in bcm2835_spi_transfer_prologue()
560 sg_dma_len(&tfr->tx_sg.sgl[0]) = 0; in bcm2835_spi_transfer_prologue()
561 sg_dma_address(&tfr->tx_sg.sgl[1]) += 4; in bcm2835_spi_transfer_prologue()
562 sg_dma_len(&tfr->tx_sg.sgl[1]) -= 4; in bcm2835_spi_transfer_prologue()
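
The sizing rules laid out in the comment block above can be exercised in isolation. Below is a self-contained userspace model of just the arithmetic from lines 497-509, with hypothetical names; it reproduces both worked examples from the comment:

#include <assert.h>
#include <stdbool.h>

struct prologue { int tx, rx; bool spillover; };

/* len_tx0/len_rx0: DMA length of the first TX/RX sglist entry;
 * last_tx/last_rx: whether that entry is also the last one;
 * have_tx/have_rx: whether the transfer has a tx_buf/rx_buf at all. */
static struct prologue size_prologue(bool have_tx, int len_tx0, bool last_tx,
				     bool have_rx, int len_rx0, bool last_rx)
{
	struct prologue p = { 0, 0, false };

	if (have_tx && !last_tx)
		p.tx = len_tx0 & 3;

	if (have_rx && !last_rx) {
		p.rx = len_rx0 & 3;
		if (p.rx > p.tx) {
			if (!have_tx || last_tx) {
				p.tx = p.rx;
			} else {
				p.tx += 4;	/* surplus word spills over */
				p.spillover = !(len_tx0 & ~3);
			}
		}
	}
	return p;
}

int main(void)
{
	/* First TX entry 23 bytes, first RX entry 42: TX prologue 3,
	 * RX prologue 2 (the example at lines 466-469). */
	struct prologue p = size_prologue(true, 23, false, true, 42, false);
	assert(p.tx == 3 && p.rx == 2 && !p.spillover);

	/* RX prologue 3 vs. TX prologue 1: write 1 + 4 = 5 bytes (the
	 * example at lines 471-472). */
	p = size_prologue(true, 5, false, true, 3, false);
	assert(p.tx == 5 && p.rx == 3 && !p.spillover);
	return 0;
}
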
567 * bcm2835_spi_undo_prologue() - reconstruct original sglist state
576 struct spi_transfer *tfr = bs->tfr; in bcm2835_spi_undo_prologue()
578 if (!bs->tx_prologue) in bcm2835_spi_undo_prologue()
581 if (bs->rx_prologue) { in bcm2835_spi_undo_prologue()
582 sg_dma_address(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue; in bcm2835_spi_undo_prologue()
583 sg_dma_len(&tfr->rx_sg.sgl[0]) += bs->rx_prologue; in bcm2835_spi_undo_prologue()
586 if (!bs->tx_buf) in bcm2835_spi_undo_prologue()
589 if (likely(!bs->tx_spillover)) { in bcm2835_spi_undo_prologue()
590 sg_dma_address(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue; in bcm2835_spi_undo_prologue()
591 sg_dma_len(&tfr->tx_sg.sgl[0]) += bs->tx_prologue; in bcm2835_spi_undo_prologue()
593 sg_dma_len(&tfr->tx_sg.sgl[0]) = bs->tx_prologue - 4; in bcm2835_spi_undo_prologue()
594 sg_dma_address(&tfr->tx_sg.sgl[1]) -= 4; in bcm2835_spi_undo_prologue()
595 sg_dma_len(&tfr->tx_sg.sgl[1]) += 4; in bcm2835_spi_undo_prologue()
598 bs->tx_prologue = 0; in bcm2835_spi_undo_prologue()
602 * bcm2835_spi_dma_rx_done() - callback for DMA RX channel
605 * Used for bidirectional and RX-only transfers.
612 /* terminate tx-dma as we do not have an irq for it in bcm2835_spi_dma_rx_done()
613 * because by the time the rx dma terminates and this callback in bcm2835_spi_dma_rx_done()
614 * is called, the tx-dma must have finished - can't get to this in bcm2835_spi_dma_rx_done()
617 dmaengine_terminate_async(ctlr->dma_tx); in bcm2835_spi_dma_rx_done()
618 bs->tx_dma_active = false; in bcm2835_spi_dma_rx_done()
619 bs->rx_dma_active = false; in bcm2835_spi_dma_rx_done()
630 * bcm2835_spi_dma_tx_done() - callback for DMA TX channel
633 * Used for TX-only transfers.
640 /* busy-wait for TX FIFO to empty */ in bcm2835_spi_dma_tx_done()
642 bcm2835_wr(bs, BCM2835_SPI_CS, bs->target->clear_rx_cs); in bcm2835_spi_dma_tx_done()
644 bs->tx_dma_active = false; in bcm2835_spi_dma_tx_done()
648 * In case of a very short transfer, RX DMA may not have been in bcm2835_spi_dma_tx_done()
652 if (cmpxchg(&bs->rx_dma_active, true, false)) in bcm2835_spi_dma_tx_done()
653 dmaengine_terminate_async(ctlr->dma_rx); in bcm2835_spi_dma_tx_done()
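
The cmpxchg() at line 652 pairs with the one at line 838 further down: whichever context flips rx_dma_active from true to false performs the termination, so it happens exactly once however the callback and the submitting thread interleave. The same idiom rendered with C11 atomics:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

static _Atomic bool rx_dma_active = true;

/* Returns true for exactly one caller, no matter how many race. */
static bool claim_rx_teardown(void)
{
	bool expected = true;

	return atomic_compare_exchange_strong(&rx_dma_active,
					      &expected, false);
}

int main(void)
{
	assert(claim_rx_teardown());	/* first caller wins... */
	assert(!claim_rx_teardown());	/* ...later callers back off */
	return 0;
}
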
661 * bcm2835_spi_prepare_sg() - prepare and submit DMA descriptor for sglist
666 * @is_tx: whether to submit DMA descriptor for TX or RX sglist
668 * Prepare and submit a DMA descriptor for the TX or RX sglist of @tfr.
688 chan = ctlr->dma_tx; in bcm2835_spi_prepare_sg()
689 nents = tfr->tx_sg.nents; in bcm2835_spi_prepare_sg()
690 sgl = tfr->tx_sg.sgl; in bcm2835_spi_prepare_sg()
691 flags = tfr->rx_buf ? 0 : DMA_PREP_INTERRUPT; in bcm2835_spi_prepare_sg()
694 chan = ctlr->dma_rx; in bcm2835_spi_prepare_sg()
695 nents = tfr->rx_sg.nents; in bcm2835_spi_prepare_sg()
696 sgl = tfr->rx_sg.sgl; in bcm2835_spi_prepare_sg()
702 return -EINVAL; in bcm2835_spi_prepare_sg()
705 * Completion is signaled by the RX channel for bidirectional and in bcm2835_spi_prepare_sg()
706 * RX-only transfers; else by the TX channel for TX-only transfers. in bcm2835_spi_prepare_sg()
709 desc->callback = bcm2835_spi_dma_rx_done; in bcm2835_spi_prepare_sg()
710 desc->callback_param = ctlr; in bcm2835_spi_prepare_sg()
711 } else if (!tfr->rx_buf) { in bcm2835_spi_prepare_sg()
712 desc->callback = bcm2835_spi_dma_tx_done; in bcm2835_spi_prepare_sg()
713 desc->callback_param = ctlr; in bcm2835_spi_prepare_sg()
714 bs->target = target; in bcm2835_spi_prepare_sg()
717 /* submit it to DMA-engine */ in bcm2835_spi_prepare_sg()
724 * bcm2835_spi_transfer_one_dma() - perform SPI transfer using DMA engine
730 * For *bidirectional* transfers (both tx_buf and rx_buf are non-%NULL), set up
731 * the TX and RX DMA channel to copy between memory and FIFO register.
733 * For *TX-only* transfers (rx_buf is %NULL), copying the RX FIFO's contents to
734 * memory is pointless. However not reading the RX FIFO isn't an option either
736 * clear the RX FIFO by setting the CLEAR_RX bit in the CS register.
739 * this is called only once, on target registration. A DMA descriptor to write
741 * when performing a TX-only transfer is to submit this descriptor to the RX
746 * Clearing the RX FIFO is paced by the DREQ signal. The signal is asserted
747 * when the RX FIFO becomes half full, i.e. 32 bytes. (Tuneable with the DC
748 * register.) Reading 32 bytes from the RX FIFO would normally require 8 bus
749 * accesses, whereas clearing it requires only 1 bus access. So an 8-fold
752 * For *RX-only* transfers (tx_buf is %NULL), fill the TX FIFO by cyclically
754 * in bcm2835_dma_init(). It must be terminated once the RX DMA channel is
760 * feature is not available on so-called "lite" channels, but normally TX DMA
761 * is backed by a full-featured channel.
763 * Zero-filling the TX FIFO is paced by the DREQ signal. Unfortunately the
766 * has finished, the DMA engine zero-fills the TX FIFO until it is half full.
768 * performed at the end of an RX-only transfer.
780 bs->count_transfer_dma++; in bcm2835_spi_transfer_one_dma()
783 * Transfer first few bytes without DMA if length of first TX or RX in bcm2835_spi_transfer_one_dma()
788 /* setup tx-DMA */ in bcm2835_spi_transfer_one_dma()
789 if (bs->tx_buf) { in bcm2835_spi_transfer_one_dma()
792 cookie = dmaengine_submit(bs->fill_tx_desc); in bcm2835_spi_transfer_one_dma()
799 bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->tx_len); in bcm2835_spi_transfer_one_dma()
805 bs->tx_dma_active = true; in bcm2835_spi_transfer_one_dma()
809 dma_async_issue_pending(ctlr->dma_tx); in bcm2835_spi_transfer_one_dma()
811 /* set up rx-DMA late - so the transfer runs while in bcm2835_spi_transfer_one_dma()
812 * the rx buffers are still being mapped in bcm2835_spi_transfer_one_dma()
815 if (bs->rx_buf) { in bcm2835_spi_transfer_one_dma()
818 cookie = dmaengine_submit(target->clear_rx_desc); in bcm2835_spi_transfer_one_dma()
823 dmaengine_terminate_sync(ctlr->dma_tx); in bcm2835_spi_transfer_one_dma()
824 bs->tx_dma_active = false; in bcm2835_spi_transfer_one_dma()
828 /* start rx dma late */ in bcm2835_spi_transfer_one_dma()
829 dma_async_issue_pending(ctlr->dma_rx); in bcm2835_spi_transfer_one_dma()
830 bs->rx_dma_active = true; in bcm2835_spi_transfer_one_dma()
834 * In case of a very short TX-only transfer, bcm2835_spi_dma_tx_done() in bcm2835_spi_transfer_one_dma()
835 * may run before RX DMA is issued. Terminate RX DMA if so. in bcm2835_spi_transfer_one_dma()
837 if (!bs->rx_buf && !bs->tx_dma_active && in bcm2835_spi_transfer_one_dma()
838 cmpxchg(&bs->rx_dma_active, true, false)) { in bcm2835_spi_transfer_one_dma()
839 dmaengine_terminate_async(ctlr->dma_rx); in bcm2835_spi_transfer_one_dma()
856 /* we start DMA efforts only on bigger transfers */ in bcm2835_spi_can_dma()
857 if (tfr->len < BCM2835_SPI_DMA_MIN_LENGTH) in bcm2835_spi_can_dma()
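
Only the threshold check of this predicate matches the search; the remainder simply returns true, so the whole function can be reconstructed with reasonable confidence (BCM2835_SPI_DMA_MIN_LENGTH is 96 bytes in recent kernels - below that, the FIFO-based modes are cheaper than setting up DMA):

static bool bcm2835_spi_can_dma(struct spi_controller *ctlr,
				struct spi_device *spi,
				struct spi_transfer *tfr)
{
	/* we start DMA efforts only on bigger transfers */
	if (tfr->len < BCM2835_SPI_DMA_MIN_LENGTH)
		return false;

	return true;
}
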
867 if (ctlr->dma_tx) { in bcm2835_dma_release()
868 dmaengine_terminate_sync(ctlr->dma_tx); in bcm2835_dma_release()
870 if (bs->fill_tx_desc) in bcm2835_dma_release()
871 dmaengine_desc_free(bs->fill_tx_desc); in bcm2835_dma_release()
873 if (bs->fill_tx_addr) in bcm2835_dma_release()
874 dma_unmap_page_attrs(ctlr->dma_tx->device->dev, in bcm2835_dma_release()
875 bs->fill_tx_addr, sizeof(u32), in bcm2835_dma_release()
879 dma_release_channel(ctlr->dma_tx); in bcm2835_dma_release()
880 ctlr->dma_tx = NULL; in bcm2835_dma_release()
883 if (ctlr->dma_rx) { in bcm2835_dma_release()
884 dmaengine_terminate_sync(ctlr->dma_rx); in bcm2835_dma_release()
885 dma_release_channel(ctlr->dma_rx); in bcm2835_dma_release()
886 ctlr->dma_rx = NULL; in bcm2835_dma_release()
898 /* base address in dma-space */ in bcm2835_dma_init()
899 addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL); in bcm2835_dma_init()
901 dev_err(dev, "could not get DMA-register address - not using dma mode\n"); in bcm2835_dma_init()
907 /* get tx/rx dma */ in bcm2835_dma_init()
908 ctlr->dma_tx = dma_request_chan(dev, "tx"); in bcm2835_dma_init()
909 if (IS_ERR(ctlr->dma_tx)) { in bcm2835_dma_init()
910 ret = dev_err_probe(dev, PTR_ERR(ctlr->dma_tx), in bcm2835_dma_init()
911 "no tx-dma configuration found - not using dma mode\n"); in bcm2835_dma_init()
912 ctlr->dma_tx = NULL; in bcm2835_dma_init()
915 ctlr->dma_rx = dma_request_chan(dev, "rx"); in bcm2835_dma_init()
916 if (IS_ERR(ctlr->dma_rx)) { in bcm2835_dma_init()
917 ret = dev_err_probe(dev, PTR_ERR(ctlr->dma_rx), in bcm2835_dma_init()
918 "no rx-dma configuration found - not using dma mode\n"); in bcm2835_dma_init()
919 ctlr->dma_rx = NULL; in bcm2835_dma_init()
925 * or, in case of an RX-only transfer, cyclically copies from the zero in bcm2835_dma_init()
931 ret = dmaengine_slave_config(ctlr->dma_tx, &slave_config); in bcm2835_dma_init()
935 bs->fill_tx_addr = dma_map_page_attrs(ctlr->dma_tx->device->dev, in bcm2835_dma_init()
939 if (dma_mapping_error(ctlr->dma_tx->device->dev, bs->fill_tx_addr)) { in bcm2835_dma_init()
940 dev_err(dev, "cannot map zero page - not using DMA mode\n"); in bcm2835_dma_init()
941 bs->fill_tx_addr = 0; in bcm2835_dma_init()
942 ret = -ENOMEM; in bcm2835_dma_init()
946 bs->fill_tx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_tx, in bcm2835_dma_init()
947 bs->fill_tx_addr, in bcm2835_dma_init()
950 if (!bs->fill_tx_desc) { in bcm2835_dma_init()
951 dev_err(dev, "cannot prepare fill_tx_desc - not using DMA mode\n"); in bcm2835_dma_init()
952 ret = -ENOMEM; in bcm2835_dma_init()
956 ret = dmaengine_desc_set_reuse(bs->fill_tx_desc); in bcm2835_dma_init()
958 dev_err(dev, "cannot reuse fill_tx_desc - not using DMA mode\n"); in bcm2835_dma_init()
963 * The RX DMA channel is used bidirectionally: It either reads the in bcm2835_dma_init()
964 * RX FIFO or, in case of a TX-only transfer, cyclically writes a in bcm2835_dma_init()
965 * precalculated value to the CS register to clear the RX FIFO. in bcm2835_dma_init()
972 ret = dmaengine_slave_config(ctlr->dma_rx, &slave_config); in bcm2835_dma_init()
977 ctlr->can_dma = bcm2835_spi_can_dma; in bcm2835_dma_init()
982 dev_err(dev, "issue configuring dma: %d - not using DMA mode\n", in bcm2835_dma_init()
988 * Only report error for deferred probing, otherwise fall back to in bcm2835_dma_init()
991 if (ret != -EPROBE_DEFER) in bcm2835_dma_init()
1006 bs->count_transfer_polling++; in bcm2835_spi_transfer_one_poll()
1021 while (bs->rx_len) { in bcm2835_spi_transfer_one_poll()
1031 if (bs->rx_len && time_after(jiffies, timeout)) { in bcm2835_spi_transfer_one_poll()
1032 dev_dbg_ratelimited(&spi->dev, in bcm2835_spi_transfer_one_poll()
1033 … "timeout period reached: jiffies: %lu remaining tx/rx: %d/%d - falling back to interrupt mode\n", in bcm2835_spi_transfer_one_poll()
1034 jiffies - timeout, in bcm2835_spi_transfer_one_poll()
1035 bs->tx_len, bs->rx_len); in bcm2835_spi_transfer_one_poll()
1039 bs->count_transfer_irq_after_polling++; in bcm2835_spi_transfer_one_poll()
1046 /* Transfer complete - reset SPI HW */ in bcm2835_spi_transfer_one_poll()
1060 u32 cs = target->prepare_cs; in bcm2835_spi_transfer_one()
1063 spi_hz = tfr->speed_hz; in bcm2835_spi_transfer_one()
1065 if (spi_hz >= bs->clk_hz / 2) { in bcm2835_spi_transfer_one()
1069 cdiv = DIV_ROUND_UP(bs->clk_hz, spi_hz); in bcm2835_spi_transfer_one()
1077 tfr->effective_speed_hz = cdiv ? (bs->clk_hz / cdiv) : (bs->clk_hz / 65536); in bcm2835_spi_transfer_one()
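
The divider selection at lines 1063-1077 follows a small rule set: CDIV must be even, 2 is the fastest usable setting (clk/2), and 0 encodes 65536, the slowest. A runnable userspace model of that arithmetic (illustrative only; the elided lines between 1065 and 1077 are inferred from the visible ones):

#include <assert.h>
#include <stdint.h>

static uint32_t pick_cdiv(uint32_t clk_hz, uint32_t spi_hz)
{
	uint32_t cdiv;

	if (spi_hz >= clk_hz / 2)
		return 2;			/* fastest we can go */
	if (!spi_hz)
		return 0;			/* slowest (means 65536) */

	cdiv = (clk_hz + spi_hz - 1) / spi_hz;	/* DIV_ROUND_UP */
	cdiv += cdiv % 2;			/* CDIV must be even */
	return cdiv >= 65536 ? 0 : cdiv;
}

int main(void)
{
	/* 250 MHz core clock, 10 MHz requested: CDIV 26, ~9.6 MHz. */
	uint32_t cdiv = pick_cdiv(250000000, 10000000);

	assert(cdiv == 26);
	assert(250000000 / cdiv == 9615384);	/* effective_speed_hz */
	return 0;
}
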
1080 /* handle the 3-wire mode */ in bcm2835_spi_transfer_one()
1081 if (spi->mode & SPI_3WIRE && tfr->rx_buf) in bcm2835_spi_transfer_one()
1085 bs->tx_buf = tfr->tx_buf; in bcm2835_spi_transfer_one()
1086 bs->rx_buf = tfr->rx_buf; in bcm2835_spi_transfer_one()
1087 bs->tx_len = tfr->len; in bcm2835_spi_transfer_one()
1088 bs->rx_len = tfr->len; in bcm2835_spi_transfer_one()
1097 byte_limit = hz_per_byte ? tfr->effective_speed_hz / hz_per_byte : 1; in bcm2835_spi_transfer_one()
1100 if (tfr->len < byte_limit) in bcm2835_spi_transfer_one()
1107 if (ctlr->can_dma && bcm2835_spi_can_dma(ctlr, spi, tfr)) in bcm2835_spi_transfer_one()
1110 /* run in interrupt-mode */ in bcm2835_spi_transfer_one()
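
The cutoff computed at line 1097 encodes the heuristic from the surrounding comment: a byte occupies roughly 9 clock cycles on the wire (8 data bits plus an idle cycle), and any transfer expected to finish within polling_limit_us (a module parameter, 30 us by default in current sources) is polled rather than IRQ-driven. Modeled in userspace:

#include <assert.h>
#include <stdint.h>

/* Transfers shorter than the returned byte count are polled. */
static uint32_t byte_limit(uint32_t effective_hz, uint32_t polling_limit_us)
{
	uint32_t hz_per_byte;

	if (!polling_limit_us)
		return 1;	/* polling disabled by the admin */

	/* DIV_ROUND_UP(9 cycles/byte * USEC_PER_SEC, limit in us) */
	hz_per_byte = (9 * 1000000 + polling_limit_us - 1) / polling_limit_us;
	return effective_hz / hz_per_byte;
}

int main(void)
{
	/* At ~9.6 MHz with the default 30 us budget, transfers under
	 * 32 bytes never leave polling mode. */
	assert(byte_limit(9615384, 30) == 32);
	return 0;
}
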
1117 struct spi_device *spi = msg->spi; in bcm2835_spi_prepare_message()
1125 bcm2835_wr(bs, BCM2835_SPI_CS, target->prepare_cs); in bcm2835_spi_prepare_message()
1136 if (ctlr->dma_tx) { in bcm2835_spi_handle_err()
1137 dmaengine_terminate_sync(ctlr->dma_tx); in bcm2835_spi_handle_err()
1138 bs->tx_dma_active = false; in bcm2835_spi_handle_err()
1140 if (ctlr->dma_rx) { in bcm2835_spi_handle_err()
1141 dmaengine_terminate_sync(ctlr->dma_rx); in bcm2835_spi_handle_err()
1142 bs->rx_dma_active = false; in bcm2835_spi_handle_err()
1153 struct spi_controller *ctlr = spi->controller; in bcm2835_spi_cleanup()
1156 if (target->clear_rx_desc) in bcm2835_spi_cleanup()
1157 dmaengine_desc_free(target->clear_rx_desc); in bcm2835_spi_cleanup()
1159 if (target->clear_rx_addr) in bcm2835_spi_cleanup()
1160 dma_unmap_single(ctlr->dma_rx->device->dev, in bcm2835_spi_cleanup()
1161 target->clear_rx_addr, in bcm2835_spi_cleanup()
1165 gpiod_put(bs->cs_gpio); in bcm2835_spi_cleanup()
1178 if (!ctlr->dma_rx) in bcm2835_spi_setup_dma()
1181 target->clear_rx_addr = dma_map_single(ctlr->dma_rx->device->dev, in bcm2835_spi_setup_dma()
1182 &target->clear_rx_cs, in bcm2835_spi_setup_dma()
1185 if (dma_mapping_error(ctlr->dma_rx->device->dev, target->clear_rx_addr)) { in bcm2835_spi_setup_dma()
1186 dev_err(&spi->dev, "cannot map clear_rx_cs\n"); in bcm2835_spi_setup_dma()
1187 target->clear_rx_addr = 0; in bcm2835_spi_setup_dma()
1188 return -ENOMEM; in bcm2835_spi_setup_dma()
1191 target->clear_rx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_rx, in bcm2835_spi_setup_dma()
1192 target->clear_rx_addr, in bcm2835_spi_setup_dma()
1195 if (!target->clear_rx_desc) { in bcm2835_spi_setup_dma()
1196 dev_err(&spi->dev, "cannot prepare clear_rx_desc\n"); in bcm2835_spi_setup_dma()
1197 return -ENOMEM; in bcm2835_spi_setup_dma()
1200 ret = dmaengine_desc_set_reuse(target->clear_rx_desc); in bcm2835_spi_setup_dma()
1202 dev_err(&spi->dev, "cannot reuse clear_rx_desc\n"); in bcm2835_spi_setup_dma()
1213 * the SPI HW due to DLEN. Split up transfers (32-bit FIFO in bcm2835_spi_max_transfer_size()
1216 if (spi->controller->can_dma) in bcm2835_spi_max_transfer_size()
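
Only the DMA branch of this helper matches, but the function is short enough to reconstruct with fair confidence - the FIFO path has no DLEN register to respect, so it advertises no cap at all:

static size_t bcm2835_spi_max_transfer_size(struct spi_device *spi)
{
	/*
	 * DMA transfers are limited to 16 bit (0 to 65535 bytes) by
	 * the SPI HW due to DLEN. Split up transfers (32-bit FIFO
	 * aligned) if the limit is exceeded.
	 */
	if (spi->controller->can_dma)
		return 65535; /* per 16-bit DLEN field */

	return SIZE_MAX;
}
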
1224 struct spi_controller *ctlr = spi->controller; in bcm2835_spi_setup()
1235 return -ENOMEM; in bcm2835_spi_setup()
1245 * Precalculate SPI target's CS register value for ->prepare_message(): in bcm2835_spi_setup()
1246 * The driver always uses software-controlled GPIO chip select, hence in bcm2835_spi_setup()
1247 * set the hardware-controlled native chip select to an invalid value in bcm2835_spi_setup()
1251 if (spi->mode & SPI_CPOL) in bcm2835_spi_setup()
1253 if (spi->mode & SPI_CPHA) in bcm2835_spi_setup()
1255 target->prepare_cs = cs; in bcm2835_spi_setup()
1258 * Precalculate SPI target's CS register value to clear RX FIFO in bcm2835_spi_setup()
1259 * in case of a TX-only DMA transfer. in bcm2835_spi_setup()
1261 if (ctlr->dma_rx) { in bcm2835_spi_setup()
1262 target->clear_rx_cs = cs | BCM2835_SPI_CS_TA | in bcm2835_spi_setup()
1265 dma_sync_single_for_device(ctlr->dma_rx->device->dev, in bcm2835_spi_setup()
1266 target->clear_rx_addr, in bcm2835_spi_setup()
1272 * sanity checking the native-chipselects in bcm2835_spi_setup()
1274 if (spi->mode & SPI_NO_CS) in bcm2835_spi_setup()
1287 dev_err(&spi->dev, in bcm2835_spi_setup()
1288 "setup: only two native chip-selects are supported\n"); in bcm2835_spi_setup()
1289 ret = -EINVAL; in bcm2835_spi_setup()
1300 * https://www.spinics.net/lists/linux-gpio/msg36218.html in bcm2835_spi_setup()
1304 ret = -ENOMEM; in bcm2835_spi_setup()
1308 lookup->dev_id = dev_name(&spi->dev); in bcm2835_spi_setup()
1309 lookup->table[0] = GPIO_LOOKUP("pinctrl-bcm2835", in bcm2835_spi_setup()
1310 8 - (spi_get_chipselect(spi, 0)), in bcm2835_spi_setup()
1315 bs->cs_gpio = gpiod_get(&spi->dev, "cs", GPIOD_OUT_LOW); in bcm2835_spi_setup()
1317 if (IS_ERR(bs->cs_gpio)) { in bcm2835_spi_setup()
1318 ret = PTR_ERR(bs->cs_gpio); in bcm2835_spi_setup()
1322 spi_set_csgpiod(spi, 0, bs->cs_gpio); in bcm2835_spi_setup()
1325 dev_info(&spi->dev, "setting up native-CS%i to use GPIO\n", in bcm2835_spi_setup()
1341 ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*bs)); in bcm2835_spi_probe()
1343 return -ENOMEM; in bcm2835_spi_probe()
1347 ctlr->use_gpio_descriptors = true; in bcm2835_spi_probe()
1348 ctlr->mode_bits = BCM2835_SPI_MODE_BITS; in bcm2835_spi_probe()
1349 ctlr->bits_per_word_mask = SPI_BPW_MASK(8); in bcm2835_spi_probe()
1350 ctlr->num_chipselect = 3; in bcm2835_spi_probe()
1351 ctlr->max_transfer_size = bcm2835_spi_max_transfer_size; in bcm2835_spi_probe()
1352 ctlr->setup = bcm2835_spi_setup; in bcm2835_spi_probe()
1353 ctlr->cleanup = bcm2835_spi_cleanup; in bcm2835_spi_probe()
1354 ctlr->transfer_one = bcm2835_spi_transfer_one; in bcm2835_spi_probe()
1355 ctlr->handle_err = bcm2835_spi_handle_err; in bcm2835_spi_probe()
1356 ctlr->prepare_message = bcm2835_spi_prepare_message; in bcm2835_spi_probe()
1357 ctlr->dev.of_node = pdev->dev.of_node; in bcm2835_spi_probe()
1360 bs->ctlr = ctlr; in bcm2835_spi_probe()
1362 bs->regs = devm_platform_ioremap_resource(pdev, 0); in bcm2835_spi_probe()
1363 if (IS_ERR(bs->regs)) in bcm2835_spi_probe()
1364 return PTR_ERR(bs->regs); in bcm2835_spi_probe()
1366 bs->clk = devm_clk_get_enabled(&pdev->dev, NULL); in bcm2835_spi_probe()
1367 if (IS_ERR(bs->clk)) in bcm2835_spi_probe()
1368 return dev_err_probe(&pdev->dev, PTR_ERR(bs->clk), in bcm2835_spi_probe()
1371 ctlr->max_speed_hz = clk_get_rate(bs->clk) / 2; in bcm2835_spi_probe()
1373 bs->irq = platform_get_irq(pdev, 0); in bcm2835_spi_probe()
1374 if (bs->irq < 0) in bcm2835_spi_probe()
1375 return bs->irq; in bcm2835_spi_probe()
1377 bs->clk_hz = clk_get_rate(bs->clk); in bcm2835_spi_probe()
1379 err = bcm2835_dma_init(ctlr, &pdev->dev, bs); in bcm2835_spi_probe()
1387 err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, in bcm2835_spi_probe()
1388 IRQF_SHARED, dev_name(&pdev->dev), bs); in bcm2835_spi_probe()
1390 dev_err(&pdev->dev, "could not request IRQ: %d\n", err); in bcm2835_spi_probe()
1396 dev_err(&pdev->dev, "could not register SPI controller: %d\n", in bcm2835_spi_probe()
1401 bcm2835_debugfs_create(bs, dev_name(&pdev->dev)); in bcm2835_spi_probe()
1427 { .compatible = "brcm,bcm2835-spi", },