drivers/dma/ppc4xx/adma.c: lines matching +full:src +full:-coef (excerpt)
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright (C) 2006-2009 DENX Software Engineering.
22 #include <linux/dma-mapping.h>
33 #include <asm/dcr-regs.h>
91 /* This array is used in data-check operations for storing a pattern */
98 /* Since RXOR operations use the common register (MQ0_CF2H) to set up the block size, only one RXOR transaction may be active at a time; a flag records whether one is running */
135 switch (chan->device->id) { in print_cb()
145 cdb, chan->device->id, in print_cb()
146 cdb->attr, cdb->opc, le32_to_cpu(cdb->cnt), in print_cb()
147 le32_to_cpu(cdb->sg1u), le32_to_cpu(cdb->sg1l), in print_cb()
148 le32_to_cpu(cdb->sg2u), le32_to_cpu(cdb->sg2l), in print_cb()
149 le32_to_cpu(cdb->sg3u), le32_to_cpu(cdb->sg3l) in print_cb()
159 cb, chan->device->id, in print_cb()
160 cb->cbc, cb->cbbc, cb->cbs, in print_cb()
161 cb->cbtah, cb->cbtal, in print_cb()
162 cb->cblah, cb->cblal); in print_cb()
164 if (i && !cb->ops[i].h && !cb->ops[i].l) in print_cb()
167 i, cb->ops[i].h, cb->ops[i].l); in print_cb()
176 for (; iter; iter = iter->hw_next) in print_cb_list()
177 print_cb(chan, iter->hw_desc); in print_cb_list()
180 static void prep_dma_xor_dbg(int id, dma_addr_t dst, dma_addr_t *src, in prep_dma_xor_dbg() argument
187 pr_debug("\t0x%016llx ", src[i]); in prep_dma_xor_dbg()
191 static void prep_dma_pq_dbg(int id, dma_addr_t *dst, dma_addr_t *src, in prep_dma_pq_dbg() argument
198 pr_debug("\t0x%016llx ", src[i]); in prep_dma_pq_dbg()
204 static void prep_dma_pqzero_sum_dbg(int id, dma_addr_t *src, in prep_dma_pqzero_sum_dbg() argument
210 pr_debug("\n%s(%d):\nsrc(coef): ", __func__, id); in prep_dma_pqzero_sum_dbg()
213 pr_debug("\t0x%016llx(0x%02x) ", src[i], scf[i]); in prep_dma_pqzero_sum_dbg()
216 pr_debug("\t0x%016llx(no) ", src[i]); in prep_dma_pqzero_sum_dbg()
221 pr_debug("\t0x%016llx ", src[src_cnt + i]); in prep_dma_pqzero_sum_dbg()
225 * Command (Descriptor) Blocks low-level routines
228 * ppc440spe_desc_init_interrupt - initialize the descriptor for INTERRUPT
236 switch (chan->device->id) { in ppc440spe_desc_init_interrupt()
238 p = desc->hw_desc; in ppc440spe_desc_init_interrupt()
239 memset(desc->hw_desc, 0, sizeof(struct xor_cb)); in ppc440spe_desc_init_interrupt()
241 p->cbc = XOR_CBCR_CBCE_BIT; in ppc440spe_desc_init_interrupt()
245 memset(desc->hw_desc, 0, sizeof(struct dma_cdb)); in ppc440spe_desc_init_interrupt()
247 set_bit(PPC440SPE_DESC_INT, &desc->flags); in ppc440spe_desc_init_interrupt()
250 printk(KERN_ERR "Unsupported id %d in %s\n", chan->device->id, in ppc440spe_desc_init_interrupt()
257 * ppc440spe_desc_init_null_xor - initialize the descriptor for NULL XOR
262 memset(desc->hw_desc, 0, sizeof(struct xor_cb)); in ppc440spe_desc_init_null_xor()
263 desc->hw_next = NULL; in ppc440spe_desc_init_null_xor()
264 desc->src_cnt = 0; in ppc440spe_desc_init_null_xor()
265 desc->dst_cnt = 1; in ppc440spe_desc_init_null_xor()
269 * ppc440spe_desc_init_xor - initialize the descriptor for XOR operation
274 struct xor_cb *hw_desc = desc->hw_desc; in ppc440spe_desc_init_xor()
276 memset(desc->hw_desc, 0, sizeof(struct xor_cb)); in ppc440spe_desc_init_xor()
277 desc->hw_next = NULL; in ppc440spe_desc_init_xor()
278 desc->src_cnt = src_cnt; in ppc440spe_desc_init_xor()
279 desc->dst_cnt = 1; in ppc440spe_desc_init_xor()
281 hw_desc->cbc = XOR_CBCR_TGT_BIT | src_cnt; in ppc440spe_desc_init_xor()
284 hw_desc->cbc |= XOR_CBCR_CBCE_BIT; in ppc440spe_desc_init_xor()
288 * ppc440spe_desc_init_dma2pq - initialize the descriptor for PQ
294 struct xor_cb *hw_desc = desc->hw_desc; in ppc440spe_desc_init_dma2pq()
296 memset(desc->hw_desc, 0, sizeof(struct xor_cb)); in ppc440spe_desc_init_dma2pq()
297 desc->hw_next = NULL; in ppc440spe_desc_init_dma2pq()
298 desc->src_cnt = src_cnt; in ppc440spe_desc_init_dma2pq()
299 desc->dst_cnt = dst_cnt; in ppc440spe_desc_init_dma2pq()
300 memset(desc->reverse_flags, 0, sizeof(desc->reverse_flags)); in ppc440spe_desc_init_dma2pq()
301 desc->descs_per_op = 0; in ppc440spe_desc_init_dma2pq()
303 hw_desc->cbc = XOR_CBCR_TGT_BIT; in ppc440spe_desc_init_dma2pq()
306 hw_desc->cbc |= XOR_CBCR_CBCE_BIT; in ppc440spe_desc_init_dma2pq()
314 * ppc440spe_desc_init_dma01pq - initialize the descriptors for PQ operation
326 set_bits(op, &desc->flags); in ppc440spe_desc_init_dma01pq()
327 desc->src_cnt = src_cnt; in ppc440spe_desc_init_dma01pq()
328 desc->dst_cnt = dst_cnt; in ppc440spe_desc_init_dma01pq()
333 dopc = (desc->dst_cnt == DMA_DEST_MAX_NUM) ? in ppc440spe_desc_init_dma01pq()
336 list_for_each_entry(iter, &desc->group_list, chain_node) { in ppc440spe_desc_init_dma01pq()
337 hw_desc = iter->hw_desc; in ppc440spe_desc_init_dma01pq()
338 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); in ppc440spe_desc_init_dma01pq()
340 if (likely(!list_is_last(&iter->chain_node, in ppc440spe_desc_init_dma01pq()
341 &desc->group_list))) { in ppc440spe_desc_init_dma01pq()
343 iter->hw_next = list_entry(iter->chain_node.next, in ppc440spe_desc_init_dma01pq()
345 clear_bit(PPC440SPE_DESC_INT, &iter->flags); in ppc440spe_desc_init_dma01pq()
350 * of the transaction (src, dst, ...) in ppc440spe_desc_init_dma01pq()
352 iter->hw_next = NULL; in ppc440spe_desc_init_dma01pq()
354 set_bit(PPC440SPE_DESC_INT, &iter->flags); in ppc440spe_desc_init_dma01pq()
356 clear_bit(PPC440SPE_DESC_INT, &iter->flags); in ppc440spe_desc_init_dma01pq()
361 if (!test_bit(PPC440SPE_DESC_RXOR, &desc->flags)) { in ppc440spe_desc_init_dma01pq()
363 * - first descriptors are for zeroing destinations in ppc440spe_desc_init_dma01pq()
365 * - the remaining descriptors are for GF-XOR operations. in ppc440spe_desc_init_dma01pq()
367 iter = list_first_entry(&desc->group_list, in ppc440spe_desc_init_dma01pq()
371 if (test_bit(PPC440SPE_ZERO_P, &desc->flags)) { in ppc440spe_desc_init_dma01pq()
372 hw_desc = iter->hw_desc; in ppc440spe_desc_init_dma01pq()
373 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; in ppc440spe_desc_init_dma01pq()
374 iter = list_first_entry(&iter->chain_node, in ppc440spe_desc_init_dma01pq()
379 if (test_bit(PPC440SPE_ZERO_Q, &desc->flags)) { in ppc440spe_desc_init_dma01pq()
380 hw_desc = iter->hw_desc; in ppc440spe_desc_init_dma01pq()
381 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; in ppc440spe_desc_init_dma01pq()
382 iter = list_first_entry(&iter->chain_node, in ppc440spe_desc_init_dma01pq()
387 list_for_each_entry_from(iter, &desc->group_list, chain_node) { in ppc440spe_desc_init_dma01pq()
388 hw_desc = iter->hw_desc; in ppc440spe_desc_init_dma01pq()
389 hw_desc->opc = dopc; in ppc440spe_desc_init_dma01pq()
392 /* This is either RXOR-only or mixed RXOR/WXOR */ in ppc440spe_desc_init_dma01pq()
398 iter = list_first_entry(&desc->group_list, in ppc440spe_desc_init_dma01pq()
401 hw_desc = iter->hw_desc; in ppc440spe_desc_init_dma01pq()
402 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; in ppc440spe_desc_init_dma01pq()
404 if (desc->dst_cnt == DMA_DEST_MAX_NUM) { in ppc440spe_desc_init_dma01pq()
405 iter = list_first_entry(&iter->chain_node, in ppc440spe_desc_init_dma01pq()
408 hw_desc = iter->hw_desc; in ppc440spe_desc_init_dma01pq()
409 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; in ppc440spe_desc_init_dma01pq()
413 if (test_bit(PPC440SPE_DESC_WXOR, &desc->flags)) { in ppc440spe_desc_init_dma01pq()
414 iter = list_first_entry(&iter->chain_node, in ppc440spe_desc_init_dma01pq()
417 list_for_each_entry_from(iter, &desc->group_list, in ppc440spe_desc_init_dma01pq()
419 hw_desc = iter->hw_desc; in ppc440spe_desc_init_dma01pq()
420 hw_desc->opc = dopc; in ppc440spe_desc_init_dma01pq()
427 * ppc440spe_desc_init_dma01pqzero_sum - initialize the descriptor for the PQ_ZERO_SUM operation
442 * and/or Q to chan->pdest and/or chan->qdest as we have in ppc440spe_desc_init_dma01pqzero_sum()
445 iter = list_first_entry(&desc->group_list, in ppc440spe_desc_init_dma01pqzero_sum()
447 iter = list_entry(iter->chain_node.next, in ppc440spe_desc_init_dma01pqzero_sum()
451 iter = list_entry(iter->chain_node.next, in ppc440spe_desc_init_dma01pqzero_sum()
455 list_for_each_entry_from(iter, &desc->group_list, chain_node) { in ppc440spe_desc_init_dma01pqzero_sum()
456 hw_desc = iter->hw_desc; in ppc440spe_desc_init_dma01pqzero_sum()
457 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); in ppc440spe_desc_init_dma01pqzero_sum()
458 iter->src_cnt = 0; in ppc440spe_desc_init_dma01pqzero_sum()
459 iter->dst_cnt = 0; in ppc440spe_desc_init_dma01pqzero_sum()
462 * - <src_cnt> descriptors starting from 2nd or 3rd in ppc440spe_desc_init_dma01pqzero_sum()
463 * descriptor are for GF-XOR operations; in ppc440spe_desc_init_dma01pqzero_sum()
464 * - remaining <dst_cnt> descriptors are for checking the result in ppc440spe_desc_init_dma01pqzero_sum()
470 hw_desc->opc = dopc; in ppc440spe_desc_init_dma01pqzero_sum()
473 hw_desc->opc = DMA_CDB_OPC_DCHECK128; in ppc440spe_desc_init_dma01pqzero_sum()
475 if (likely(!list_is_last(&iter->chain_node, in ppc440spe_desc_init_dma01pqzero_sum()
476 &desc->group_list))) { in ppc440spe_desc_init_dma01pqzero_sum()
478 iter->hw_next = list_entry(iter->chain_node.next, in ppc440spe_desc_init_dma01pqzero_sum()
485 * of the transaction (src, dst, ...) in ppc440spe_desc_init_dma01pqzero_sum()
487 iter->hw_next = NULL; in ppc440spe_desc_init_dma01pqzero_sum()
491 set_bit(PPC440SPE_DESC_INT, &iter->flags); in ppc440spe_desc_init_dma01pqzero_sum()
494 desc->src_cnt = src_cnt; in ppc440spe_desc_init_dma01pqzero_sum()
495 desc->dst_cnt = dst_cnt; in ppc440spe_desc_init_dma01pqzero_sum()
499 * ppc440spe_desc_init_memcpy - initialize the descriptor for MEMCPY operation
504 struct dma_cdb *hw_desc = desc->hw_desc; in ppc440spe_desc_init_memcpy()
506 memset(desc->hw_desc, 0, sizeof(struct dma_cdb)); in ppc440spe_desc_init_memcpy()
507 desc->hw_next = NULL; in ppc440spe_desc_init_memcpy()
508 desc->src_cnt = 1; in ppc440spe_desc_init_memcpy()
509 desc->dst_cnt = 1; in ppc440spe_desc_init_memcpy()
512 set_bit(PPC440SPE_DESC_INT, &desc->flags); in ppc440spe_desc_init_memcpy()
514 clear_bit(PPC440SPE_DESC_INT, &desc->flags); in ppc440spe_desc_init_memcpy()
516 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; in ppc440spe_desc_init_memcpy()
520 * ppc440spe_desc_set_src_addr - set source address into the descriptor
531 switch (chan->device->id) { in ppc440spe_desc_set_src_addr()
542 dma_hw_desc = desc->hw_desc; in ppc440spe_desc_set_src_addr()
543 dma_hw_desc->sg1l = cpu_to_le32((u32)tmplow); in ppc440spe_desc_set_src_addr()
544 dma_hw_desc->sg1u |= cpu_to_le32((u32)tmphi); in ppc440spe_desc_set_src_addr()
547 xor_hw_desc = desc->hw_desc; in ppc440spe_desc_set_src_addr()
548 xor_hw_desc->ops[src_idx].l = addrl; in ppc440spe_desc_set_src_addr()
549 xor_hw_desc->ops[src_idx].h |= addrh; in ppc440spe_desc_set_src_addr()
555 * ppc440spe_desc_set_src_mult - set source address mult into the descriptor
564 switch (chan->device->id) { in ppc440spe_desc_set_src_mult()
567 dma_hw_desc = desc->hw_desc; in ppc440spe_desc_set_src_mult()
574 psgu = &dma_hw_desc->sg1u; in ppc440spe_desc_set_src_mult()
580 psgu = &dma_hw_desc->sg2u; in ppc440spe_desc_set_src_mult()
583 psgu = &dma_hw_desc->sg3u; in ppc440spe_desc_set_src_mult()
599 * ppc440spe_desc_set_dest_addr - set destination address into the descriptor
611 switch (chan->device->id) { in ppc440spe_desc_set_dest_addr()
622 dma_hw_desc = desc->hw_desc; in ppc440spe_desc_set_dest_addr()
624 psgu = dst_idx ? &dma_hw_desc->sg3u : &dma_hw_desc->sg2u; in ppc440spe_desc_set_dest_addr()
625 psgl = dst_idx ? &dma_hw_desc->sg3l : &dma_hw_desc->sg2l; in ppc440spe_desc_set_dest_addr()
631 xor_hw_desc = desc->hw_desc; in ppc440spe_desc_set_dest_addr()
632 xor_hw_desc->cbtal = addrl; in ppc440spe_desc_set_dest_addr()
633 xor_hw_desc->cbtah |= addrh; in ppc440spe_desc_set_dest_addr()
639 * ppc440spe_desc_set_byte_count - set number of data bytes involved
649 switch (chan->device->id) { in ppc440spe_desc_set_byte_count()
652 dma_hw_desc = desc->hw_desc; in ppc440spe_desc_set_byte_count()
653 dma_hw_desc->cnt = cpu_to_le32(byte_count); in ppc440spe_desc_set_byte_count()
656 xor_hw_desc = desc->hw_desc; in ppc440spe_desc_set_byte_count()
657 xor_hw_desc->cbbc = byte_count; in ppc440spe_desc_set_byte_count()
663 * ppc440spe_desc_set_rxor_block_size - set RXOR block size
667 /* assume that byte_count is aligned on a 512-byte boundary; in ppc440spe_desc_set_rxor_block_size()
675 * ppc440spe_desc_set_dcheck - set CHECK pattern
682 switch (chan->device->id) { in ppc440spe_desc_set_dcheck()
685 dma_hw_desc = desc->hw_desc; in ppc440spe_desc_set_dcheck()
686 iowrite32(qword[0], &dma_hw_desc->sg3l); in ppc440spe_desc_set_dcheck()
687 iowrite32(qword[4], &dma_hw_desc->sg3u); in ppc440spe_desc_set_dcheck()
688 iowrite32(qword[8], &dma_hw_desc->sg2l); in ppc440spe_desc_set_dcheck()
689 iowrite32(qword[12], &dma_hw_desc->sg2u); in ppc440spe_desc_set_dcheck()
697 * ppc440spe_xor_set_link - set link address in xor CB
702 struct xor_cb *xor_hw_desc = prev_desc->hw_desc; in ppc440spe_xor_set_link()
704 if (unlikely(!next_desc || !(next_desc->phys))) { in ppc440spe_xor_set_link()
705 printk(KERN_ERR "%s: next_desc=0x%p; next_desc->phys=0x%llx\n", in ppc440spe_xor_set_link()
707 next_desc ? next_desc->phys : 0); in ppc440spe_xor_set_link()
711 xor_hw_desc->cbs = 0; in ppc440spe_xor_set_link()
712 xor_hw_desc->cblal = next_desc->phys; in ppc440spe_xor_set_link()
713 xor_hw_desc->cblah = 0; in ppc440spe_xor_set_link()
714 xor_hw_desc->cbc |= XOR_CBCR_LNK_BIT; in ppc440spe_xor_set_link()
718 * ppc440spe_desc_set_link - set the address of descriptor following this
729 (prev_desc->hw_next && prev_desc->hw_next != next_desc))) { in ppc440spe_desc_set_link()
732 * processing; in this case it's OK. in ppc440spe_desc_set_link()
735 "prev->hw_next=0x%p\n", __func__, prev_desc, in ppc440spe_desc_set_link()
736 next_desc, prev_desc ? prev_desc->hw_next : 0); in ppc440spe_desc_set_link()
743 prev_desc->hw_next = next_desc; in ppc440spe_desc_set_link()
745 switch (chan->device->id) { in ppc440spe_desc_set_link()
751 while (tail->hw_next) in ppc440spe_desc_set_link()
752 tail = tail->hw_next; in ppc440spe_desc_set_link()
766 * ppc440spe_desc_get_link - get the address of the descriptor that
772 if (!desc->hw_next) in ppc440spe_desc_get_link()
775 return desc->hw_next->phys; in ppc440spe_desc_get_link()
779 * ppc440spe_desc_is_aligned - check alignment
784 return (desc->idx & (num_slots - 1)) ? 0 : 1; in ppc440spe_desc_is_aligned()
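/* Editor's note: the test above relies on num_slots being a power of two,
 * so (idx & (num_slots - 1)) == 0 is equivalent to idx % num_slots == 0,
 * i.e. the descriptor starts a naturally aligned group of slots. */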
788 * ppc440spe_chan_xor_slot_count - get the number of slots necessary for
797 slot_cnt = *slots_per_op = (src_cnt + XOR_MAX_OPS - 1)/XOR_MAX_OPS; in ppc440spe_chan_xor_slot_count()
809 * ppc440spe_dma2_pq_slot_count - get the number of slots necessary for
821 dma_addr_t old_addr = srcs[i-1]; in ppc440spe_dma2_pq_slot_count()
828 if (i == src_cnt-1) in ppc440spe_dma2_pq_slot_count()
832 order = -1; in ppc440spe_dma2_pq_slot_count()
834 if (i == src_cnt-1) in ppc440spe_dma2_pq_slot_count()
841 if (i == src_cnt-2 || (order == -1 in ppc440spe_dma2_pq_slot_count()
842 && cur_addr != old_addr - len)) { in ppc440spe_dma2_pq_slot_count()
848 if (i == src_cnt-1) in ppc440spe_dma2_pq_slot_count()
852 if (i == src_cnt-1) in ppc440spe_dma2_pq_slot_count()
856 if (i == src_cnt-1) in ppc440spe_dma2_pq_slot_count()
881 return (addr_count + XOR_MAX_OPS - 1) / XOR_MAX_OPS; in ppc440spe_dma2_pq_slot_count()
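/* Editor's note: the return above is a plain ceiling division; each XOR
 * command block carries up to XOR_MAX_OPS packed operands (16 in this
 * driver's headers, treated as an assumption here). A minimal stand-alone
 * sketch of the same arithmetic:
 */
static inline int xor_cbs_needed(int addr_count, int max_ops)
{
	/* ceil(addr_count / max_ops): e.g. 17 operands, 16 per CB -> 2 CBs */
	return (addr_count + max_ops - 1) / max_ops;
}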
886 * ADMA channel low-level routines
894 * ppc440spe_adma_device_clear_eot_status - interrupt ack to XOR or DMA engine
901 u8 *p = chan->device->dma_desc_pool_virt; in ppc440spe_adma_device_clear_eot_status()
905 switch (chan->device->id) { in ppc440spe_adma_device_clear_eot_status()
909 dma_reg = chan->device->dma_reg; in ppc440spe_adma_device_clear_eot_status()
910 while ((rv = ioread32(&dma_reg->csfpl))) { in ppc440spe_adma_device_clear_eot_status()
912 cdb = (struct dma_cdb *)&p[i - in ppc440spe_adma_device_clear_eot_status()
913 (u32)chan->device->dma_desc_pool]; in ppc440spe_adma_device_clear_eot_status()
918 cdb->opc = 0; in ppc440spe_adma_device_clear_eot_status()
927 if (le32_to_cpu(cdb->sg1u) & in ppc440spe_adma_device_clear_eot_status()
945 list_for_each_entry(iter, &chan->chain, in ppc440spe_adma_device_clear_eot_status()
947 if (iter->phys == phys) in ppc440spe_adma_device_clear_eot_status()
954 BUG_ON(&iter->chain_node == &chan->chain); in ppc440spe_adma_device_clear_eot_status()
956 if (iter->xor_check_result) { in ppc440spe_adma_device_clear_eot_status()
958 &iter->flags)) { in ppc440spe_adma_device_clear_eot_status()
959 *iter->xor_check_result |= in ppc440spe_adma_device_clear_eot_status()
963 &iter->flags)) { in ppc440spe_adma_device_clear_eot_status()
964 *iter->xor_check_result |= in ppc440spe_adma_device_clear_eot_status()
972 rv = ioread32(&dma_reg->dsts); in ppc440spe_adma_device_clear_eot_status()
975 chan->device->id, rv); in ppc440spe_adma_device_clear_eot_status()
977 iowrite32(rv, &dma_reg->dsts); in ppc440spe_adma_device_clear_eot_status()
982 xor_reg = chan->device->xor_reg; in ppc440spe_adma_device_clear_eot_status()
983 rv = ioread32be(&xor_reg->sr); in ppc440spe_adma_device_clear_eot_status()
984 iowrite32be(rv, &xor_reg->sr); in ppc440spe_adma_device_clear_eot_status()
991 u32 val = ioread32be(&xor_reg->ccbalr); in ppc440spe_adma_device_clear_eot_status()
993 iowrite32be(val, &xor_reg->cblalr); in ppc440spe_adma_device_clear_eot_status()
995 val = ioread32be(&xor_reg->crsr); in ppc440spe_adma_device_clear_eot_status()
997 &xor_reg->crsr); in ppc440spe_adma_device_clear_eot_status()
1006 if (!(ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) && in ppc440spe_adma_device_clear_eot_status()
1014 * ppc440spe_chan_is_busy - get the channel status
1022 switch (chan->device->id) { in ppc440spe_chan_is_busy()
1025 dma_reg = chan->device->dma_reg; in ppc440spe_chan_is_busy()
1029 if (ioread16(&dma_reg->cpfhp) != ioread16(&dma_reg->cpftp) || in ppc440spe_chan_is_busy()
1030 ioread16(&dma_reg->cpftp) != ioread16(&dma_reg->csftp)) in ppc440spe_chan_is_busy()
1036 xor_reg = chan->device->xor_reg; in ppc440spe_chan_is_busy()
1037 busy = (ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) ? 1 : 0; in ppc440spe_chan_is_busy()
1045 * ppc440spe_chan_set_first_xor_descriptor - init XORcore chain
1051 struct xor_regs *xor_reg = chan->device->xor_reg; in ppc440spe_chan_set_first_xor_descriptor()
1053 if (ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) in ppc440spe_chan_set_first_xor_descriptor()
1060 iowrite32be(XOR_CRSR_64BA_BIT, &xor_reg->crsr); in ppc440spe_chan_set_first_xor_descriptor()
1062 iowrite32be(next_desc->phys, &xor_reg->cblalr); in ppc440spe_chan_set_first_xor_descriptor()
1063 iowrite32be(0, &xor_reg->cblahr); in ppc440spe_chan_set_first_xor_descriptor()
1064 iowrite32be(ioread32be(&xor_reg->cbcr) | XOR_CBCR_LNK_BIT, in ppc440spe_chan_set_first_xor_descriptor()
1065 &xor_reg->cbcr); in ppc440spe_chan_set_first_xor_descriptor()
1067 chan->hw_chain_inited = 1; in ppc440spe_chan_set_first_xor_descriptor()
1071 * ppc440spe_dma_put_desc - put DMA0,1 descriptor to FIFO.
1078 struct dma_regs *dma_reg = chan->device->dma_reg; in ppc440spe_dma_put_desc()
1080 pcdb = desc->phys; in ppc440spe_dma_put_desc()
1081 if (!test_bit(PPC440SPE_DESC_INT, &desc->flags)) in ppc440spe_dma_put_desc()
1084 chan_last_sub[chan->device->id] = desc; in ppc440spe_dma_put_desc()
1086 ADMA_LL_DBG(print_cb(chan, desc->hw_desc)); in ppc440spe_dma_put_desc()
1088 iowrite32(pcdb, &dma_reg->cpfpl); in ppc440spe_dma_put_desc()
1092 * ppc440spe_chan_append - update the h/w chain in the channel
1104 switch (chan->device->id) { in ppc440spe_chan_append()
1110 iter = chan_last_sub[chan->device->id]; in ppc440spe_chan_append()
1114 iter = chan_first_cdb[chan->device->id]; in ppc440spe_chan_append()
1117 chan->hw_chain_inited = 1; in ppc440spe_chan_append()
1121 if (!iter->hw_next) in ppc440spe_chan_append()
1125 list_for_each_entry_continue(iter, &chan->chain, chain_node) { in ppc440spe_chan_append()
1127 if (!iter->hw_next) in ppc440spe_chan_append()
1133 if (!xor_last_submit->hw_next) in ppc440spe_chan_append()
1136 xor_reg = chan->device->xor_reg; in ppc440spe_chan_append()
1142 xcb = xor_last_linked->hw_desc; in ppc440spe_chan_append()
1143 xcb->cbc |= XOR_CBCR_CBCE_BIT; in ppc440spe_chan_append()
1145 if (!(ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT)) { in ppc440spe_chan_append()
1149 xor_last_submit->hw_next); in ppc440spe_chan_append()
1152 xor_last_submit->hw_next)); in ppc440spe_chan_append()
1155 iowrite32be(ioread32be(&xor_reg->crsr) | in ppc440spe_chan_append()
1157 &xor_reg->crsr); in ppc440spe_chan_append()
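/* Editor's note: the two engine families are appended differently, as the
 * branches above show: DMA0/1 receive new CDBs through the channel FIFO
 * (ppc440spe_dma_put_desc() writes each CDB address to cpfpl), while the
 * XOR core is chained in memory behind xor_last_submit and, if it has
 * stopped, restarted with a CRSR write. */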
1170 * ppc440spe_chan_get_current_descriptor - get the currently executed descriptor
1178 if (unlikely(!chan->hw_chain_inited)) in ppc440spe_chan_get_current_descriptor()
1182 switch (chan->device->id) { in ppc440spe_chan_get_current_descriptor()
1185 dma_reg = chan->device->dma_reg; in ppc440spe_chan_get_current_descriptor()
1186 return ioread32(&dma_reg->acpl) & (~DMA_CDB_MSK); in ppc440spe_chan_get_current_descriptor()
1188 xor_reg = chan->device->xor_reg; in ppc440spe_chan_get_current_descriptor()
1189 return ioread32be(&xor_reg->ccbalr); in ppc440spe_chan_get_current_descriptor()
1195 * ppc440spe_chan_run - enable the channel
1201 switch (chan->device->id) { in ppc440spe_chan_run()
1208 xor_reg = chan->device->xor_reg; in ppc440spe_chan_run()
1212 &xor_reg->crsr); in ppc440spe_chan_run()
1249 * ppc440spe_can_rxor - check if the operands may be processed with RXOR
1271 char *old_addr = page_address(ppc440spe_rxor_srcs[i - 1]); in ppc440spe_can_rxor()
1281 order = -1; in ppc440spe_can_rxor()
1287 if ((i == src_cnt - 2) || in ppc440spe_can_rxor()
1288 (order == -1 && cur_addr != old_addr - len)) { in ppc440spe_can_rxor()
1315 * ppc440spe_adma_estimate - estimate the efficiency of processing
1333 /* If RAID-6 capabilities were not activated don't try in ppc440spe_adma_estimate()
1337 return -1; in ppc440spe_adma_estimate()
1349 if (cap == DMA_PQ && chan->chan_id == PPC440SPE_XOR_ID) { in ppc440spe_adma_estimate()
1374 int best_rank = -1; in ppc440spe_async_tx_find_best_channel()
1381 * we sort out cases where temporary page-sized buffers in ppc440spe_async_tx_find_best_channel()
1400 if (dma_has_cap(cap, ref->chan->device->cap_mask)) { in ppc440spe_async_tx_find_best_channel()
1403 rank = ppc440spe_adma_estimate(ref->chan, cap, dst_lst, in ppc440spe_async_tx_find_best_channel()
1407 best_chan = ref->chan; in ppc440spe_async_tx_find_best_channel()
1417 * ppc440spe_get_group_entry - get group entry with index idx
1423 struct ppc440spe_adma_desc_slot *iter = tdesc->group_head; in ppc440spe_get_group_entry()
1426 if (entry_idx < 0 || entry_idx >= (tdesc->src_cnt + tdesc->dst_cnt)) { in ppc440spe_get_group_entry()
1428 __func__, entry_idx, tdesc->src_cnt, tdesc->dst_cnt); in ppc440spe_get_group_entry()
1432 list_for_each_entry(iter, &tdesc->group_list, chain_node) { in ppc440spe_get_group_entry()
1440 * ppc440spe_adma_free_slots - flags descriptor slots for reuse
1442 * Caller must hold &ppc440spe_chan->lock while calling this function
1447 int stride = slot->slots_per_op; in ppc440spe_adma_free_slots()
1449 while (stride--) { in ppc440spe_adma_free_slots()
1450 slot->slots_per_op = 0; in ppc440spe_adma_free_slots()
1451 slot = list_entry(slot->slot_node.next, in ppc440spe_adma_free_slots()
1458 * ppc440spe_adma_run_tx_complete_actions - call functions to be called
1466 BUG_ON(desc->async_tx.cookie < 0); in ppc440spe_adma_run_tx_complete_actions()
1467 if (desc->async_tx.cookie > 0) { in ppc440spe_adma_run_tx_complete_actions()
1468 cookie = desc->async_tx.cookie; in ppc440spe_adma_run_tx_complete_actions()
1469 desc->async_tx.cookie = 0; in ppc440spe_adma_run_tx_complete_actions()
1471 dma_descriptor_unmap(&desc->async_tx); in ppc440spe_adma_run_tx_complete_actions()
1475 dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL); in ppc440spe_adma_run_tx_complete_actions()
1479 dma_run_dependencies(&desc->async_tx); in ppc440spe_adma_run_tx_complete_actions()
1485 * ppc440spe_adma_clean_slot - clean up CDB slot (if ack is set)
1493 if (!async_tx_test_ack(&desc->async_tx)) in ppc440spe_adma_clean_slot()
1499 if (list_is_last(&desc->chain_node, &chan->chain) || in ppc440spe_adma_clean_slot()
1500 desc->phys == ppc440spe_chan_get_current_descriptor(chan)) in ppc440spe_adma_clean_slot()
1503 if (chan->device->id != PPC440SPE_XOR_ID) { in ppc440spe_adma_clean_slot()
1513 struct dma_cdb *cdb = desc->hw_desc; in ppc440spe_adma_clean_slot()
1514 if (cdb->opc == DMA_CDB_OPC_DCHECK128) in ppc440spe_adma_clean_slot()
1518 dev_dbg(chan->device->common.dev, "\tfree slot %llx: %d stride: %d\n", in ppc440spe_adma_clean_slot()
1519 desc->phys, desc->idx, desc->slots_per_op); in ppc440spe_adma_clean_slot()
1521 list_del(&desc->chain_node); in ppc440spe_adma_clean_slot()
1527 * __ppc440spe_adma_slot_cleanup - this is the common clean-up routine
1541 dev_dbg(chan->device->common.dev, "ppc440spe adma%d: %s\n", in __ppc440spe_adma_slot_cleanup()
1542 chan->device->id, __func__); in __ppc440spe_adma_slot_cleanup()
1554 list_for_each_entry_safe(iter, _iter, &chan->chain, in __ppc440spe_adma_slot_cleanup()
1556 dev_dbg(chan->device->common.dev, "\tcookie: %d slot: %d " in __ppc440spe_adma_slot_cleanup()
1559 iter->async_tx.cookie, iter->idx, busy, iter->phys, in __ppc440spe_adma_slot_cleanup()
1561 async_tx_test_ack(&iter->async_tx)); in __ppc440spe_adma_slot_cleanup()
1563 prefetch(&_iter->async_tx); in __ppc440spe_adma_slot_cleanup()
1574 * needs to be re-read (i.e. has been appended to) in __ppc440spe_adma_slot_cleanup()
1576 if (iter->phys == current_desc) { in __ppc440spe_adma_slot_cleanup()
1588 slot_cnt = iter->slot_cnt; in __ppc440spe_adma_slot_cleanup()
1589 slots_per_op = iter->slots_per_op; in __ppc440spe_adma_slot_cleanup()
1599 slot_cnt -= slots_per_op; in __ppc440spe_adma_slot_cleanup()
1608 slot_cnt = group_start->slot_cnt; in __ppc440spe_adma_slot_cleanup()
1611 &chan->chain, chain_node) { in __ppc440spe_adma_slot_cleanup()
1616 slot_cnt -= slots_per_op; in __ppc440spe_adma_slot_cleanup()
1622 chan->common.completed_cookie = cookie; in __ppc440spe_adma_slot_cleanup()
1652 chan->common.completed_cookie = cookie; in __ppc440spe_adma_slot_cleanup()
1659 * ppc440spe_adma_tasklet - clean up watch-dog initiator
1665 spin_lock_nested(&chan->lock, SINGLE_DEPTH_NESTING); in ppc440spe_adma_tasklet()
1667 spin_unlock(&chan->lock); in ppc440spe_adma_tasklet()
1671 * ppc440spe_adma_slot_cleanup - clean up scheduled initiator
1675 spin_lock_bh(&chan->lock); in ppc440spe_adma_slot_cleanup()
1677 spin_unlock_bh(&chan->lock); in ppc440spe_adma_slot_cleanup()
1681 * ppc440spe_adma_alloc_slots - allocate free slots (if any)
1701 iter = chan->last_used; in ppc440spe_adma_alloc_slots()
1703 iter = list_entry(&chan->all_slots, in ppc440spe_adma_alloc_slots()
1706 list_for_each_entry_safe_continue(iter, _iter, &chan->all_slots, in ppc440spe_adma_alloc_slots()
1709 prefetch(&_iter->async_tx); in ppc440spe_adma_alloc_slots()
1710 if (iter->slots_per_op) { in ppc440spe_adma_alloc_slots()
1726 /* pre-ack all but the last descriptor */ in ppc440spe_adma_alloc_slots()
1728 async_tx_ack(&iter->async_tx); in ppc440spe_adma_alloc_slots()
1730 list_add_tail(&iter->chain_node, &chain); in ppc440spe_adma_alloc_slots()
1732 iter->async_tx.cookie = 0; in ppc440spe_adma_alloc_slots()
1733 iter->hw_next = NULL; in ppc440spe_adma_alloc_slots()
1734 iter->flags = 0; in ppc440spe_adma_alloc_slots()
1735 iter->slot_cnt = num_slots; in ppc440spe_adma_alloc_slots()
1736 iter->xor_check_result = NULL; in ppc440spe_adma_alloc_slots()
1738 iter->slots_per_op = slots_per_op - i; in ppc440spe_adma_alloc_slots()
1740 iter = list_entry(iter->slot_node.next, in ppc440spe_adma_alloc_slots()
1744 num_slots -= slots_per_op; in ppc440spe_adma_alloc_slots()
1746 alloc_tail->group_head = alloc_start; in ppc440spe_adma_alloc_slots()
1747 alloc_tail->async_tx.cookie = -EBUSY; in ppc440spe_adma_alloc_slots()
1748 list_splice(&chain, &alloc_tail->group_list); in ppc440spe_adma_alloc_slots()
1749 chan->last_used = last_used; in ppc440spe_adma_alloc_slots()
1757 tasklet_schedule(&chan->irq_tasklet); in ppc440spe_adma_alloc_slots()
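/* Editor's note: the allocated group is handed out via its tail slot:
 * group_head points back at the first slot, group_list carries the whole
 * chain, and the tail's cookie is parked at -EBUSY until tx_submit()
 * assigns a real cookie. */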
1762 * ppc440spe_adma_alloc_chan_resources - allocate pools for CDB slots
1773 init = ppc440spe_chan->slots_allocated ? 0 : 1; in ppc440spe_adma_alloc_chan_resources()
1774 chan->chan_id = ppc440spe_chan->device->id; in ppc440spe_adma_alloc_chan_resources()
1777 i = ppc440spe_chan->slots_allocated; in ppc440spe_adma_alloc_chan_resources()
1778 if (ppc440spe_chan->device->id != PPC440SPE_XOR_ID) in ppc440spe_adma_alloc_chan_resources()
1783 for (; i < (ppc440spe_chan->device->pool_size / db_sz); i++) { in ppc440spe_adma_alloc_chan_resources()
1788 " %d descriptor slots", i--); in ppc440spe_adma_alloc_chan_resources()
1792 hw_desc = (char *) ppc440spe_chan->device->dma_desc_pool_virt; in ppc440spe_adma_alloc_chan_resources()
1793 slot->hw_desc = (void *) &hw_desc[i * db_sz]; in ppc440spe_adma_alloc_chan_resources()
1794 dma_async_tx_descriptor_init(&slot->async_tx, chan); in ppc440spe_adma_alloc_chan_resources()
1795 slot->async_tx.tx_submit = ppc440spe_adma_tx_submit; in ppc440spe_adma_alloc_chan_resources()
1796 INIT_LIST_HEAD(&slot->chain_node); in ppc440spe_adma_alloc_chan_resources()
1797 INIT_LIST_HEAD(&slot->slot_node); in ppc440spe_adma_alloc_chan_resources()
1798 INIT_LIST_HEAD(&slot->group_list); in ppc440spe_adma_alloc_chan_resources()
1799 slot->phys = ppc440spe_chan->device->dma_desc_pool + i * db_sz; in ppc440spe_adma_alloc_chan_resources()
1800 slot->idx = i; in ppc440spe_adma_alloc_chan_resources()
1802 spin_lock_bh(&ppc440spe_chan->lock); in ppc440spe_adma_alloc_chan_resources()
1803 ppc440spe_chan->slots_allocated++; in ppc440spe_adma_alloc_chan_resources()
1804 list_add_tail(&slot->slot_node, &ppc440spe_chan->all_slots); in ppc440spe_adma_alloc_chan_resources()
1805 spin_unlock_bh(&ppc440spe_chan->lock); in ppc440spe_adma_alloc_chan_resources()
1808 if (i && !ppc440spe_chan->last_used) { in ppc440spe_adma_alloc_chan_resources()
1809 ppc440spe_chan->last_used = in ppc440spe_adma_alloc_chan_resources()
1810 list_entry(ppc440spe_chan->all_slots.next, in ppc440spe_adma_alloc_chan_resources()
1815 dev_dbg(ppc440spe_chan->device->common.dev, in ppc440spe_adma_alloc_chan_resources()
1817 ppc440spe_chan->device->id, i); in ppc440spe_adma_alloc_chan_resources()
1821 switch (ppc440spe_chan->device->id) { in ppc440spe_adma_alloc_chan_resources()
1824 ppc440spe_chan->hw_chain_inited = 0; in ppc440spe_adma_alloc_chan_resources()
1825 /* Use WXOR for self-testing */ in ppc440spe_adma_alloc_chan_resources()
1835 ppc440spe_chan->needs_unmap = 1; in ppc440spe_adma_alloc_chan_resources()
1838 return (i > 0) ? i : -ENOMEM; in ppc440spe_adma_alloc_chan_resources()
1842 * ppc440spe_rxor_set_region - set the RXOR region bits in a XOR command block
1847 struct xor_cb *xcb = desc->hw_desc; in ppc440spe_rxor_set_region()
1849 xcb->ops[xor_arg_no].h |= mask; in ppc440spe_rxor_set_region()
1853 * ppc440spe_rxor_set_src - set an RXOR source address in a XOR command block
1858 struct xor_cb *xcb = desc->hw_desc; in ppc440spe_rxor_set_src()
1860 xcb->ops[xor_arg_no].h |= DMA_CUED_XOR_BASE; in ppc440spe_rxor_set_src()
1861 xcb->ops[xor_arg_no].l = addr; in ppc440spe_rxor_set_src()
1865 * ppc440spe_rxor_set_mult - set an RXOR multiplier in a XOR command block
1870 struct xor_cb *xcb = desc->hw_desc; in ppc440spe_rxor_set_mult()
1872 xcb->ops[xor_arg_no].h |= mult << (DMA_CUED_MULT1_OFF + idx * 8); in ppc440spe_rxor_set_mult()
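/* Editor's note: the RXOR multipliers share the high word of the operand
 * entry; idx selects a byte lane above DMA_CUED_MULT1_OFF, hence the
 * idx * 8 shift, so each multiplier of the region occupies its own byte. */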
1876 * ppc440spe_adma_check_threshold - append CDBs to h/w chain if threshold
1881 dev_dbg(chan->device->common.dev, "ppc440spe adma%d: pending: %d\n", in ppc440spe_adma_check_threshold()
1882 chan->device->id, chan->pending); in ppc440spe_adma_check_threshold()
1884 if (chan->pending >= PPC440SPE_ADMA_THRESHOLD) { in ppc440spe_adma_check_threshold()
1885 chan->pending = 0; in ppc440spe_adma_check_threshold()
1891 * ppc440spe_adma_tx_submit - submit new descriptor group to the channel
1898 struct ppc440spe_adma_chan *chan = to_ppc440spe_adma_chan(tx->chan); in ppc440spe_adma_tx_submit()
1906 group_start = sw_desc->group_head; in ppc440spe_adma_tx_submit()
1907 slot_cnt = group_start->slot_cnt; in ppc440spe_adma_tx_submit()
1908 slots_per_op = group_start->slots_per_op; in ppc440spe_adma_tx_submit()
1910 spin_lock_bh(&chan->lock); in ppc440spe_adma_tx_submit()
1913 if (unlikely(list_empty(&chan->chain))) { in ppc440spe_adma_tx_submit()
1915 list_splice_init(&sw_desc->group_list, &chan->chain); in ppc440spe_adma_tx_submit()
1916 chan_first_cdb[chan->device->id] = group_start; in ppc440spe_adma_tx_submit()
1919 old_chain_tail = list_entry(chan->chain.prev, in ppc440spe_adma_tx_submit()
1922 list_splice_init(&sw_desc->group_list, in ppc440spe_adma_tx_submit()
1923 &old_chain_tail->chain_node); in ppc440spe_adma_tx_submit()
1929 chan->pending += slot_cnt / slots_per_op; in ppc440spe_adma_tx_submit()
1931 spin_unlock_bh(&chan->lock); in ppc440spe_adma_tx_submit()
1933 dev_dbg(chan->device->common.dev, in ppc440spe_adma_tx_submit()
1935 chan->device->id, __func__, in ppc440spe_adma_tx_submit()
1936 sw_desc->async_tx.cookie, sw_desc->idx, sw_desc); in ppc440spe_adma_tx_submit()
1942 * ppc440spe_adma_prep_dma_interrupt - prepare CDB for a pseudo DMA operation
1953 dev_dbg(ppc440spe_chan->device->common.dev, in ppc440spe_adma_prep_dma_interrupt()
1954 "ppc440spe adma%d: %s\n", ppc440spe_chan->device->id, in ppc440spe_adma_prep_dma_interrupt()
1957 spin_lock_bh(&ppc440spe_chan->lock); in ppc440spe_adma_prep_dma_interrupt()
1962 group_start = sw_desc->group_head; in ppc440spe_adma_prep_dma_interrupt()
1964 group_start->unmap_len = 0; in ppc440spe_adma_prep_dma_interrupt()
1965 sw_desc->async_tx.flags = flags; in ppc440spe_adma_prep_dma_interrupt()
1967 spin_unlock_bh(&ppc440spe_chan->lock); in ppc440spe_adma_prep_dma_interrupt()
1969 return sw_desc ? &sw_desc->async_tx : NULL; in ppc440spe_adma_prep_dma_interrupt()
1973 * ppc440spe_adma_prep_dma_memcpy - prepare CDB for a MEMCPY operation
1990 spin_lock_bh(&ppc440spe_chan->lock); in ppc440spe_adma_prep_dma_memcpy()
1992 dev_dbg(ppc440spe_chan->device->common.dev, in ppc440spe_adma_prep_dma_memcpy()
1994 ppc440spe_chan->device->id, __func__, len, in ppc440spe_adma_prep_dma_memcpy()
2000 group_start = sw_desc->group_head; in ppc440spe_adma_prep_dma_memcpy()
2005 sw_desc->unmap_len = len; in ppc440spe_adma_prep_dma_memcpy()
2006 sw_desc->async_tx.flags = flags; in ppc440spe_adma_prep_dma_memcpy()
2008 spin_unlock_bh(&ppc440spe_chan->lock); in ppc440spe_adma_prep_dma_memcpy()
2010 return sw_desc ? &sw_desc->async_tx : NULL; in ppc440spe_adma_prep_dma_memcpy()
2014 * ppc440spe_adma_prep_dma_xor - prepare CDB for a XOR operation
2027 ADMA_LL_DBG(prep_dma_xor_dbg(ppc440spe_chan->device->id, in ppc440spe_adma_prep_dma_xor()
2033 dev_dbg(ppc440spe_chan->device->common.dev, in ppc440spe_adma_prep_dma_xor()
2035 ppc440spe_chan->device->id, __func__, src_cnt, len, in ppc440spe_adma_prep_dma_xor()
2038 spin_lock_bh(&ppc440spe_chan->lock); in ppc440spe_adma_prep_dma_xor()
2043 group_start = sw_desc->group_head; in ppc440spe_adma_prep_dma_xor()
2046 while (src_cnt--) in ppc440spe_adma_prep_dma_xor()
2050 sw_desc->unmap_len = len; in ppc440spe_adma_prep_dma_xor()
2051 sw_desc->async_tx.flags = flags; in ppc440spe_adma_prep_dma_xor()
2053 spin_unlock_bh(&ppc440spe_chan->lock); in ppc440spe_adma_prep_dma_xor()
2055 return sw_desc ? &sw_desc->async_tx : NULL; in ppc440spe_adma_prep_dma_xor()
2064 * ppc440spe_adma_init_dma2rxor_slot - set up the RXOR source addresses of a DMA2 descriptor group
2068 dma_addr_t *src, int src_cnt) in ppc440spe_adma_init_dma2rxor_slot() argument
2074 ppc440spe_adma_dma2rxor_prep_src(desc, &desc->rxor_cursor, i, in ppc440spe_adma_init_dma2rxor_slot()
2075 desc->src_cnt, (u32)src[i]); in ppc440spe_adma_init_dma2rxor_slot()
2080 * ppc440spe_dma01_prep_mult - prepare descriptors for a coefficient multiply (dst = coef * src)
2085 dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt, in ppc440spe_dma01_prep_mult() argument
2095 spin_lock_bh(&ppc440spe_chan->lock); in ppc440spe_dma01_prep_mult()
2104 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan); in ppc440spe_dma01_prep_mult()
2105 set_bits(op, &sw_desc->flags); in ppc440spe_dma01_prep_mult()
2106 sw_desc->src_cnt = src_cnt; in ppc440spe_dma01_prep_mult()
2107 sw_desc->dst_cnt = dst_cnt; in ppc440spe_dma01_prep_mult()
2111 iter = list_first_entry(&sw_desc->group_list, in ppc440spe_dma01_prep_mult()
2114 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); in ppc440spe_dma01_prep_mult()
2116 iter->hw_next = list_entry(iter->chain_node.next, in ppc440spe_dma01_prep_mult()
2119 clear_bit(PPC440SPE_DESC_INT, &iter->flags); in ppc440spe_dma01_prep_mult()
2120 hw_desc = iter->hw_desc; in ppc440spe_dma01_prep_mult()
2121 hw_desc->opc = DMA_CDB_OPC_MULTICAST; in ppc440spe_dma01_prep_mult()
2127 src[0]); in ppc440spe_dma01_prep_mult()
2129 iter->unmap_len = len; in ppc440spe_dma01_prep_mult()
2135 iter = list_first_entry(&iter->chain_node, in ppc440spe_dma01_prep_mult()
2138 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); in ppc440spe_dma01_prep_mult()
2139 iter->hw_next = NULL; in ppc440spe_dma01_prep_mult()
2141 set_bit(PPC440SPE_DESC_INT, &iter->flags); in ppc440spe_dma01_prep_mult()
2143 clear_bit(PPC440SPE_DESC_INT, &iter->flags); in ppc440spe_dma01_prep_mult()
2145 hw_desc = iter->hw_desc; in ppc440spe_dma01_prep_mult()
2146 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; in ppc440spe_dma01_prep_mult()
2155 iter->unmap_len = len; in ppc440spe_dma01_prep_mult()
2156 sw_desc->async_tx.flags = flags; in ppc440spe_dma01_prep_mult()
2159 spin_unlock_bh(&ppc440spe_chan->lock); in ppc440spe_dma01_prep_mult()
2165 * ppc440spe_dma01_prep_sum_product - prepare descriptors for a sum of products (dst = src[0]*coef[0] XOR src[1]*coef[1])
2171 dma_addr_t *dst, dma_addr_t *src, int src_cnt, in ppc440spe_dma01_prep_sum_product() argument
2181 spin_lock_bh(&ppc440spe_chan->lock); in ppc440spe_dma01_prep_sum_product()
2190 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan); in ppc440spe_dma01_prep_sum_product()
2191 set_bits(op, &sw_desc->flags); in ppc440spe_dma01_prep_sum_product()
2192 sw_desc->src_cnt = src_cnt; in ppc440spe_dma01_prep_sum_product()
2193 sw_desc->dst_cnt = 1; in ppc440spe_dma01_prep_sum_product()
2194 /* 1st descriptor, src[1] data to q page and zero destination */ in ppc440spe_dma01_prep_sum_product()
2195 iter = list_first_entry(&sw_desc->group_list, in ppc440spe_dma01_prep_sum_product()
2198 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); in ppc440spe_dma01_prep_sum_product()
2199 iter->hw_next = list_entry(iter->chain_node.next, in ppc440spe_dma01_prep_sum_product()
2202 clear_bit(PPC440SPE_DESC_INT, &iter->flags); in ppc440spe_dma01_prep_sum_product()
2203 hw_desc = iter->hw_desc; in ppc440spe_dma01_prep_sum_product()
2204 hw_desc->opc = DMA_CDB_OPC_MULTICAST; in ppc440spe_dma01_prep_sum_product()
2209 ppc440spe_chan->qdest, 1); in ppc440spe_dma01_prep_sum_product()
2211 src[1]); in ppc440spe_dma01_prep_sum_product()
2213 iter->unmap_len = len; in ppc440spe_dma01_prep_sum_product()
2215 /* 2nd descriptor, multiply src[1] data and store the in ppc440spe_dma01_prep_sum_product()
2217 iter = list_first_entry(&iter->chain_node, in ppc440spe_dma01_prep_sum_product()
2220 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); in ppc440spe_dma01_prep_sum_product()
2222 iter->hw_next = list_entry(iter->chain_node.next, in ppc440spe_dma01_prep_sum_product()
2226 set_bit(PPC440SPE_DESC_INT, &iter->flags); in ppc440spe_dma01_prep_sum_product()
2228 clear_bit(PPC440SPE_DESC_INT, &iter->flags); in ppc440spe_dma01_prep_sum_product()
2230 hw_desc = iter->hw_desc; in ppc440spe_dma01_prep_sum_product()
2231 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; in ppc440spe_dma01_prep_sum_product()
2233 ppc440spe_chan->qdest); in ppc440spe_dma01_prep_sum_product()
2239 iter->unmap_len = len; in ppc440spe_dma01_prep_sum_product()
2242 * 3rd descriptor, multiply src[0] data and xor it in ppc440spe_dma01_prep_sum_product()
2245 iter = list_first_entry(&iter->chain_node, in ppc440spe_dma01_prep_sum_product()
2248 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); in ppc440spe_dma01_prep_sum_product()
2249 iter->hw_next = NULL; in ppc440spe_dma01_prep_sum_product()
2251 set_bit(PPC440SPE_DESC_INT, &iter->flags); in ppc440spe_dma01_prep_sum_product()
2253 clear_bit(PPC440SPE_DESC_INT, &iter->flags); in ppc440spe_dma01_prep_sum_product()
2255 hw_desc = iter->hw_desc; in ppc440spe_dma01_prep_sum_product()
2256 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; in ppc440spe_dma01_prep_sum_product()
2258 src[0]); in ppc440spe_dma01_prep_sum_product()
2264 iter->unmap_len = len; in ppc440spe_dma01_prep_sum_product()
2265 sw_desc->async_tx.flags = flags; in ppc440spe_dma01_prep_sum_product()
2268 spin_unlock_bh(&ppc440spe_chan->lock); in ppc440spe_dma01_prep_sum_product()
2275 dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt, in ppc440spe_dma01_prep_pq() argument
2287 * of destinations (RXOR supports only Q-parity calculations) in ppc440spe_dma01_prep_pq()
2293 * - there are more than 1 source, in ppc440spe_dma01_prep_pq()
2294 * - len is aligned on 512-byte boundary, in ppc440spe_dma01_prep_pq()
2295 * - source addresses fit to one of 4 possible regions. in ppc440spe_dma01_prep_pq()
2299 (src[0] + len) == src[1]) { in ppc440spe_dma01_prep_pq()
2304 if ((src[1] + len) == src[2]) { in ppc440spe_dma01_prep_pq()
2308 } else if ((src[1] + len * 2) == src[2]) { in ppc440spe_dma01_prep_pq()
2311 } else if ((src[1] + len * 3) == src[2]) { in ppc440spe_dma01_prep_pq()
2339 * source to GF-XOR them with WXOR, and need descriptors in ppc440spe_dma01_prep_pq()
2354 * need (src_cnt - (2 or 3)) for WXOR of sources in ppc440spe_dma01_prep_pq()
2365 slot_cnt += src_cnt - 2; in ppc440spe_dma01_prep_pq()
2367 slot_cnt += src_cnt - 3; in ppc440spe_dma01_prep_pq()
2377 spin_lock_bh(&ppc440spe_chan->lock); in ppc440spe_dma01_prep_pq()
2384 /* setup dst/src/mult */ in ppc440spe_dma01_prep_pq()
2388 while (src_cnt--) { in ppc440spe_dma01_prep_pq()
2389 ppc440spe_adma_pq_set_src(sw_desc, src[src_cnt], in ppc440spe_dma01_prep_pq()
2396 * So, for the P case set mult = 1 explicitly. in ppc440spe_dma01_prep_pq()
2401 mult, src_cnt, dst_cnt - 1); in ppc440spe_dma01_prep_pq()
2405 sw_desc->async_tx.flags = flags; in ppc440spe_dma01_prep_pq()
2406 list_for_each_entry(iter, &sw_desc->group_list, in ppc440spe_dma01_prep_pq()
2410 iter->unmap_len = len; in ppc440spe_dma01_prep_pq()
2413 spin_unlock_bh(&ppc440spe_chan->lock); in ppc440spe_dma01_prep_pq()
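/* Editor's note: a concrete reading of the region checks above, assuming
 * len = 0x1000 (512-byte aligned) and src[0] = 0x100000: src[1] must be
 * 0x101000 (src[0] + len) to start an RXOR run; then src[2] == 0x102000
 * selects R1-2-3, src[2] == 0x103000 selects R1-2-4, src[2] == 0x104000
 * selects R1-2-5, and any other address leaves the remaining sources to
 * WXOR descriptors. */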
2420 dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt, in ppc440spe_dma2_prep_pq() argument
2432 spin_lock_bh(&ppc440spe_chan->lock); in ppc440spe_dma2_prep_pq()
2433 descs_per_op = ppc440spe_dma2_pq_slot_count(src, src_cnt, len); in ppc440spe_dma2_prep_pq()
2435 spin_unlock_bh(&ppc440spe_chan->lock); in ppc440spe_dma2_prep_pq()
2445 sw_desc->async_tx.flags = flags; in ppc440spe_dma2_prep_pq()
2446 list_for_each_entry(iter, &sw_desc->group_list, chain_node) { in ppc440spe_dma2_prep_pq()
2448 --op ? 0 : flags); in ppc440spe_dma2_prep_pq()
2451 iter->unmap_len = len; in ppc440spe_dma2_prep_pq()
2453 ppc440spe_init_rxor_cursor(&(iter->rxor_cursor)); in ppc440spe_dma2_prep_pq()
2454 iter->rxor_cursor.len = len; in ppc440spe_dma2_prep_pq()
2455 iter->descs_per_op = descs_per_op; in ppc440spe_dma2_prep_pq()
2458 list_for_each_entry(iter, &sw_desc->group_list, chain_node) { in ppc440spe_dma2_prep_pq()
2461 ppc440spe_adma_init_dma2rxor_slot(iter, src, in ppc440spe_dma2_prep_pq()
2463 if (likely(!list_is_last(&iter->chain_node, in ppc440spe_dma2_prep_pq()
2464 &sw_desc->group_list))) { in ppc440spe_dma2_prep_pq()
2466 iter->hw_next = in ppc440spe_dma2_prep_pq()
2467 list_entry(iter->chain_node.next, in ppc440spe_dma2_prep_pq()
2470 ppc440spe_xor_set_link(iter, iter->hw_next); in ppc440spe_dma2_prep_pq()
2473 iter->hw_next = NULL; in ppc440spe_dma2_prep_pq()
2478 sw_desc->dst_cnt = dst_cnt; in ppc440spe_dma2_prep_pq()
2480 set_bit(PPC440SPE_ZERO_P, &sw_desc->flags); in ppc440spe_dma2_prep_pq()
2482 set_bit(PPC440SPE_ZERO_Q, &sw_desc->flags); in ppc440spe_dma2_prep_pq()
2484 /* setup dst/src/mult */ in ppc440spe_dma2_prep_pq()
2487 while (src_cnt--) { in ppc440spe_dma2_prep_pq()
2491 ppc440spe_adma_pq_set_src(sw_desc, src[src_cnt], in ppc440spe_dma2_prep_pq()
2496 mult, src_cnt, dst_cnt - 1); in ppc440spe_dma2_prep_pq()
2499 spin_unlock_bh(&ppc440spe_chan->lock); in ppc440spe_dma2_prep_pq()
2505 * ppc440spe_adma_prep_dma_pq - prepare CDB (group) for a GF-XOR operation
2508 struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, in ppc440spe_adma_prep_dma_pq() argument
2518 ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id, in ppc440spe_adma_prep_dma_pq()
2519 dst, src, src_cnt)); in ppc440spe_adma_prep_dma_pq()
2524 if (src_cnt == 1 && dst[1] == src[0]) { in ppc440spe_adma_prep_dma_pq()
2530 dest[1] = ppc440spe_chan->qdest; in ppc440spe_adma_prep_dma_pq()
2532 dest, 2, src, src_cnt, scf, len, flags); in ppc440spe_adma_prep_dma_pq()
2533 return sw_desc ? &sw_desc->async_tx : NULL; in ppc440spe_adma_prep_dma_pq()
2536 if (src_cnt == 2 && dst[1] == src[1]) { in ppc440spe_adma_prep_dma_pq()
2538 &dst[1], src, 2, scf, len, flags); in ppc440spe_adma_prep_dma_pq()
2539 return sw_desc ? &sw_desc->async_tx : NULL; in ppc440spe_adma_prep_dma_pq()
2556 dev_dbg(ppc440spe_chan->device->common.dev, in ppc440spe_adma_prep_dma_pq()
2558 ppc440spe_chan->device->id, __func__, src_cnt, len, in ppc440spe_adma_prep_dma_pq()
2561 switch (ppc440spe_chan->device->id) { in ppc440spe_adma_prep_dma_pq()
2565 dst, dst_cnt, src, src_cnt, scf, in ppc440spe_adma_prep_dma_pq()
2571 dst, dst_cnt, src, src_cnt, scf, in ppc440spe_adma_prep_dma_pq()
2576 return sw_desc ? &sw_desc->async_tx : NULL; in ppc440spe_adma_prep_dma_pq()
2580 * ppc440spe_adma_prep_dma_pqzero_sum - prepare CDB group for
2584 struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, in ppc440spe_adma_prep_dma_pqzero_sum() argument
2605 ADMA_LL_DBG(prep_dma_pqzero_sum_dbg(ppc440spe_chan->device->id, in ppc440spe_adma_prep_dma_pqzero_sum()
2606 src, src_cnt, scf)); in ppc440spe_adma_prep_dma_pqzero_sum()
2619 spin_lock_bh(&ppc440spe_chan->lock); in ppc440spe_adma_prep_dma_pqzero_sum()
2626 sw_desc->async_tx.flags = flags; in ppc440spe_adma_prep_dma_pqzero_sum()
2627 list_for_each_entry(iter, &sw_desc->group_list, chain_node) { in ppc440spe_adma_prep_dma_pqzero_sum()
2630 iter->unmap_len = len; in ppc440spe_adma_prep_dma_pqzero_sum()
2637 iter = sw_desc->group_head; in ppc440spe_adma_prep_dma_pqzero_sum()
2638 chan = to_ppc440spe_adma_chan(iter->async_tx.chan); in ppc440spe_adma_prep_dma_pqzero_sum()
2639 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); in ppc440spe_adma_prep_dma_pqzero_sum()
2640 iter->hw_next = list_entry(iter->chain_node.next, in ppc440spe_adma_prep_dma_pqzero_sum()
2643 hw_desc = iter->hw_desc; in ppc440spe_adma_prep_dma_pqzero_sum()
2644 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; in ppc440spe_adma_prep_dma_pqzero_sum()
2645 iter->src_cnt = 0; in ppc440spe_adma_prep_dma_pqzero_sum()
2646 iter->dst_cnt = 0; in ppc440spe_adma_prep_dma_pqzero_sum()
2648 ppc440spe_chan->pdest, 0); in ppc440spe_adma_prep_dma_pqzero_sum()
2652 iter->unmap_len = 0; in ppc440spe_adma_prep_dma_pqzero_sum()
2654 pdest = ppc440spe_chan->pdest; in ppc440spe_adma_prep_dma_pqzero_sum()
2660 iter = list_first_entry(&sw_desc->group_list, in ppc440spe_adma_prep_dma_pqzero_sum()
2663 chan = to_ppc440spe_adma_chan(iter->async_tx.chan); in ppc440spe_adma_prep_dma_pqzero_sum()
2666 iter = list_entry(iter->chain_node.next, in ppc440spe_adma_prep_dma_pqzero_sum()
2671 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); in ppc440spe_adma_prep_dma_pqzero_sum()
2672 iter->hw_next = list_entry(iter->chain_node.next, in ppc440spe_adma_prep_dma_pqzero_sum()
2675 hw_desc = iter->hw_desc; in ppc440spe_adma_prep_dma_pqzero_sum()
2676 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; in ppc440spe_adma_prep_dma_pqzero_sum()
2677 iter->src_cnt = 0; in ppc440spe_adma_prep_dma_pqzero_sum()
2678 iter->dst_cnt = 0; in ppc440spe_adma_prep_dma_pqzero_sum()
2680 ppc440spe_chan->qdest, 0); in ppc440spe_adma_prep_dma_pqzero_sum()
2684 iter->unmap_len = 0; in ppc440spe_adma_prep_dma_pqzero_sum()
2686 qdest = ppc440spe_chan->qdest; in ppc440spe_adma_prep_dma_pqzero_sum()
2694 list_for_each_entry_reverse(iter, &sw_desc->group_list, in ppc440spe_adma_prep_dma_pqzero_sum()
2697 * The last CDB corresponds to Q-parity check, in ppc440spe_adma_prep_dma_pqzero_sum()
2699 * P-parity check in ppc440spe_adma_prep_dma_pqzero_sum()
2704 &iter->flags); in ppc440spe_adma_prep_dma_pqzero_sum()
2707 &iter->flags); in ppc440spe_adma_prep_dma_pqzero_sum()
2712 &iter->flags); in ppc440spe_adma_prep_dma_pqzero_sum()
2715 &iter->flags); in ppc440spe_adma_prep_dma_pqzero_sum()
2718 iter->xor_check_result = pqres; in ppc440spe_adma_prep_dma_pqzero_sum()
2724 *iter->xor_check_result = 0; in ppc440spe_adma_prep_dma_pqzero_sum()
2728 if (!(--dst_cnt)) in ppc440spe_adma_prep_dma_pqzero_sum()
2733 list_for_each_entry_continue_reverse(iter, &sw_desc->group_list, in ppc440spe_adma_prep_dma_pqzero_sum()
2738 chan = to_ppc440spe_adma_chan(iter->async_tx.chan); in ppc440spe_adma_prep_dma_pqzero_sum()
2741 src[src_cnt - 1]); in ppc440spe_adma_prep_dma_pqzero_sum()
2743 mult_dst = (dst_cnt - 1) ? DMA_CDB_SG_DST2 : in ppc440spe_adma_prep_dma_pqzero_sum()
2748 scf[src_cnt - 1]); in ppc440spe_adma_prep_dma_pqzero_sum()
2750 if (!(--src_cnt)) in ppc440spe_adma_prep_dma_pqzero_sum()
2754 spin_unlock_bh(&ppc440spe_chan->lock); in ppc440spe_adma_prep_dma_pqzero_sum()
2755 return sw_desc ? &sw_desc->async_tx : NULL; in ppc440spe_adma_prep_dma_pqzero_sum()
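/* Editor's note: the chain built above runs in two phases: the leading
 * CDBs recompute P and/or Q into the channel's spare pages
 * (chan->pdest/chan->qdest), and the trailing DCHECK128 CDBs compare the
 * result against the caller's parity, reporting mismatches through
 * iter->xor_check_result (i.e. *pqres). */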
2759 * ppc440spe_adma_prep_dma_xor_zero_sum - prepare CDB group for
2763 struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, in ppc440spe_adma_prep_dma_xor_zero_sum() argument
2770 pq[0] = src[0]; in ppc440spe_adma_prep_dma_xor_zero_sum()
2774 tx = ppc440spe_adma_prep_dma_pqzero_sum(chan, pq, &src[1], in ppc440spe_adma_prep_dma_xor_zero_sum()
2775 src_cnt - 1, 0, len, in ppc440spe_adma_prep_dma_xor_zero_sum()
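/* Editor's note: XOR zero-sum is thus a degenerate PQ zero-sum: src[0]
 * becomes the expected parity (pq[0]) and the remaining src_cnt - 1
 * buffers are summed and checked against it; the Q side is unused here. */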
2781 * ppc440spe_adma_set_dest - set destination address into descriptor
2788 BUG_ON(index >= sw_desc->dst_cnt); in ppc440spe_adma_set_dest()
2790 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan); in ppc440spe_adma_set_dest()
2792 switch (chan->device->id) { in ppc440spe_adma_set_dest()
2798 ppc440spe_desc_set_dest_addr(sw_desc->group_head, in ppc440spe_adma_set_dest()
2827 * ppc440spe_adma_pq_set_dest - set destination address into descriptor
2839 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan); in ppc440spe_adma_pq_set_dest()
2854 switch (chan->device->id) { in ppc440spe_adma_pq_set_dest()
2857 /* walk through the WXOR source list and set P/Q-destinations in ppc440spe_adma_pq_set_dest()
2860 if (!test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) { in ppc440spe_adma_pq_set_dest()
2861 /* This is WXOR-only chain; may have 1/2 zero descs */ in ppc440spe_adma_pq_set_dest()
2862 if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags)) in ppc440spe_adma_pq_set_dest()
2864 if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags)) in ppc440spe_adma_pq_set_dest()
2871 &sw_desc->group_list, chain_node) in ppc440spe_adma_pq_set_dest()
2877 &sw_desc->group_list, chain_node) { in ppc440spe_adma_pq_set_dest()
2891 &sw_desc->flags)) { in ppc440spe_adma_pq_set_dest()
2899 &sw_desc->flags)) { in ppc440spe_adma_pq_set_dest()
2909 /* This is RXOR-only or RXOR/WXOR mixed chain */ in ppc440spe_adma_pq_set_dest()
2914 ppath = test_bit(PPC440SPE_ZERO_P, &sw_desc->flags) ? in ppc440spe_adma_pq_set_dest()
2918 qpath = test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags) ? in ppc440spe_adma_pq_set_dest()
2936 if (test_bit(PPC440SPE_DESC_WXOR, &sw_desc->flags)) { in ppc440spe_adma_pq_set_dest()
2945 &sw_desc->group_list, in ppc440spe_adma_pq_set_dest()
2955 &sw_desc->group_list, in ppc440spe_adma_pq_set_dest()
2974 * two chains - one for each dest. in ppc440spe_adma_pq_set_dest()
2978 ppath = test_bit(PPC440SPE_ZERO_P, &sw_desc->flags) ? in ppc440spe_adma_pq_set_dest()
2983 qpath = test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags) ? in ppc440spe_adma_pq_set_dest()
2989 for (i = 0; i < sw_desc->descs_per_op; i++) { in ppc440spe_adma_pq_set_dest()
2993 iter = list_entry(iter->chain_node.next, in ppc440spe_adma_pq_set_dest()
3001 sw_desc->descs_per_op); in ppc440spe_adma_pq_set_dest()
3002 for (i = 0; i < sw_desc->descs_per_op; i++) { in ppc440spe_adma_pq_set_dest()
3005 iter = list_entry(iter->chain_node.next, in ppc440spe_adma_pq_set_dest()
3016 * ppc440spe_adma_pqzero_sum_set_dest - set destination addresses into descriptor
3028 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan); in ppc440spe_adma_pqzero_sum_set_dest()
3030 /* walk through the WXOR source list and set P/Q-destinations in ppc440spe_adma_pqzero_sum_set_dest()
3035 list_for_each_entry_reverse(end, &sw_desc->group_list, in ppc440spe_adma_pqzero_sum_set_dest()
3037 if (!(--idx)) in ppc440spe_adma_pqzero_sum_set_dest()
3046 list_for_each_entry_from(iter, &sw_desc->group_list, in ppc440spe_adma_pqzero_sum_set_dest()
3058 list_for_each_entry_from(iter, &sw_desc->group_list, in ppc440spe_adma_pqzero_sum_set_dest()
3074 end = list_entry(end->chain_node.next, in ppc440spe_adma_pqzero_sum_set_dest()
3081 * ppc440spe_desc_set_xor_src_cnt - set source count into descriptor
3087 struct xor_cb *hw_desc = desc->hw_desc; in ppc440spe_desc_set_xor_src_cnt()
3089 hw_desc->cbc &= ~XOR_CDCR_OAC_MSK; in ppc440spe_desc_set_xor_src_cnt()
3090 hw_desc->cbc |= src_cnt; in ppc440spe_desc_set_xor_src_cnt()
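/* Editor's note: the operand count lives in the low bits of CBC (the
 * XOR_CDCR_OAC_MSK field), so the update above is a mask-then-or. A
 * generic stand-alone sketch of the same read-modify-write idiom:
 */
static inline unsigned int set_field(unsigned int word,
				     unsigned int mask, unsigned int val)
{
	return (word & ~mask) | (val & mask);	/* replace only that field */
}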
3094 * ppc440spe_adma_pq_set_src - set source address into descriptor
3103 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan); in ppc440spe_adma_pq_set_src()
3105 switch (chan->device->id) { in ppc440spe_adma_pq_set_src()
3110 if (test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) { in ppc440spe_adma_pq_set_src()
3111 /* RXOR-only or RXOR/WXOR operation */ in ppc440spe_adma_pq_set_src()
3113 &sw_desc->flags) ? 2 : 3; in ppc440spe_adma_pq_set_src()
3117 /* setup sources region (R1-2-3, R1-2-4, in ppc440spe_adma_pq_set_src()
3118 * or R1-2-5) in ppc440spe_adma_pq_set_src()
3121 &sw_desc->flags)) in ppc440spe_adma_pq_set_src()
3125 &sw_desc->flags)) in ppc440spe_adma_pq_set_src()
3129 &sw_desc->flags)) in ppc440spe_adma_pq_set_src()
3133 &sw_desc->flags)) in ppc440spe_adma_pq_set_src()
3152 index - iskip + sw_desc->dst_cnt); in ppc440spe_adma_pq_set_src()
3157 /* WXOR-only operation; skip first slots with in ppc440spe_adma_pq_set_src()
3160 if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags)) in ppc440spe_adma_pq_set_src()
3162 if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags)) in ppc440spe_adma_pq_set_src()
3174 test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags) && in ppc440spe_adma_pq_set_src()
3175 sw_desc->dst_cnt == 2) { in ppc440spe_adma_pq_set_src()
3188 iter = sw_desc->group_head; in ppc440spe_adma_pq_set_src()
3189 if (iter->dst_cnt == 2) { in ppc440spe_adma_pq_set_src()
3190 /* both P & Q calculations required; set P src here */ in ppc440spe_adma_pq_set_src()
3195 sw_desc->descs_per_op); in ppc440spe_adma_pq_set_src()
3203 * ppc440spe_adma_memcpy_xor_set_src - set source address into descriptor
3211 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan); in ppc440spe_adma_memcpy_xor_set_src()
3212 sw_desc = sw_desc->group_head; in ppc440spe_adma_memcpy_xor_set_src()
3219 * ppc440spe_adma_dma2rxor_inc_addr - advance the RXOR cursor by one operand address
3225 cursor->addr_count++; in ppc440spe_adma_dma2rxor_inc_addr()
3226 if (index == src_cnt - 1) { in ppc440spe_adma_dma2rxor_inc_addr()
3227 ppc440spe_desc_set_xor_src_cnt(desc, cursor->addr_count); in ppc440spe_adma_dma2rxor_inc_addr()
3228 } else if (cursor->addr_count == XOR_MAX_OPS) { in ppc440spe_adma_dma2rxor_inc_addr()
3229 ppc440spe_desc_set_xor_src_cnt(desc, cursor->addr_count); in ppc440spe_adma_dma2rxor_inc_addr()
3230 cursor->addr_count = 0; in ppc440spe_adma_dma2rxor_inc_addr()
3231 cursor->desc_count++; in ppc440spe_adma_dma2rxor_inc_addr()
3236 * ppc440spe_adma_dma2rxor_prep_src - setup RXOR types in DMA2 CDB
3247 for (i = 0; i < cursor->desc_count; i++) { in ppc440spe_adma_dma2rxor_prep_src()
3248 desc = list_entry(hdesc->chain_node.next, in ppc440spe_adma_dma2rxor_prep_src()
3253 switch (cursor->state) { in ppc440spe_adma_dma2rxor_prep_src()
3255 if (addr == cursor->addrl + cursor->len) { in ppc440spe_adma_dma2rxor_prep_src()
3257 cursor->state = 1; in ppc440spe_adma_dma2rxor_prep_src()
3258 cursor->xor_count++; in ppc440spe_adma_dma2rxor_prep_src()
3259 if (index == src_cnt-1) { in ppc440spe_adma_dma2rxor_prep_src()
3261 cursor->addr_count, in ppc440spe_adma_dma2rxor_prep_src()
3266 } else if (cursor->addrl == addr + cursor->len) { in ppc440spe_adma_dma2rxor_prep_src()
3268 cursor->state = 1; in ppc440spe_adma_dma2rxor_prep_src()
3269 cursor->xor_count++; in ppc440spe_adma_dma2rxor_prep_src()
3270 set_bit(cursor->addr_count, &desc->reverse_flags[0]); in ppc440spe_adma_dma2rxor_prep_src()
3271 if (index == src_cnt-1) { in ppc440spe_adma_dma2rxor_prep_src()
3273 cursor->addr_count, in ppc440spe_adma_dma2rxor_prep_src()
3285 sign = test_bit(cursor->addr_count, in ppc440spe_adma_dma2rxor_prep_src()
3286 desc->reverse_flags) in ppc440spe_adma_dma2rxor_prep_src()
3287 ? -1 : 1; in ppc440spe_adma_dma2rxor_prep_src()
3288 if (index == src_cnt-2 || (sign == -1 in ppc440spe_adma_dma2rxor_prep_src()
3289 && addr != cursor->addrl - 2*cursor->len)) { in ppc440spe_adma_dma2rxor_prep_src()
3290 cursor->state = 0; in ppc440spe_adma_dma2rxor_prep_src()
3291 cursor->xor_count = 1; in ppc440spe_adma_dma2rxor_prep_src()
3292 cursor->addrl = addr; in ppc440spe_adma_dma2rxor_prep_src()
3294 cursor->addr_count, in ppc440spe_adma_dma2rxor_prep_src()
3298 } else if (addr == cursor->addrl + 2*sign*cursor->len) { in ppc440spe_adma_dma2rxor_prep_src()
3299 cursor->state = 2; in ppc440spe_adma_dma2rxor_prep_src()
3300 cursor->xor_count = 0; in ppc440spe_adma_dma2rxor_prep_src()
3302 cursor->addr_count, in ppc440spe_adma_dma2rxor_prep_src()
3304 if (index == src_cnt-1) { in ppc440spe_adma_dma2rxor_prep_src()
3308 } else if (addr == cursor->addrl + 3*cursor->len) { in ppc440spe_adma_dma2rxor_prep_src()
3309 cursor->state = 2; in ppc440spe_adma_dma2rxor_prep_src()
3310 cursor->xor_count = 0; in ppc440spe_adma_dma2rxor_prep_src()
3312 cursor->addr_count, in ppc440spe_adma_dma2rxor_prep_src()
3314 if (index == src_cnt-1) { in ppc440spe_adma_dma2rxor_prep_src()
3318 } else if (addr == cursor->addrl + 4*cursor->len) { in ppc440spe_adma_dma2rxor_prep_src()
3319 cursor->state = 2; in ppc440spe_adma_dma2rxor_prep_src()
3320 cursor->xor_count = 0; in ppc440spe_adma_dma2rxor_prep_src()
3322 cursor->addr_count, in ppc440spe_adma_dma2rxor_prep_src()
3324 if (index == src_cnt-1) { in ppc440spe_adma_dma2rxor_prep_src()
3329 cursor->state = 0; in ppc440spe_adma_dma2rxor_prep_src()
3330 cursor->xor_count = 1; in ppc440spe_adma_dma2rxor_prep_src()
3331 cursor->addrl = addr; in ppc440spe_adma_dma2rxor_prep_src()
3333 cursor->addr_count, in ppc440spe_adma_dma2rxor_prep_src()
3340 cursor->state = 0; in ppc440spe_adma_dma2rxor_prep_src()
3341 cursor->addrl = addr; in ppc440spe_adma_dma2rxor_prep_src()
3342 cursor->xor_count++; in ppc440spe_adma_dma2rxor_prep_src()
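/*
 * Editorial summary of the cursor state machine above:
 *   state 0 - one address of a candidate region seen; a neighbour at
 *             +len (or at -len, recorded in reverse_flags) advances to
 *             state 1, anything else restarts the candidate region;
 *   state 1 - two consecutive addresses found; the next address picks
 *             the region type (+2*len -> R1-2-3, +3*len -> R1-2-4,
 *             +4*len -> R1-2-5) and enters state 2, otherwise the
 *             cursor falls back to state 0 and the source is handled
 *             as plain WXOR;
 *   state 2 - a region has just been completed; the next address
 *             starts a fresh candidate region.
 */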
3354 * ppc440spe_adma_dma2rxor_set_src - set RXOR source address; it's assumed that
3361 struct xor_cb *xcb = desc->hw_desc; in ppc440spe_adma_dma2rxor_set_src()
3369 desc = list_entry(desc->chain_node.next, in ppc440spe_adma_dma2rxor_set_src()
3371 xcb = desc->hw_desc; in ppc440spe_adma_dma2rxor_set_src()
3374 if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) == in ppc440spe_adma_dma2rxor_set_src()
3383 if (test_bit(k-1, desc->reverse_flags)) { in ppc440spe_adma_dma2rxor_set_src()
3385 if (index == op - 1) in ppc440spe_adma_dma2rxor_set_src()
3386 ppc440spe_rxor_set_src(desc, k - 1, addr); in ppc440spe_adma_dma2rxor_set_src()
3390 ppc440spe_rxor_set_src(desc, k - 1, addr); in ppc440spe_adma_dma2rxor_set_src()
3395 * ppc440spe_adma_dma2rxor_set_mult - set RXOR multipliers; it's assumed that
3402 struct xor_cb *xcb = desc->hw_desc; in ppc440spe_adma_dma2rxor_set_mult()
3410 desc = list_entry(desc->chain_node.next, in ppc440spe_adma_dma2rxor_set_mult()
3413 xcb = desc->hw_desc; in ppc440spe_adma_dma2rxor_set_mult()
3416 if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) == in ppc440spe_adma_dma2rxor_set_mult()
3424 if (test_bit(k-1, desc->reverse_flags)) { in ppc440spe_adma_dma2rxor_set_mult()
3426 ppc440spe_rxor_set_mult(desc, k - 1, op - index - 1, mult); in ppc440spe_adma_dma2rxor_set_mult()
3429 ppc440spe_rxor_set_mult(desc, k - 1, index - lop, mult); in ppc440spe_adma_dma2rxor_set_mult()
3434  * ppc440spe_init_rxor_cursor - initialize the RXOR parse cursor
3439 cursor->state = 2; in ppc440spe_init_rxor_cursor()
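/*
 * Note (editorial): starting in state 2 ("region just completed") is
 * deliberate -- it makes the very first source address take the
 * state-2 branch of ppc440spe_adma_dma2rxor_prep_src() and open a
 * fresh candidate region.
 */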
3443 * ppc440spe_adma_pq_set_src_mult - set multiplication coefficient into
3454 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan); in ppc440spe_adma_pq_set_src_mult()
3456 switch (chan->device->id) { in ppc440spe_adma_pq_set_src_mult()
3459 if (test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) { in ppc440spe_adma_pq_set_src_mult()
3461 &sw_desc->flags) ? 2 : 3; in ppc440spe_adma_pq_set_src_mult()
3466 sw_desc->dst_cnt - 1); in ppc440spe_adma_pq_set_src_mult()
3467 if (sw_desc->dst_cnt == 2) in ppc440spe_adma_pq_set_src_mult()
3476 index - region + in ppc440spe_adma_pq_set_src_mult()
3477 sw_desc->dst_cnt); in ppc440spe_adma_pq_set_src_mult()
3485 /* WXOR-only; in ppc440spe_adma_pq_set_src_mult()
3489 if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags)) in ppc440spe_adma_pq_set_src_mult()
3491 if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags)) in ppc440spe_adma_pq_set_src_mult()
3505  * we've just set the Q multiplier; set up P now. in ppc440spe_adma_pq_set_src_mult()
3515 iter = sw_desc->group_head; in ppc440spe_adma_pq_set_src_mult()
3516 if (sw_desc->dst_cnt == 2) { in ppc440spe_adma_pq_set_src_mult()
3522 sw_desc->descs_per_op); in ppc440spe_adma_pq_set_src_mult()
3530 * ppc440spe_adma_free_chan_resources - free the resources allocated
3541 spin_lock_bh(&ppc440spe_chan->lock); in ppc440spe_adma_free_chan_resources()
3542 list_for_each_entry_safe(iter, _iter, &ppc440spe_chan->chain, in ppc440spe_adma_free_chan_resources()
3545 list_del(&iter->chain_node); in ppc440spe_adma_free_chan_resources()
3548 &ppc440spe_chan->all_slots, slot_node) { in ppc440spe_adma_free_chan_resources()
3549 list_del(&iter->slot_node); in ppc440spe_adma_free_chan_resources()
3551 ppc440spe_chan->slots_allocated--; in ppc440spe_adma_free_chan_resources()
3553 ppc440spe_chan->last_used = NULL; in ppc440spe_adma_free_chan_resources()
3555 dev_dbg(ppc440spe_chan->device->common.dev, in ppc440spe_adma_free_chan_resources()
3557 ppc440spe_chan->device->id, in ppc440spe_adma_free_chan_resources()
3558 __func__, ppc440spe_chan->slots_allocated); in ppc440spe_adma_free_chan_resources()
3559 spin_unlock_bh(&ppc440spe_chan->lock); in ppc440spe_adma_free_chan_resources()
3564 in_use_descs - 1); in ppc440spe_adma_free_chan_resources()
3568 * ppc440spe_adma_tx_status - poll the status of an ADMA transaction
3590 * ppc440spe_adma_eot_handler - end of transfer interrupt handler
3596 dev_dbg(chan->device->common.dev, in ppc440spe_adma_eot_handler()
3597 "ppc440spe adma%d: %s\n", chan->device->id, __func__); in ppc440spe_adma_eot_handler()
3599 tasklet_schedule(&chan->irq_tasklet); in ppc440spe_adma_eot_handler()
3606 * ppc440spe_adma_err_handler - DMA error interrupt handler;
3613 dev_dbg(chan->device->common.dev, in ppc440spe_adma_err_handler()
3614 "ppc440spe adma%d: %s\n", chan->device->id, __func__); in ppc440spe_adma_err_handler()
3616 tasklet_schedule(&chan->irq_tasklet); in ppc440spe_adma_err_handler()
3623  * ppc440spe_test_callback - called when the test operation completes
3631 * ppc440spe_adma_issue_pending - flush all pending descriptors to h/w
3638 dev_dbg(ppc440spe_chan->device->common.dev, in ppc440spe_adma_issue_pending()
3639  "ppc440spe adma%d: %s %d\n", ppc440spe_chan->device->id, in ppc440spe_adma_issue_pending()
3640 __func__, ppc440spe_chan->pending); in ppc440spe_adma_issue_pending()
3642 if (ppc440spe_chan->pending) { in ppc440spe_adma_issue_pending()
3643 ppc440spe_chan->pending = 0; in ppc440spe_adma_issue_pending()
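/*
 * Illustrative sketch (generic dmaengine usage, not driver code): how
 * a client drives this channel.  issue_pending() is only the "kick";
 * descriptors queued via tx_submit() do not reach the hardware until
 * it runs.  Real clients normally sleep on a completion instead of
 * polling as done here.
 */
static void sketch_kick_and_wait(struct dma_chan *chan,
				 struct dma_async_tx_descriptor *tx)
{
	dma_cookie_t cookie;

	cookie = dmaengine_submit(tx);	/* queue the descriptor */
	dma_async_issue_pending(chan);	/* flush the queue to h/w */

	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) !=
	       DMA_COMPLETE)
		cpu_relax();
}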
3649 * ppc440spe_chan_start_null_xor - initiate the first XOR operation (DMA engines
3659 dev_dbg(chan->device->common.dev, in ppc440spe_chan_start_null_xor()
3660 "ppc440spe adma%d: %s\n", chan->device->id, __func__); in ppc440spe_chan_start_null_xor()
3662 spin_lock_bh(&chan->lock); in ppc440spe_chan_start_null_xor()
3666 group_start = sw_desc->group_head; in ppc440spe_chan_start_null_xor()
3667 list_splice_init(&sw_desc->group_list, &chan->chain); in ppc440spe_chan_start_null_xor()
3668 async_tx_ack(&sw_desc->async_tx); in ppc440spe_chan_start_null_xor()
3671 cookie = dma_cookie_assign(&sw_desc->async_tx); in ppc440spe_chan_start_null_xor()
3676 chan->common.completed_cookie = cookie - 1; in ppc440spe_chan_start_null_xor()
3689 chan->device->id); in ppc440spe_chan_start_null_xor()
3690 spin_unlock_bh(&chan->lock); in ppc440spe_chan_start_null_xor()
3694  * ppc440spe_test_raid6 - test whether RAID-6 capabilities were enabled successfully.
3696 * and destination addresses, the GF-multiplier is 1; so if RAID-6
3697 * capabilities are enabled then we'll get src/dst filled with zero.
3712 return -ENOMEM; in ppc440spe_test_raid6()
3714 spin_lock_bh(&chan->lock); in ppc440spe_test_raid6()
3717  /* 1 src, 1 dst, int_ena, WXOR */ in ppc440spe_test_raid6()
3719 list_for_each_entry(iter, &sw_desc->group_list, chain_node) { in ppc440spe_test_raid6()
3721 iter->unmap_len = PAGE_SIZE; in ppc440spe_test_raid6()
3724 rval = -EFAULT; in ppc440spe_test_raid6()
3725 spin_unlock_bh(&chan->lock); in ppc440spe_test_raid6()
3728 spin_unlock_bh(&chan->lock); in ppc440spe_test_raid6()
3732 dma_addr = dma_map_page(chan->device->dev, pg, 0, in ppc440spe_test_raid6()
3742 async_tx_ack(&sw_desc->async_tx); in ppc440spe_test_raid6()
3743 sw_desc->async_tx.callback = ppc440spe_test_callback; in ppc440spe_test_raid6()
3744 sw_desc->async_tx.callback_param = NULL; in ppc440spe_test_raid6()
3748 ppc440spe_adma_tx_submit(&sw_desc->async_tx); in ppc440spe_test_raid6()
3749 ppc440spe_adma_issue_pending(&chan->common); in ppc440spe_test_raid6()
3755 if ((*(u32 *)a) == 0 && memcmp(a, a+4, PAGE_SIZE-4) == 0) { in ppc440spe_test_raid6()
3756 /* page is zero - RAID-6 enabled */ in ppc440spe_test_raid6()
3759 /* RAID-6 was not enabled */ in ppc440spe_test_raid6()
3760 rval = -EINVAL; in ppc440spe_test_raid6()
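/*
 * Sketch (editorial): the success check above uses the classic
 * "whole page is zero" idiom -- test the first word, then memcmp()
 * the page against itself shifted by that word's size.
 */
static bool page_is_zero_sketch(const void *a)
{
	return *(const u32 *)a == 0 &&
	       memcmp(a, a + 4, PAGE_SIZE - 4) == 0;
}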
3769 switch (adev->id) { in ppc440spe_adma_init_capabilities()
3772 dma_cap_set(DMA_MEMCPY, adev->common.cap_mask); in ppc440spe_adma_init_capabilities()
3773 dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask); in ppc440spe_adma_init_capabilities()
3774 dma_cap_set(DMA_PQ, adev->common.cap_mask); in ppc440spe_adma_init_capabilities()
3775 dma_cap_set(DMA_PQ_VAL, adev->common.cap_mask); in ppc440spe_adma_init_capabilities()
3776 dma_cap_set(DMA_XOR_VAL, adev->common.cap_mask); in ppc440spe_adma_init_capabilities()
3779 dma_cap_set(DMA_XOR, adev->common.cap_mask); in ppc440spe_adma_init_capabilities()
3780 dma_cap_set(DMA_PQ, adev->common.cap_mask); in ppc440spe_adma_init_capabilities()
3781 dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask); in ppc440spe_adma_init_capabilities()
3787 adev->common.device_alloc_chan_resources = in ppc440spe_adma_init_capabilities()
3789 adev->common.device_free_chan_resources = in ppc440spe_adma_init_capabilities()
3791 adev->common.device_tx_status = ppc440spe_adma_tx_status; in ppc440spe_adma_init_capabilities()
3792 adev->common.device_issue_pending = ppc440spe_adma_issue_pending; in ppc440spe_adma_init_capabilities()
3795 if (dma_has_cap(DMA_MEMCPY, adev->common.cap_mask)) { in ppc440spe_adma_init_capabilities()
3796 adev->common.device_prep_dma_memcpy = in ppc440spe_adma_init_capabilities()
3799 if (dma_has_cap(DMA_XOR, adev->common.cap_mask)) { in ppc440spe_adma_init_capabilities()
3800 adev->common.max_xor = XOR_MAX_OPS; in ppc440spe_adma_init_capabilities()
3801 adev->common.device_prep_dma_xor = in ppc440spe_adma_init_capabilities()
3804 if (dma_has_cap(DMA_PQ, adev->common.cap_mask)) { in ppc440spe_adma_init_capabilities()
3805 switch (adev->id) { in ppc440spe_adma_init_capabilities()
3807 dma_set_maxpq(&adev->common, in ppc440spe_adma_init_capabilities()
3811 dma_set_maxpq(&adev->common, in ppc440spe_adma_init_capabilities()
3815 adev->common.max_pq = XOR_MAX_OPS * 3; in ppc440spe_adma_init_capabilities()
3818 adev->common.device_prep_dma_pq = in ppc440spe_adma_init_capabilities()
3821 if (dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask)) { in ppc440spe_adma_init_capabilities()
3822 switch (adev->id) { in ppc440spe_adma_init_capabilities()
3824 adev->common.max_pq = DMA0_FIFO_SIZE / in ppc440spe_adma_init_capabilities()
3828 adev->common.max_pq = DMA1_FIFO_SIZE / in ppc440spe_adma_init_capabilities()
3832 adev->common.device_prep_dma_pq_val = in ppc440spe_adma_init_capabilities()
3835 if (dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask)) { in ppc440spe_adma_init_capabilities()
3836 switch (adev->id) { in ppc440spe_adma_init_capabilities()
3838 adev->common.max_xor = DMA0_FIFO_SIZE / in ppc440spe_adma_init_capabilities()
3842 adev->common.max_xor = DMA1_FIFO_SIZE / in ppc440spe_adma_init_capabilities()
3846 adev->common.device_prep_dma_xor_val = in ppc440spe_adma_init_capabilities()
3849 if (dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask)) { in ppc440spe_adma_init_capabilities()
3850 adev->common.device_prep_dma_interrupt = in ppc440spe_adma_init_capabilities()
3855 dev_name(adev->dev), in ppc440spe_adma_init_capabilities()
3856 dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "", in ppc440spe_adma_init_capabilities()
3857 dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "", in ppc440spe_adma_init_capabilities()
3858 dma_has_cap(DMA_XOR, adev->common.cap_mask) ? "xor " : "", in ppc440spe_adma_init_capabilities()
3859 dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask) ? "xor_val " : "", in ppc440spe_adma_init_capabilities()
3860 dma_has_cap(DMA_MEMCPY, adev->common.cap_mask) ? "memcpy " : "", in ppc440spe_adma_init_capabilities()
3861 dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask) ? "intr " : ""); in ppc440spe_adma_init_capabilities()
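/*
 * Illustrative sketch (generic dmaengine API, not driver code): how a
 * client would request a channel with the PQ capability advertised
 * above.
 */
static struct dma_chan *sketch_request_pq_channel(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_PQ, mask);
	/* NULL filter: accept the first free PQ-capable channel */
	return dma_request_channel(mask, NULL, NULL);
}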
3872 ofdev = container_of(adev->dev, struct platform_device, dev); in ppc440spe_adma_setup_irqs()
3873 np = ofdev->dev.of_node; in ppc440spe_adma_setup_irqs()
3874 if (adev->id != PPC440SPE_XOR_ID) { in ppc440spe_adma_setup_irqs()
3875 adev->err_irq = irq_of_parse_and_map(np, 1); in ppc440spe_adma_setup_irqs()
3876 if (!adev->err_irq) { in ppc440spe_adma_setup_irqs()
3877 dev_warn(adev->dev, "no err irq resource?\n"); in ppc440spe_adma_setup_irqs()
3879 adev->err_irq = -ENXIO; in ppc440spe_adma_setup_irqs()
3883 adev->err_irq = -ENXIO; in ppc440spe_adma_setup_irqs()
3886 adev->irq = irq_of_parse_and_map(np, 0); in ppc440spe_adma_setup_irqs()
3887 if (!adev->irq) { in ppc440spe_adma_setup_irqs()
3888 dev_err(adev->dev, "no irq resource\n"); in ppc440spe_adma_setup_irqs()
3890 ret = -ENXIO; in ppc440spe_adma_setup_irqs()
3893 dev_dbg(adev->dev, "irq %d, err irq %d\n", in ppc440spe_adma_setup_irqs()
3894 adev->irq, adev->err_irq); in ppc440spe_adma_setup_irqs()
3896 ret = request_irq(adev->irq, ppc440spe_adma_eot_handler, in ppc440spe_adma_setup_irqs()
3897 0, dev_driver_string(adev->dev), chan); in ppc440spe_adma_setup_irqs()
3899 dev_err(adev->dev, "can't request irq %d\n", in ppc440spe_adma_setup_irqs()
3900 adev->irq); in ppc440spe_adma_setup_irqs()
3902 ret = -EIO; in ppc440spe_adma_setup_irqs()
3909 if (adev->err_irq > 0) { in ppc440spe_adma_setup_irqs()
3911 ret = request_irq(adev->err_irq, in ppc440spe_adma_setup_irqs()
3914 dev_driver_string(adev->dev), in ppc440spe_adma_setup_irqs()
3917 dev_err(adev->dev, "can't request irq %d\n", in ppc440spe_adma_setup_irqs()
3918 adev->err_irq); in ppc440spe_adma_setup_irqs()
3920 ret = -EIO; in ppc440spe_adma_setup_irqs()
3925 if (adev->id == PPC440SPE_XOR_ID) { in ppc440spe_adma_setup_irqs()
3929 &adev->xor_reg->ier); in ppc440spe_adma_setup_irqs()
3933 np = of_find_compatible_node(NULL, NULL, "ibm,i2o-440spe"); in ppc440spe_adma_setup_irqs()
3937 ret = -ENODEV; in ppc440spe_adma_setup_irqs()
3940 adev->i2o_reg = of_iomap(np, 0); in ppc440spe_adma_setup_irqs()
3941 if (!adev->i2o_reg) { in ppc440spe_adma_setup_irqs()
3944 ret = -EINVAL; in ppc440spe_adma_setup_irqs()
3951 enable = (adev->id == PPC440SPE_DMA0_ID) ? in ppc440spe_adma_setup_irqs()
3954 mask = ioread32(&adev->i2o_reg->iopim) & enable; in ppc440spe_adma_setup_irqs()
3955 iowrite32(mask, &adev->i2o_reg->iopim); in ppc440spe_adma_setup_irqs()
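	/*
	 * Note (editorial): IOPIM is a mask register, so interrupts are
	 * enabled here by ANDing with a value that clears the engine's
	 * mask bits; ppc440spe_adma_release_irqs() undoes this by ORing
	 * the same bits back in.
	 */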
3960 free_irq(adev->irq, chan); in ppc440spe_adma_setup_irqs()
3962 irq_dispose_mapping(adev->irq); in ppc440spe_adma_setup_irqs()
3964 if (adev->err_irq > 0) { in ppc440spe_adma_setup_irqs()
3966 irq_dispose_mapping(adev->err_irq); in ppc440spe_adma_setup_irqs()
3976 if (adev->id == PPC440SPE_XOR_ID) { in ppc440spe_adma_release_irqs()
3978 mask = ioread32be(&adev->xor_reg->ier); in ppc440spe_adma_release_irqs()
3981 iowrite32be(mask, &adev->xor_reg->ier); in ppc440spe_adma_release_irqs()
3984 disable = (adev->id == PPC440SPE_DMA0_ID) ? in ppc440spe_adma_release_irqs()
3987 mask = ioread32(&adev->i2o_reg->iopim) | disable; in ppc440spe_adma_release_irqs()
3988 iowrite32(mask, &adev->i2o_reg->iopim); in ppc440spe_adma_release_irqs()
3990 free_irq(adev->irq, chan); in ppc440spe_adma_release_irqs()
3991 irq_dispose_mapping(adev->irq); in ppc440spe_adma_release_irqs()
3992 if (adev->err_irq > 0) { in ppc440spe_adma_release_irqs()
3993 free_irq(adev->err_irq, chan); in ppc440spe_adma_release_irqs()
3995 irq_dispose_mapping(adev->err_irq); in ppc440spe_adma_release_irqs()
3996 iounmap(adev->i2o_reg); in ppc440spe_adma_release_irqs()
4002  * ppc440spe_adma_probe - probe the asynchronous DMA device
4006 struct device_node *np = ofdev->dev.of_node; in ppc440spe_adma_probe()
4017 if (of_device_is_compatible(np, "amcc,xor-accelerator")) { in ppc440spe_adma_probe()
4026 idx = of_get_property(np, "cell-index", &len); in ppc440spe_adma_probe()
4028 dev_err(&ofdev->dev, "Device node %pOF has missing " in ppc440spe_adma_probe()
4029 "or invalid cell-index property\n", in ppc440spe_adma_probe()
4031 return -EINVAL; in ppc440spe_adma_probe()
4051 dev_err(&ofdev->dev, "failed to get memory resource\n"); in ppc440spe_adma_probe()
4053 ret = -ENODEV; in ppc440spe_adma_probe()
4058 dev_driver_string(&ofdev->dev))) { in ppc440spe_adma_probe()
4059 dev_err(&ofdev->dev, "failed to request memory region %pR\n", in ppc440spe_adma_probe()
4062 ret = -EBUSY; in ppc440spe_adma_probe()
4070 ret = -ENOMEM; in ppc440spe_adma_probe()
4074 adev->id = id; in ppc440spe_adma_probe()
4075 adev->pool_size = pool_size; in ppc440spe_adma_probe()
4077 adev->dma_desc_pool_virt = dma_alloc_coherent(&ofdev->dev, in ppc440spe_adma_probe()
4078 adev->pool_size, &adev->dma_desc_pool, in ppc440spe_adma_probe()
4080 if (adev->dma_desc_pool_virt == NULL) { in ppc440spe_adma_probe()
4081 dev_err(&ofdev->dev, "failed to allocate %d bytes of coherent " in ppc440spe_adma_probe()
4083 adev->pool_size); in ppc440spe_adma_probe()
4085 ret = -ENOMEM; in ppc440spe_adma_probe()
4088 dev_dbg(&ofdev->dev, "allocated descriptor pool virt 0x%p phys 0x%llx\n", in ppc440spe_adma_probe()
4089 adev->dma_desc_pool_virt, (u64)adev->dma_desc_pool); in ppc440spe_adma_probe()
4093 dev_err(&ofdev->dev, "failed to ioremap regs!\n"); in ppc440spe_adma_probe()
4094 ret = -ENOMEM; in ppc440spe_adma_probe()
4098 if (adev->id == PPC440SPE_XOR_ID) { in ppc440spe_adma_probe()
4099 adev->xor_reg = regs; in ppc440spe_adma_probe()
4101 iowrite32be(XOR_CRSR_XASR_BIT, &adev->xor_reg->crsr); in ppc440spe_adma_probe()
4102 iowrite32be(XOR_CRSR_64BA_BIT, &adev->xor_reg->crrr); in ppc440spe_adma_probe()
4104 size_t fifo_size = (adev->id == PPC440SPE_DMA0_ID) ? in ppc440spe_adma_probe()
4106 adev->dma_reg = regs; in ppc440spe_adma_probe()
4108  * <fsiz> is defined as a number of CDB pointers (8 bytes each). in ppc440spe_adma_probe()
4112 iowrite32(DMA_FIFO_ENABLE | ((fifo_size >> 3) - 2), in ppc440spe_adma_probe()
4113 &adev->dma_reg->fsiz); in ppc440spe_adma_probe()
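	/*
	 * Worked example (editorial; the FIFO size is assumed for
	 * illustration): a 4 KiB FIFO holds 4096 >> 3 = 512 CDB
	 * pointers, so the value written is DMA_FIFO_ENABLE | 510 --
	 * the field is encoded as "number of slots minus 2".
	 */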
4116 &adev->dma_reg->cfg); in ppc440spe_adma_probe()
4118 iowrite32(~0, &adev->dma_reg->dsts); in ppc440spe_adma_probe()
4121 adev->dev = &ofdev->dev; in ppc440spe_adma_probe()
4122 adev->common.dev = &ofdev->dev; in ppc440spe_adma_probe()
4123 INIT_LIST_HEAD(&adev->common.channels); in ppc440spe_adma_probe()
4130 ret = -ENOMEM; in ppc440spe_adma_probe()
4134 spin_lock_init(&chan->lock); in ppc440spe_adma_probe()
4135 INIT_LIST_HEAD(&chan->chain); in ppc440spe_adma_probe()
4136 INIT_LIST_HEAD(&chan->all_slots); in ppc440spe_adma_probe()
4137 chan->device = adev; in ppc440spe_adma_probe()
4138 chan->common.device = &adev->common; in ppc440spe_adma_probe()
4139 dma_cookie_init(&chan->common); in ppc440spe_adma_probe()
4140 list_add_tail(&chan->common.device_node, &adev->common.channels); in ppc440spe_adma_probe()
4141 tasklet_setup(&chan->irq_tasklet, ppc440spe_adma_tasklet); in ppc440spe_adma_probe()
4146 if (adev->id != PPC440SPE_XOR_ID) { in ppc440spe_adma_probe()
4147 chan->pdest_page = alloc_page(GFP_KERNEL); in ppc440spe_adma_probe()
4148 chan->qdest_page = alloc_page(GFP_KERNEL); in ppc440spe_adma_probe()
4149 if (!chan->pdest_page || in ppc440spe_adma_probe()
4150 !chan->qdest_page) { in ppc440spe_adma_probe()
4151 if (chan->pdest_page) in ppc440spe_adma_probe()
4152 __free_page(chan->pdest_page); in ppc440spe_adma_probe()
4153 if (chan->qdest_page) in ppc440spe_adma_probe()
4154 __free_page(chan->qdest_page); in ppc440spe_adma_probe()
4155 ret = -ENOMEM; in ppc440spe_adma_probe()
4158 chan->pdest = dma_map_page(&ofdev->dev, chan->pdest_page, 0, in ppc440spe_adma_probe()
4160 chan->qdest = dma_map_page(&ofdev->dev, chan->qdest_page, 0, in ppc440spe_adma_probe()
4166 ref->chan = &chan->common; in ppc440spe_adma_probe()
4167 INIT_LIST_HEAD(&ref->node); in ppc440spe_adma_probe()
4168 list_add_tail(&ref->node, &ppc440spe_adma_chan_list); in ppc440spe_adma_probe()
4170 dev_err(&ofdev->dev, "failed to allocate channel reference!\n"); in ppc440spe_adma_probe()
4171 ret = -ENOMEM; in ppc440spe_adma_probe()
4181 ret = dma_async_device_register(&adev->common); in ppc440spe_adma_probe()
4184 dev_err(&ofdev->dev, "failed to register dma device\n"); in ppc440spe_adma_probe()
4194 if (chan == to_ppc440spe_adma_chan(ref->chan)) { in ppc440spe_adma_probe()
4195 list_del(&ref->node); in ppc440spe_adma_probe()
4200 if (adev->id != PPC440SPE_XOR_ID) { in ppc440spe_adma_probe()
4201 dma_unmap_page(&ofdev->dev, chan->pdest, in ppc440spe_adma_probe()
4203 dma_unmap_page(&ofdev->dev, chan->qdest, in ppc440spe_adma_probe()
4205 __free_page(chan->pdest_page); in ppc440spe_adma_probe()
4206 __free_page(chan->qdest_page); in ppc440spe_adma_probe()
4211 if (adev->id == PPC440SPE_XOR_ID) in ppc440spe_adma_probe()
4212 iounmap(adev->xor_reg); in ppc440spe_adma_probe()
4214 iounmap(adev->dma_reg); in ppc440spe_adma_probe()
4216 dma_free_coherent(adev->dev, adev->pool_size, in ppc440spe_adma_probe()
4217 adev->dma_desc_pool_virt, in ppc440spe_adma_probe()
4218 adev->dma_desc_pool); in ppc440spe_adma_probe()
4231  * ppc440spe_adma_remove - remove the asynchronous DMA device
4236 struct device_node *np = ofdev->dev.of_node; in ppc440spe_adma_remove()
4242 if (adev->id < PPC440SPE_ADMA_ENGINES_NUM) in ppc440spe_adma_remove()
4243 ppc440spe_adma_devices[adev->id] = -1; in ppc440spe_adma_remove()
4245 dma_async_device_unregister(&adev->common); in ppc440spe_adma_remove()
4247 list_for_each_entry_safe(chan, _chan, &adev->common.channels, in ppc440spe_adma_remove()
4251 tasklet_kill(&ppc440spe_chan->irq_tasklet); in ppc440spe_adma_remove()
4252 if (adev->id != PPC440SPE_XOR_ID) { in ppc440spe_adma_remove()
4253 dma_unmap_page(&ofdev->dev, ppc440spe_chan->pdest, in ppc440spe_adma_remove()
4255 dma_unmap_page(&ofdev->dev, ppc440spe_chan->qdest, in ppc440spe_adma_remove()
4257 __free_page(ppc440spe_chan->pdest_page); in ppc440spe_adma_remove()
4258 __free_page(ppc440spe_chan->qdest_page); in ppc440spe_adma_remove()
4263 to_ppc440spe_adma_chan(ref->chan)) { in ppc440spe_adma_remove()
4264 list_del(&ref->node); in ppc440spe_adma_remove()
4268 list_del(&chan->device_node); in ppc440spe_adma_remove()
4272 dma_free_coherent(adev->dev, adev->pool_size, in ppc440spe_adma_remove()
4273 adev->dma_desc_pool_virt, adev->dma_desc_pool); in ppc440spe_adma_remove()
4274 if (adev->id == PPC440SPE_XOR_ID) in ppc440spe_adma_remove()
4275 iounmap(adev->xor_reg); in ppc440spe_adma_remove()
4277 iounmap(adev->dma_reg); in ppc440spe_adma_remove()
4284 * /sys driver interface to enable h/w RAID-6 capabilities
4288 * "enable" is used to enable RAID-6 capabilities or to check
4299 if (ppc440spe_adma_devices[i] == -1) in devices_show()
4301 size += sysfs_emit_at(buf, size, "PPC440SP(E)-ADMA.%d: %s\n", in devices_show()
4310 return sysfs_emit(buf, "PPC440SP(e) RAID-6 capabilities are %sABLED.\n", in enable_show()
4321 return -EINVAL; in enable_store()
4324 return -EFAULT; in enable_store()
4336 pr_info("PPC440SP(e) RAID-6 has been activated " in enable_store()
4340 pr_info("PPC440SP(e) RAID-6 hasn't been activated!" in enable_store()
4362 size = sysfs_emit(buf, "PPC440SP(e) RAID-6 driver " in poly_show()
4374 return -EINVAL; in poly_store()
4378 return -EINVAL; in poly_store()
4386 return -EINVAL; in poly_store()
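/*
 * Illustrative sketch (standalone, not driver code): multiplication in
 * GF(2^8) under the polynomial that the "poly" attribute above
 * selects.  Per the comment in ppc440spe_configure_raid_devices(), the
 * hardware default is 0x14d, i.e. x^8 + x^6 + x^3 + x^2 + 1; only the
 * low byte (0x4d) is needed for the reduction step.
 */
static u8 gf256_mul_sketch(u8 a, u8 b)
{
	u8 p = 0;

	while (b) {
		if (b & 1)
			p ^= a;		/* accumulate (addition is XOR) */
		b >>= 1;
		/* multiply a by x and reduce modulo the polynomial */
		a = (a << 1) ^ ((a & 0x80) ? 0x4d : 0);
	}
	return p;
}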
4412 np = of_find_compatible_node(NULL, NULL, "ibm,i2o-440spe"); in ppc440spe_configure_raid_devices()
4416 return -ENODEV; in ppc440spe_configure_raid_devices()
4421 return -EINVAL; in ppc440spe_configure_raid_devices()
4428 return -EINVAL; in ppc440spe_configure_raid_devices()
4438 return -ENODEV; in ppc440spe_configure_raid_devices()
4446 return -ENODEV; in ppc440spe_configure_raid_devices()
4461 return -ENOMEM; in ppc440spe_configure_raid_devices()
4478 iowrite32(0, &i2o_reg->ifbah); in ppc440spe_configure_raid_devices()
4479 iowrite32(((u32)__pa(ppc440spe_dma_fifo_buf)), &i2o_reg->ifbal); in ppc440spe_configure_raid_devices()
4485 iowrite32(0, &i2o_reg->ifsiz); in ppc440spe_configure_raid_devices()
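	/*
	 * Note (editorial): the inbound FIFO base is programmed as a
	 * 64-bit pair -- ifbah (high word) is zeroed on the assumption
	 * that the statically allocated buffer sits below 4 GB, and
	 * ifbal (low word) takes its physical address.
	 */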
4492 np = of_find_compatible_node(NULL, NULL, "ibm,mq-440spe"); in ppc440spe_configure_raid_devices()
4496 ret = -ENODEV; in ppc440spe_configure_raid_devices()
4505 ret = -ENODEV; in ppc440spe_configure_raid_devices()
4512 ret = -ENODEV; in ppc440spe_configure_raid_devices()
4522 * - LL transaction passing limit to 1; in ppc440spe_configure_raid_devices()
4523 * - Memory controller cycle limit to 1; in ppc440spe_configure_raid_devices()
4524 * - Galois Polynomial to 0x14d (default) in ppc440spe_configure_raid_devices()
4532 ppc440spe_adma_devices[i] = -1; in ppc440spe_configure_raid_devices()
4544 { .compatible = "ibm,dma-440spe", },
4545 { .compatible = "amcc,xor-accelerator", },
4554 .name = "PPC440SP(E)-ADMA",
4580 /* RAID-6 h/w enable entry */ in ppc440spe_adma_init()
4598 /* User will not be able to enable h/w RAID-6 */ in ppc440spe_adma_init()
4599 pr_err("%s: failed to create RAID-6 driver interface\n", in ppc440spe_adma_init()