1  /* SPDX-License-Identifier: GPL-2.0-only */
2  /*
3   * AMD Cryptographic Coprocessor (CCP) driver
4   *
5   * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
6   *
7   * Author: Tom Lendacky <thomas.lendacky@amd.com>
8   * Author: Gary R Hook <gary.hook@amd.com>
9   */
10  
11  #ifndef __CCP_DEV_H__
12  #define __CCP_DEV_H__
13  
14  #include <linux/device.h>
15  #include <linux/spinlock.h>
16  #include <linux/mutex.h>
17  #include <linux/list.h>
18  #include <linux/wait.h>
19  #include <linux/dma-direction.h>
20  #include <linux/dmapool.h>
21  #include <linux/hw_random.h>
22  #include <linux/bitops.h>
23  #include <linux/interrupt.h>
24  #include <linux/irqreturn.h>
25  #include <linux/dmaengine.h>
26  
27  #include "sp-dev.h"
28  
#define MAX_CCP_NAME_LEN		16
#define MAX_DMAPOOL_NAME_LEN		32

#define MAX_HW_QUEUES			5
#define MAX_CMD_QLEN			100

/* Maximum attempts at reading a value from the TRNG output register */
#define TRNG_RETRIES			10

/* AXI cache attribute values (see axcache in struct ccp_device) */
#define CACHE_NONE			0x00
#define CACHE_WB_NO_ALLOC		0xb7

/****** Register Mappings ******/
#define Q_MASK_REG			0x000
#define TRNG_OUT_REG			0x00c
#define IRQ_MASK_REG			0x040
#define IRQ_STATUS_REG			0x200

#define DEL_CMD_Q_JOB			0x124
#define DEL_Q_ACTIVE			0x00000200
#define DEL_Q_ID_SHIFT			6

#define CMD_REQ0			0x180
#define CMD_REQ_INCR			0x04

#define CMD_Q_STATUS_BASE		0x210
#define CMD_Q_INT_STATUS_BASE		0x214
#define CMD_Q_STATUS_INCR		0x20

#define CMD_Q_CACHE_BASE		0x228
#define CMD_Q_CACHE_INC			0x20

/* Extract the error code / queue depth fields from a queue status word */
#define CMD_Q_ERROR(__qs)		((__qs) & 0x0000003f)
#define CMD_Q_DEPTH(__qs)		(((__qs) >> 12) & 0x0000000f)
62  
/* ------------------------ CCP Version 5 Specifics ------------------------ */
#define CMD5_QUEUE_MASK_OFFSET		0x00
#define	CMD5_QUEUE_PRIO_OFFSET		0x04
#define CMD5_REQID_CONFIG_OFFSET	0x08
#define	CMD5_CMD_TIMEOUT_OFFSET		0x10
#define LSB_PUBLIC_MASK_LO_OFFSET	0x18
#define LSB_PUBLIC_MASK_HI_OFFSET	0x1C
#define LSB_PRIVATE_MASK_LO_OFFSET	0x20
#define LSB_PRIVATE_MASK_HI_OFFSET	0x24
#define CMD5_PSP_CCP_VERSION		0x100

/* Per-queue register offsets, relative to the queue's register block
 * (cached per queue in the reg_* fields of struct ccp_cmd_queue)
 */
#define CMD5_Q_CONTROL_BASE		0x0000
#define CMD5_Q_TAIL_LO_BASE		0x0004
#define CMD5_Q_HEAD_LO_BASE		0x0008
#define CMD5_Q_INT_ENABLE_BASE		0x000C
#define CMD5_Q_INTERRUPT_STATUS_BASE	0x0010

#define CMD5_Q_STATUS_BASE		0x0100
#define CMD5_Q_INT_STATUS_BASE		0x0104
#define CMD5_Q_DMA_STATUS_BASE		0x0108
#define CMD5_Q_DMA_READ_STATUS_BASE	0x010C
#define CMD5_Q_DMA_WRITE_STATUS_BASE	0x0110
#define CMD5_Q_ABORT_BASE		0x0114
#define CMD5_Q_AX_CACHE_BASE		0x0118

#define	CMD5_CONFIG_0_OFFSET		0x6000
#define	CMD5_TRNG_CTL_OFFSET		0x6008
#define	CMD5_AES_MASK_OFFSET		0x6010
#define	CMD5_CLK_GATE_CTL_OFFSET	0x603C

/* Address offset between two virtual queue registers */
#define CMD5_Q_STATUS_INCR		0x1000

/* Bit masks */
#define CMD5_Q_RUN			0x1
#define CMD5_Q_HALT			0x2
#define CMD5_Q_MEM_LOCATION		0x4
#define CMD5_Q_SIZE			0x1F
#define CMD5_Q_SHIFT			3
#define COMMANDS_PER_QUEUE		16
/* Encoded queue size field: ffs(16) - 2 == 3, masked to CMD5_Q_SIZE */
#define QUEUE_SIZE_VAL			((ffs(COMMANDS_PER_QUEUE) - 2) & \
					  CMD5_Q_SIZE)
/* NOTE(review): '-' binds tighter than '<<', so this expands to
 * 2 << ((QUEUE_SIZE_VAL + 5) - 1), i.e. 0x100 for a 16-entry queue.
 * Confirm whether ((2 << (QUEUE_SIZE_VAL + 5)) - 1) (a low-bits mask)
 * was intended before changing it — existing users may rely on the
 * current value.
 */
#define Q_PTR_MASK			(2 << (QUEUE_SIZE_VAL + 5) - 1)
#define Q_DESC_SIZE			sizeof(struct ccp5_desc)
/* Total queue storage when each of the COMMANDS_PER_QUEUE entries is n bytes */
#define Q_SIZE(n)			(COMMANDS_PER_QUEUE*(n))

/* Queue interrupt status/enable bits */
#define INT_COMPLETION			0x1
#define INT_ERROR			0x2
#define INT_QUEUE_STOPPED		0x4
#define	INT_EMPTY_QUEUE			0x8
#define SUPPORTED_INTERRUPTS		(INT_COMPLETION | INT_ERROR)

#define LSB_REGION_WIDTH		5
#define MAX_LSB_CNT			8

/* Each LSB region holds LSB_SIZE entries of LSB_ITEM_SIZE bytes */
#define LSB_SIZE			16
#define LSB_ITEM_SIZE			32
#define PLSB_MAP_SIZE			(LSB_SIZE)
#define SLSB_MAP_SIZE			(MAX_LSB_CNT * LSB_SIZE)

/* Convert an LSB byte address to an LSB entry index */
#define LSB_ENTRY_NUMBER(LSB_ADDR)	(LSB_ADDR / LSB_ITEM_SIZE)
124  
/* ------------------------ CCP Version 3 Specifics ------------------------ */
#define REQ0_WAIT_FOR_WRITE		0x00000004
#define REQ0_INT_ON_COMPLETE		0x00000002
#define REQ0_STOP_ON_COMPLETE		0x00000001

#define REQ0_CMD_Q_SHIFT		9
#define REQ0_JOBID_SHIFT		3

/****** REQ1 Related Values ******/
#define REQ1_PROTECT_SHIFT		27
#define REQ1_ENGINE_SHIFT		23
#define REQ1_KEY_KSB_SHIFT		2

#define REQ1_EOM			0x00000002
#define REQ1_INIT			0x00000001

/* AES Related Values */
#define REQ1_AES_TYPE_SHIFT		21
#define REQ1_AES_MODE_SHIFT		18
#define REQ1_AES_ACTION_SHIFT		17
#define REQ1_AES_CFB_SIZE_SHIFT		10

/* XTS-AES Related Values */
#define REQ1_XTS_AES_SIZE_SHIFT		10

/* SHA Related Values */
#define REQ1_SHA_TYPE_SHIFT		21

/* RSA Related Values */
#define REQ1_RSA_MOD_SIZE_SHIFT		10

/* Pass-Through Related Values */
#define REQ1_PT_BW_SHIFT		12
#define REQ1_PT_BS_SHIFT		10

/* ECC Related Values */
#define REQ1_ECC_AFFINE_CONVERT		0x00200000
#define REQ1_ECC_FUNCTION_SHIFT		18

/****** REQ4 Related Values ******/
#define REQ4_KSB_SHIFT			18
#define REQ4_MEMTYPE_SHIFT		16

/****** REQ6 Related Values ******/
#define REQ6_MEMTYPE_SHIFT		16

/****** Key Storage Block ******/
/* KSB entry range tracked by the sb bitmap (KSB_COUNT entries wide) */
#define KSB_START			77
#define KSB_END				127
#define KSB_COUNT			(KSB_END - KSB_START + 1)
#define CCP_SB_BITS			256

/* Job IDs occupy only the low 6 bits */
#define CCP_JOBID_MASK			0x0000003f
178  
/* ------------------------ General CCP Defines ------------------------ */

/* CCP_DMA_* : DMA channel type selectors (default/private/public) */
#define	CCP_DMA_DFLT			0x0
#define	CCP_DMA_PRIV			0x1
#define	CCP_DMA_PUB			0x2

#define CCP_DMAPOOL_MAX_SIZE		64
#define CCP_DMAPOOL_ALIGN		BIT(5)

#define CCP_REVERSE_BUF_SIZE		64

/* Storage block counts reserved per operation type */
#define CCP_AES_KEY_SB_COUNT		1
#define CCP_AES_CTX_SB_COUNT		1

#define CCP_XTS_AES_KEY_SB_COUNT	1
#define CCP5_XTS_AES_KEY_SB_COUNT	2
#define CCP_XTS_AES_CTX_SB_COUNT	1

#define CCP_DES3_KEY_SB_COUNT		1
#define CCP_DES3_CTX_SB_COUNT		1

#define CCP_SHA_SB_COUNT		1

/* RSA modulus width limits; v5 supports larger keys */
#define CCP_RSA_MAX_WIDTH		4096
#define CCP5_RSA_MAX_WIDTH		16384

#define CCP_PASSTHRU_BLOCKSIZE		256
#define CCP_PASSTHRU_MASKSIZE		32
#define CCP_PASSTHRU_SB_COUNT		1

#define CCP_ECC_MODULUS_BYTES		48      /* 384-bits */
#define CCP_ECC_MAX_OPERANDS		6
#define CCP_ECC_MAX_OUTPUTS		3
#define CCP_ECC_SRC_BUF_SIZE		448
#define CCP_ECC_DST_BUF_SIZE		192
#define CCP_ECC_OPERAND_SIZE		64
#define CCP_ECC_OUTPUT_SIZE		64
#define CCP_ECC_RESULT_OFFSET		60
#define CCP_ECC_RESULT_SUCCESS		0x0001

/* Size in bytes of one storage block entry */
#define CCP_SB_BYTES			32
220  
/* Forward declarations */
struct ccp_op;
struct ccp_device;
struct ccp_cmd;
struct ccp_fns;

/* One CCP command queued on a dmaengine channel list */
struct ccp_dma_cmd {
	struct list_head entry;

	struct ccp_cmd ccp_cmd;	/* the embedded CCP command itself */
};
231  
/* One dmaengine transaction descriptor: groups the ccp_dma_cmd entries
 * that make up the transaction and tracks its completion status.
 */
struct ccp_dma_desc {
	struct list_head entry;

	struct ccp_device *ccp;

	/* Commands belonging to this descriptor, split into those not
	 * yet submitted and those currently in flight
	 */
	struct list_head pending;
	struct list_head active;

	enum dma_status status;
	struct dma_async_tx_descriptor tx_desc;
	size_t len;		/* total transaction length in bytes */
};
244  
/* Per-channel dmaengine state */
struct ccp_dma_chan {
	struct ccp_device *ccp;

	/* Protects the descriptor lists below */
	spinlock_t lock;
	/* Descriptor lifecycle lists: created -> pending -> active -> complete */
	struct list_head created;
	struct list_head pending;
	struct list_head active;
	struct list_head complete;

	/* Deferred cleanup of completed descriptors */
	struct tasklet_struct cleanup_tasklet;

	enum dma_status status;
	struct dma_chan dma_chan;
};
259  
/* Per-queue state for one CCP hardware command queue */
struct ccp_cmd_queue {
	struct ccp_device *ccp;

	/* Queue identifier */
	u32 id;

	/* Queue dma pool */
	struct dma_pool *dma_pool;

	/* Queue base address (not necessarily aligned)*/
	struct ccp5_desc *qbase;

	/* Aligned queue start address (per requirement) */
	struct mutex q_mutex ____cacheline_aligned;
	unsigned int qidx;

	/* Version 5 has different requirements for queue memory */
	unsigned int qsize;
	dma_addr_t qbase_dma;
	dma_addr_t qdma_tail;

	/* Per-queue reserved storage block(s) */
	u32 sb_key;
	u32 sb_ctx;

	/* Bitmap of LSBs that can be accessed by this queue */
	DECLARE_BITMAP(lsbmask, MAX_LSB_CNT);
	/* Private LSB that is assigned to this queue, or -1 if none.
	 * Bitmap for my private LSB, unused otherwise
	 */
	int lsb;
	DECLARE_BITMAP(lsbmap, PLSB_MAP_SIZE);

	/* Queue processing thread */
	struct task_struct *kthread;
	unsigned int active;
	unsigned int suspended;

	/* Number of free command slots available */
	unsigned int free_slots;

	/* Interrupt masks */
	u32 int_ok;
	u32 int_err;

	/* Register addresses for queue (see CMD5_Q_*_BASE offsets above) */
	void __iomem *reg_control;
	void __iomem *reg_tail_lo;
	void __iomem *reg_head_lo;
	void __iomem *reg_int_enable;
	void __iomem *reg_interrupt_status;
	void __iomem *reg_status;
	void __iomem *reg_int_status;
	void __iomem *reg_dma_status;
	void __iomem *reg_dma_read_status;
	void __iomem *reg_dma_write_status;
	u32 qcontrol; /* Cached control register */

	/* Status values from job */
	u32 int_status;
	u32 q_status;
	u32 q_int_status;
	u32 cmd_error;

	/* Interrupt wait queue */
	wait_queue_head_t int_queue;
	unsigned int int_rcvd;

	/* Per-queue Statistics */
	unsigned long total_ops;
	unsigned long total_aes_ops;
	unsigned long total_xts_aes_ops;
	unsigned long total_3des_ops;
	unsigned long total_sha_ops;
	unsigned long total_rsa_ops;
	unsigned long total_pt_ops;
	unsigned long total_ecc_ops;
} ____cacheline_aligned;
338  
/* Per-device state for one CCP instance */
struct ccp_device {
	struct list_head entry;

	struct ccp_vdata *vdata;	/* version-specific data and ops */
	unsigned int ord;		/* device ordinal */
	char name[MAX_CCP_NAME_LEN];
	char rngname[MAX_CCP_NAME_LEN];

	struct device *dev;
	struct sp_device *sp;

	/* Bus specific device information
	 */
	void *dev_specific;
	unsigned int qim;		/* queue interrupt mask */
	unsigned int irq;
	bool use_tasklet;		/* defer irq handling to irq_tasklet */
	struct tasklet_struct irq_tasklet;

	/* I/O area used for device communication. The register mapping
	 * starts at an offset into the mapped bar.
	 *   The CMD_REQx registers and the Delete_Cmd_Queue_Job register
	 *   need to be protected while a command queue thread is accessing
	 *   them.
	 */
	struct mutex req_mutex ____cacheline_aligned;
	void __iomem *io_regs;

	/* Master lists that all cmds are queued on. Because there can be
	 * more than one CCP command queue that can process a cmd a separate
	 * backlog list is needed so that the backlog completion call
	 * completes before the cmd is available for execution.
	 */
	spinlock_t cmd_lock ____cacheline_aligned;
	unsigned int cmd_count;
	struct list_head cmd;
	struct list_head backlog;

	/* The command queues. These represent the queues available on the
	 * CCP that are available for processing cmds
	 */
	struct ccp_cmd_queue cmd_q[MAX_HW_QUEUES];
	unsigned int cmd_q_count;
	unsigned int max_q_count;

	/* Support for the CCP True RNG
	 */
	struct hwrng hwrng;
	unsigned int hwrng_retries;

	/* Support for the CCP DMA capabilities
	 */
	struct dma_device dma_dev;
	struct ccp_dma_chan *ccp_dma_chan;
	struct kmem_cache *dma_cmd_cache;
	struct kmem_cache *dma_desc_cache;

	/* A counter used to generate job-ids for cmds submitted to the CCP
	 */
	atomic_t current_id ____cacheline_aligned;

	/* The v3 CCP uses key storage blocks (SB) to maintain context for
	 * certain operations. To prevent multiple cmds from using the same
	 * SB range a command queue reserves an SB range for the duration of
	 * the cmd. Each queue, will however, reserve 2 SB blocks for
	 * operations that only require single SB entries (eg. AES context/iv
	 * and key) in order to avoid allocation contention.  This will reserve
	 * at most 10 SB entries, leaving 40 SB entries available for dynamic
	 * allocation.
	 *
	 * The v5 CCP Local Storage Block (LSB) is broken up into 8
	 * memory ranges, each of which can be enabled for access by one
	 * or more queues. Device initialization takes this into account,
	 * and attempts to assign one region for exclusive use by each
	 * available queue; the rest are then aggregated as "public" use.
	 * If there are fewer regions than queues, all regions are shared
	 * amongst all queues.
	 */
	struct mutex sb_mutex ____cacheline_aligned;
	DECLARE_BITMAP(sb, KSB_COUNT);
	wait_queue_head_t sb_queue;
	unsigned int sb_avail;
	unsigned int sb_count;
	u32 sb_start;

	/* Bitmap of shared LSBs, if any */
	DECLARE_BITMAP(lsbmap, SLSB_MAP_SIZE);

	/* Suspend support */
	unsigned int suspending;
	wait_queue_head_t suspend_queue;

	/* DMA caching attribute support (CACHE_* values above) */
	unsigned int axcache;

	/* Device Statistics */
	unsigned long total_interrupts;

	/* DebugFS info */
	struct dentry *debugfs_instance;
};
440  
/* Memory types addressable by CCP operations/descriptors */
enum ccp_memtype {
	CCP_MEMTYPE_SYSTEM = 0,	/* system (DMA-mapped) memory */
	CCP_MEMTYPE_SB,		/* storage block entry */
	CCP_MEMTYPE_LOCAL,	/* local storage block (v5) */
	CCP_MEMTYPE__LAST,
};
/* Historical alias: the key storage block (KSB) was renamed to SB, but
 * this alias still pointed at the removed CCP_MEMTYPE_KSB identifier,
 * so any use of CCP_MEMTYPE_LSB failed to compile.  Map it to the
 * current name instead.
 */
#define	CCP_MEMTYPE_LSB	CCP_MEMTYPE_SB
448  
449  
/* Describes one DMA-mapped region: a base address plus the offset and
 * length of the slice in use (effective address = address + offset;
 * see ccp_addr_lo()/ccp_addr_hi() below)
 */
struct ccp_dma_info {
	dma_addr_t address;
	unsigned int offset;
	unsigned int length;
	enum dma_data_direction dir;
} __packed __aligned(4);
456  
/* DMA-able scratch buffer, optionally backed by a dma_pool */
struct ccp_dm_workarea {
	struct device *dev;
	struct dma_pool *dma_pool;	/* NULL when not pool-allocated */

	u8 *address;			/* CPU mapping of the buffer */
	struct ccp_dma_info dma;	/* device mapping of the buffer */
	unsigned int length;
};
465  
/* State for walking a scatter-gather list during an operation */
struct ccp_sg_workarea {
	struct scatterlist *sg;		/* current entry in the walk */
	int nents;
	unsigned int sg_used;		/* bytes already consumed from sg */

	/* DMA-mapped view of the list */
	struct scatterlist *dma_sg;
	struct scatterlist *dma_sg_head;
	struct device *dma_dev;
	unsigned int dma_count;
	enum dma_data_direction dma_dir;

	u64 bytes_left;			/* bytes remaining in the overall walk */
};
479  
/* Pairs a scatter-gather walk with a bounce/work buffer */
struct ccp_data {
	struct ccp_sg_workarea sg_wa;
	struct ccp_dm_workarea dm_wa;
};
484  
/* A source/destination operand: either a DMA region or a storage
 * block index, discriminated by type (enum ccp_memtype)
 */
struct ccp_mem {
	enum ccp_memtype type;
	union {
		struct ccp_dma_info dma;	/* CCP_MEMTYPE_SYSTEM */
		u32 sb;				/* storage block index */
	} u;
};
492  
/* Per-engine operation parameters, embedded in struct ccp_op below */

/* AES engine parameters */
struct ccp_aes_op {
	enum ccp_aes_type type;
	enum ccp_aes_mode mode;
	enum ccp_aes_action action;
	unsigned int size;
};

/* XTS-AES engine parameters */
struct ccp_xts_aes_op {
	enum ccp_aes_type type;
	enum ccp_aes_action action;
	enum ccp_xts_aes_unit_size unit_size;
};

/* Triple-DES engine parameters */
struct ccp_des3_op {
	enum ccp_des3_type type;
	enum ccp_des3_mode mode;
	enum ccp_des3_action action;
};

/* SHA engine parameters */
struct ccp_sha_op {
	enum ccp_sha_type type;
	u64 msg_bits;
};

/* RSA engine parameters */
struct ccp_rsa_op {
	u32 mod_size;
	u32 input_len;
};

/* Pass-through engine parameters */
struct ccp_passthru_op {
	enum ccp_passthru_bitwise bit_mod;
	enum ccp_passthru_byteswap byte_swap;
};

/* ECC engine parameters */
struct ccp_ecc_op {
	enum ccp_ecc_function function;
};
530  
/* A single operation submitted to a CCP command queue */
struct ccp_op {
	struct ccp_cmd_queue *cmd_q;

	u32 jobid;	/* job identifier (low 6 bits; see CCP_JOBID_MASK) */
	u32 ioc;	/* interrupt on completion (dword0.ioc) */
	u32 soc;	/* stop on completion (dword0.soc) */
	u32 sb_key;	/* storage block holding the key */
	u32 sb_ctx;	/* storage block holding the context */
	u32 init;	/* first pass of a multi-part operation (dword0.init) */
	u32 eom;	/* end of message (dword0.eom; AES/SHA only) */

	struct ccp_mem src;
	struct ccp_mem dst;
	struct ccp_mem exp;

	/* Engine-specific parameters for this operation */
	union {
		struct ccp_aes_op aes;
		struct ccp_xts_aes_op xts;
		struct ccp_des3_op des3;
		struct ccp_sha_op sha;
		struct ccp_rsa_op rsa;
		struct ccp_passthru_op passthru;
		struct ccp_ecc_op ecc;
	} u;
};
556  
ccp_addr_lo(struct ccp_dma_info * info)557  static inline u32 ccp_addr_lo(struct ccp_dma_info *info)
558  {
559  	return lower_32_bits(info->address + info->offset);
560  }
561  
ccp_addr_hi(struct ccp_dma_info * info)562  static inline u32 ccp_addr_hi(struct ccp_dma_info *info)
563  {
564  	return upper_32_bits(info->address + info->offset) & 0x0000ffff;
565  }
566  
/**
 * descriptor for version 5 CCP commands
 * 8 32-bit words:
 * word 0: function; engine; control bits
 * word 1: length of source data
 * word 2: low 32 bits of source pointer
 * word 3: upper 16 bits of source pointer; source memory type
 * word 4: low 32 bits of destination pointer
 * word 5: upper 16 bits of destination pointer; destination memory type
 * word 6: low 32 bits of key pointer
 * word 7: upper 16 bits of key pointer; key memory type
 *
 * These bitfield layouts mirror the hardware descriptor format and
 * must not be reordered or resized.
 */
struct dword0 {
	unsigned int soc:1;		/* stop on completion */
	unsigned int ioc:1;		/* interrupt on completion */
	unsigned int rsvd1:1;
	unsigned int init:1;
	unsigned int eom:1;		/* AES/SHA only */
	unsigned int function:15;
	unsigned int engine:4;
	unsigned int prot:1;
	unsigned int rsvd2:7;
};

struct dword3 {
	unsigned int  src_hi:16;
	unsigned int  src_mem:2;
	unsigned int  lsb_cxt_id:8;
	unsigned int  rsvd1:5;
	unsigned int  fixed:1;
};

/* Word 4 is overloaded: destination pointer, or SHA message length */
union dword4 {
	u32 dst_lo;		/* NON-SHA	*/
	u32 sha_len_lo;		/* SHA		*/
};

/* Word 5 is overloaded the same way as word 4 */
union dword5 {
	struct {
		unsigned int  dst_hi:16;
		unsigned int  dst_mem:2;
		unsigned int  rsvd1:13;
		unsigned int  fixed:1;
	} fields;
	u32 sha_len_hi;
};

struct dword7 {
	unsigned int  key_hi:16;
	unsigned int  key_mem:2;
	unsigned int  rsvd1:14;
};

/* The full 8-word v5 command descriptor */
struct ccp5_desc {
	struct dword0 dw0;
	u32 length;
	u32 src_lo;
	struct dword3 dw3;
	union dword4 dw4;
	union dword5 dw5;
	u32 key_lo;
	struct dword7 dw7;
};
630  
631  void ccp_add_device(struct ccp_device *ccp);
632  void ccp_del_device(struct ccp_device *ccp);
633  
634  extern void ccp_log_error(struct ccp_device *, unsigned int);
635  
636  struct ccp_device *ccp_alloc_struct(struct sp_device *sp);
637  bool ccp_queues_suspended(struct ccp_device *ccp);
638  int ccp_cmd_queue_thread(void *data);
639  int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait);
640  
641  int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd);
642  
643  int ccp_register_rng(struct ccp_device *ccp);
644  void ccp_unregister_rng(struct ccp_device *ccp);
645  int ccp_dmaengine_register(struct ccp_device *ccp);
646  void ccp_dmaengine_unregister(struct ccp_device *ccp);
647  
648  void ccp5_debugfs_setup(struct ccp_device *ccp);
649  void ccp5_debugfs_destroy(void);
650  
651  /* Structure for computation functions that are device-specific */
652  struct ccp_actions {
653  	int (*aes)(struct ccp_op *);
654  	int (*xts_aes)(struct ccp_op *);
655  	int (*des3)(struct ccp_op *);
656  	int (*sha)(struct ccp_op *);
657  	int (*rsa)(struct ccp_op *);
658  	int (*passthru)(struct ccp_op *);
659  	int (*ecc)(struct ccp_op *);
660  	u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int);
661  	void (*sbfree)(struct ccp_cmd_queue *, unsigned int, unsigned int);
662  	unsigned int (*get_free_slots)(struct ccp_cmd_queue *);
663  	int (*init)(struct ccp_device *);
664  	void (*destroy)(struct ccp_device *);
665  	irqreturn_t (*irqhandler)(int, void *);
666  };
667  
/* Version-specific capability/ops tables (definitions live in the
 * per-version implementation files)
 */
extern const struct ccp_vdata ccpv3_platform;
extern const struct ccp_vdata ccpv3;
extern const struct ccp_vdata ccpv5a;
extern const struct ccp_vdata ccpv5b;
672  
673  #endif
674