/* SPDX-License-Identifier: GPL-2.0-only
 * Copyright (C) 2020 Marvell.
 */

#ifndef __OTX2_CPT_COMMON_H
#define __OTX2_CPT_COMMON_H

#include <linux/pci.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <net/devlink.h>
#include "otx2_cpt_hw_types.h"
#include "rvu.h"
#include "mbox.h"

#define OTX2_CPT_MAX_VFS_NUM 128
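/*
 * OTX2_CPT_RVU_FUNC_ADDR_S() builds a per-slot register address within an
 * RVU block: block id at bit 20, LF slot at bit 12 and the register offset
 * in the low bits. Used by otx2_cpt_read64()/otx2_cpt_write64() below.
 */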
#define OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs) \
		(((blk) << 20) | ((slot) << 12) | (offs))
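/*
 * OTX2_CPT_RVU_PFFUNC() composes an RVU PF/VF function id (pcifunc) from a
 * PF number and a function number, using the mask/shift definitions from
 * rvu.h.
 */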
#define OTX2_CPT_RVU_PFFUNC(pf, func)	\
		((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \
		(((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT))

#define OTX2_CPT_INVALID_CRYPTO_ENG_GRP 0xFF
#define OTX2_CPT_NAME_LENGTH 64
#define OTX2_CPT_DMA_MINALIGN 128

/* HW capability flags */
#define CN10K_MBOX  0
#define CN10K_LMTST 1

#define BAD_OTX2_CPT_ENG_TYPE OTX2_CPT_MAX_ENG_TYPES

enum otx2_cpt_eng_type {
	OTX2_CPT_AE_TYPES = 1,
	OTX2_CPT_SE_TYPES = 2,
	OTX2_CPT_IE_TYPES = 3,
	OTX2_CPT_MAX_ENG_TYPES,
};

/* Take mbox ids from the end of the CPT mbox range in AF (0xA00 - 0xBFF) */
#define MBOX_MSG_RX_INLINE_IPSEC_LF_CFG 0xBFE
#define MBOX_MSG_GET_ENG_GRP_NUM        0xBFF
#define MBOX_MSG_GET_CAPS               0xBFD
#define MBOX_MSG_GET_KVF_LIMITS         0xBFC

/*
 * Message request to configure a CPT LF for inline inbound IPsec.
 * This message is only used between CPT PF <-> CPT VF.
 */
struct otx2_cpt_rx_inline_lf_cfg {
	struct mbox_msghdr hdr;
	u16 sso_pf_func;
	u16 param1;
	u16 param2;
	u16 opcode;
	u32 credit;
	u32 credit_th;
	u16 bpid;
	u32 reserved;
	u8 ctx_ilen_valid : 1;
	u8 ctx_ilen : 7;
};

/*
 * Message request and response to get the engine group number
 * which has a given type of engines (SE, AE, IE) attached.
 * These messages are only used between CPT PF <-> CPT VF.
 */
struct otx2_cpt_egrp_num_msg {
	struct mbox_msghdr hdr;
	u8 eng_type;
};

struct otx2_cpt_egrp_num_rsp {
	struct mbox_msghdr hdr;
	u8 eng_type;
	u8 eng_grp_num;
};
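
/*
 * Illustrative sketch (not part of the original header): a VF asks the PF
 * which engine group has SE engines attached. The authoritative flow lives
 * in the CPT VF mailbox code; error handling and pcifunc setup are omitted
 * here:
 *
 *	struct otx2_cpt_egrp_num_msg *req;
 *
 *	req = (struct otx2_cpt_egrp_num_msg *)
 *	      otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
 *				      sizeof(struct otx2_cpt_egrp_num_rsp));
 *	req->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM;
 *	req->hdr.sig = OTX2_MBOX_REQ_SIG;
 *	req->eng_type = OTX2_CPT_SE_TYPES;
 *	return otx2_cpt_send_mbox_msg(mbox, pdev);
 */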

/*
 * Message request and response to get kernel crypto limits.
 * These messages are only used between CPT PF <-> CPT VF.
 */
struct otx2_cpt_kvf_limits_msg {
	struct mbox_msghdr hdr;
};

struct otx2_cpt_kvf_limits_rsp {
	struct mbox_msghdr hdr;
	u8 kvf_limits;
};

/* CPT HW capabilities */
union otx2_cpt_eng_caps {
	u64 u;
	struct {
		u64 reserved_0_4:5;
		u64 mul:1;
		u64 sha1_sha2:1;
		u64 chacha20:1;
		u64 zuc_snow3g:1;
		u64 sha3:1;
		u64 aes:1;
		u64 kasumi:1;
		u64 des:1;
		u64 crc:1;
		u64 mmul:1;
		u64 reserved_15_33:19;
		u64 pdcp_chain:1;
		u64 reserved_35_63:29;
	};
};
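
/*
 * Illustrative helper (hypothetical, not part of the original header):
 * check a single capability bit from the eng_caps[] array returned in
 * struct otx2_cpt_caps_rsp below, e.g. whether SE engines implement AES.
 */
static inline bool otx2_cpt_se_supports_aes(const union otx2_cpt_eng_caps *caps)
{
	return caps[OTX2_CPT_SE_TYPES].aes;
}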

/*
 * Message request and response to get HW capabilities for each
 * engine type (SE, IE, AE).
 * These messages are only used between CPT PF <-> CPT VF.
 */
struct otx2_cpt_caps_msg {
	struct mbox_msghdr hdr;
};

struct otx2_cpt_caps_rsp {
	struct mbox_msghdr hdr;
	u16 cpt_pf_drv_version;
	u8 cpt_revision;
	union otx2_cpt_eng_caps eng_caps[OTX2_CPT_MAX_ENG_TYPES];
};

static inline void otx2_cpt_write64(void __iomem *reg_base, u64 blk, u64 slot,
				    u64 offs, u64 val)
{
	writeq_relaxed(val, reg_base +
		       OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs));
}

static inline u64 otx2_cpt_read64(void __iomem *reg_base, u64 blk, u64 slot,
				  u64 offs)
{
	return readq_relaxed(reg_base +
			     OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs));
}
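
/*
 * Usage sketch (illustrative): read or write a per-LF register of slot
 * 'slot' in block 'blkaddr' through the mapped register base, e.g.:
 *
 *	u64 val;
 *
 *	val = otx2_cpt_read64(reg_base, blkaddr, slot, OTX2_CPT_LF_CTL);
 *	otx2_cpt_write64(reg_base, blkaddr, slot, OTX2_CPT_LF_CTL, val);
 *
 * where the register offset (OTX2_CPT_LF_CTL here) comes from
 * otx2_cpt_hw_types.h.
 */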

static inline bool is_dev_otx2(struct pci_dev *pdev)
{
	if (pdev->device == OTX2_CPT_PCI_PF_DEVICE_ID ||
	    pdev->device == OTX2_CPT_PCI_VF_DEVICE_ID)
		return true;

	return false;
}

static inline bool is_dev_cn10ka(struct pci_dev *pdev)
{
	return pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A;
}

static inline bool is_dev_cn10ka_ax(struct pci_dev *pdev)
{
	if (pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A &&
	    ((pdev->revision & 0xFF) == 4 || (pdev->revision & 0xFF) == 0x50 ||
	     (pdev->revision & 0xFF) == 0x51))
		return true;

	return false;
}

static inline bool is_dev_cn10kb(struct pci_dev *pdev)
{
	return pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_B;
}

static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev)
{
	if (pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A &&
	    (pdev->revision & 0xFF) == 0x54)
		return true;

	return false;
}

static inline void otx2_cpt_set_hw_caps(struct pci_dev *pdev,
					unsigned long *cap_flag)
{
	if (!is_dev_otx2(pdev)) {
		__set_bit(CN10K_MBOX, cap_flag);
		__set_bit(CN10K_LMTST, cap_flag);
	}
}
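
/*
 * Usage sketch (illustrative): callers keep the capability bits in their
 * per-device state and test them with test_bit(), e.g.:
 *
 *	unsigned long cap_flags = 0;
 *
 *	otx2_cpt_set_hw_caps(pdev, &cap_flags);
 *	if (test_bit(CN10K_LMTST, &cap_flags))
 *		(take the CN10K LMTST submission path)
 */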

static inline bool cpt_is_errata_38550_exists(struct pci_dev *pdev)
{
	if (is_dev_otx2(pdev) || is_dev_cn10ka_ax(pdev))
		return true;

	return false;
}

static inline bool cpt_feature_sgv2(struct pci_dev *pdev)
{
	if (!is_dev_otx2(pdev) && !is_dev_cn10ka_ax(pdev))
		return true;

	return false;
}

int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);

int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox,
				  struct pci_dev *pdev);
int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
			      u64 reg, u64 val, int blkaddr);
int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
			 u64 reg, u64 *val, int blkaddr);
int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
			  u64 reg, u64 val, int blkaddr);
struct otx2_cptlfs_info;
int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox);
int otx2_cpt_lf_reset_msg(struct otx2_cptlfs_info *lfs, int slot);

#endif /* __OTX2_CPT_COMMON_H */