Lines Matching refs:qm

20 static int qm_wait_dev_not_ready(struct hisi_qm *qm)  in qm_wait_dev_not_ready()  argument
24 return readl_relaxed_poll_timeout(qm->io_base + QM_VF_STATE, in qm_wait_dev_not_ready()
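Note: the matched poll is essentially the whole body of this helper; a minimal sketch, assuming the driver's usual MB_POLL_PERIOD_US/MB_POLL_TIMEOUT_US constants and that bit 0 of QM_VF_STATE means "still ready":

	static int qm_wait_dev_not_ready(struct hisi_qm *qm)
	{
		u32 val;

		/* Poll QM_VF_STATE until bit 0 clears or the timeout expires. */
		return readl_relaxed_poll_timeout(qm->io_base + QM_VF_STATE,
						  val, !(val & 0x1),
						  MB_POLL_PERIOD_US,
						  MB_POLL_TIMEOUT_US);
	}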
33 static u32 qm_check_reg_state(struct hisi_qm *qm, u32 regs) in qm_check_reg_state() argument
38 state = readl(qm->io_base + regs); in qm_check_reg_state()
41 state = readl(qm->io_base + regs); in qm_check_reg_state()
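Note: the two readl() hits on the same register suggest a read-then-retry loop; a sketch of that shape, with the retry bound and delay macros (ERROR_CHECK_TIMEOUT, CHECK_DELAY_TIME) assumed:

	static u32 qm_check_reg_state(struct hisi_qm *qm, u32 regs)
	{
		int check_times = 0;
		u32 state;

		state = readl(qm->io_base + regs);
		/* Re-read while the status is still asserted, up to a bound. */
		while (state && check_times < ERROR_CHECK_TIMEOUT) {
			udelay(CHECK_DELAY_TIME);
			state = readl(qm->io_base + regs);
			check_times++;
		}

		return state;
	}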
48 static int qm_read_regs(struct hisi_qm *qm, u32 reg_addr, in qm_read_regs() argument
57 data[i] = readl(qm->io_base + reg_addr); in qm_read_regs()
64 static int qm_write_regs(struct hisi_qm *qm, u32 reg, in qm_write_regs() argument
73 writel(data[i], qm->io_base + reg + i * QM_REG_ADDR_OFFSET); in qm_write_regs()
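Note: qm_read_regs() and qm_write_regs() look like symmetric bulk register accessors; a sketch of the pair, assuming an upper bound QM_REGS_MAX_LEN on nums and a QM_REG_ADDR_OFFSET stride on the read side to mirror the write side:

	static int qm_read_regs(struct hisi_qm *qm, u32 reg_addr,
				u32 *data, u8 nums)
	{
		int i;

		if (nums < 1 || nums > QM_REGS_MAX_LEN)
			return -EINVAL;

		for (i = 0; i < nums; i++) {
			data[i] = readl(qm->io_base + reg_addr);
			reg_addr += QM_REG_ADDR_OFFSET;
		}

		return 0;
	}

	static int qm_write_regs(struct hisi_qm *qm, u32 reg,
				 u32 *data, u8 nums)
	{
		int i;

		if (nums < 1 || nums > QM_REGS_MAX_LEN)
			return -EINVAL;

		for (i = 0; i < nums; i++)
			writel(data[i], qm->io_base + reg + i * QM_REG_ADDR_OFFSET);

		return 0;
	}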
78 static int qm_get_vft(struct hisi_qm *qm, u32 *base) in qm_get_vft() argument
84 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1); in qm_get_vft()
88 sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | in qm_get_vft()
89 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << in qm_get_vft()
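Note: the mailbox command plus the two data-register reads compose a 64-bit SQC VFT word; a sketch of how it is plausibly decoded, with the base/number mask and shift macro names assumed:

	static int qm_get_vft(struct hisi_qm *qm, u32 *base)
	{
		u64 sqc_vft;
		u32 qp_num;
		int ret;

		ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
		if (ret)
			return ret;

		/* The result comes back through the two mailbox data registers. */
		sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
			  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
			   QM_XQC_ADDR_OFFSET);
		*base = QM_SQC_VFT_BASE_MASK_V2 &
			(sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
		qp_num = (QM_SQC_VFT_NUM_MASK_V2 &
			  (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

		return qp_num;
	}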
98 static int qm_get_sqc(struct hisi_qm *qm, u64 *addr) in qm_get_sqc() argument
102 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, 0, 0, 1); in qm_get_sqc()
106 *addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | in qm_get_sqc()
107 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << in qm_get_sqc()
113 static int qm_get_cqc(struct hisi_qm *qm, u64 *addr) in qm_get_cqc() argument
117 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, 0, 0, 1); in qm_get_cqc()
121 *addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | in qm_get_cqc()
122 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << in qm_get_cqc()
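Note: qm_get_sqc() and qm_get_cqc() follow the same read-back pattern and differ only in the mailbox command; one of the pair sketched, assuming QM_XQC_ADDR_OFFSET is the high-word shift:

	static int qm_get_sqc(struct hisi_qm *qm, u64 *addr)
	{
		int ret;

		/* Ask the QM for the SQC base table address via the mailbox. */
		ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, 0, 0, 1);
		if (ret)
			return ret;

		*addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
			((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
			 QM_XQC_ADDR_OFFSET);

		return 0;
	}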
128 static int qm_get_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data) in qm_get_regs() argument
130 struct device *dev = &qm->pdev->dev; in qm_get_regs()
133 ret = qm_read_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1); in qm_get_regs()
139 ret = qm_read_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1); in qm_get_regs()
145 ret = qm_read_regs(qm, QM_IFC_INT_SOURCE_V, in qm_get_regs()
152 ret = qm_read_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1); in qm_get_regs()
158 ret = qm_read_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1); in qm_get_regs()
164 ret = qm_read_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1); in qm_get_regs()
171 ret = qm_read_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7); in qm_get_regs()
178 ret = qm_read_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7); in qm_get_regs()
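Note: qm_get_regs() appears to be a straight sequence of qm_read_regs() calls that snapshot VF state into acc_vf_data, each followed by an error report; the first two steps sketched to show the pattern (error strings are illustrative, not quoted from the driver):

	static int qm_get_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
	{
		struct device *dev = &qm->pdev->dev;
		int ret;

		ret = qm_read_regs(qm, QM_VF_AEQ_INT_MASK,
				   &vf_data->aeq_int_mask, 1);
		if (ret) {
			dev_err(dev, "failed to read QM_VF_AEQ_INT_MASK\n");
			return ret;
		}

		ret = qm_read_regs(qm, QM_VF_EQ_INT_MASK,
				   &vf_data->eq_int_mask, 1);
		if (ret) {
			dev_err(dev, "failed to read QM_VF_EQ_INT_MASK\n");
			return ret;
		}

		/* ... IFC, page size and EQC/AEQC registers are read the same way. */

		return 0;
	}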
187 static int qm_set_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data) in qm_set_regs() argument
189 struct device *dev = &qm->pdev->dev; in qm_set_regs()
193 if (unlikely(hisi_qm_wait_mb_ready(qm))) { in qm_set_regs()
194 dev_err(&qm->pdev->dev, "QM device is not ready to write\n"); in qm_set_regs()
198 ret = qm_write_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1); in qm_set_regs()
204 ret = qm_write_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1); in qm_set_regs()
210 ret = qm_write_regs(qm, QM_IFC_INT_SOURCE_V, in qm_set_regs()
217 ret = qm_write_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1); in qm_set_regs()
223 ret = qm_write_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1); in qm_set_regs()
229 ret = qm_write_regs(qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1); in qm_set_regs()
235 ret = qm_write_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1); in qm_set_regs()
242 ret = qm_write_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7); in qm_set_regs()
249 ret = qm_write_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7); in qm_set_regs()
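Note: qm_set_regs() mirrors qm_get_regs() in the restore direction, but first checks that the mailbox interface is usable; a sketch of the entry check and the first write, with the remaining writes elided (error code and strings assumed):

	static int qm_set_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
	{
		struct device *dev = &qm->pdev->dev;
		int ret;

		/* Bail out early if the device cannot accept writes right now. */
		if (unlikely(hisi_qm_wait_mb_ready(qm))) {
			dev_err(&qm->pdev->dev, "QM device is not ready to write\n");
			return -EBUSY;
		}

		ret = qm_write_regs(qm, QM_VF_AEQ_INT_MASK,
				    &vf_data->aeq_int_mask, 1);
		if (ret) {
			dev_err(dev, "failed to write QM_VF_AEQ_INT_MASK\n");
			return ret;
		}

		/* ... the remaining saved registers are written back the same way. */

		return 0;
	}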
258 static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, in qm_db() argument
275 writeq(doorbell, qm->io_base + dbase); in qm_db()
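Note: qm_db() packs the queue number, command, index and priority into one 64-bit doorbell and posts it with writeq(); a sketch, with the shift and doorbell-base macro names assumed:

	static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd,
			  u16 index, u8 priority)
	{
		u64 doorbell;
		u64 dbase;
		u16 randata = 0;

		/* SQ/CQ and EQ/AEQ doorbells live at different base offsets. */
		if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
			dbase = QM_DOORBELL_SQ_CQ_BASE_V2;
		else
			dbase = QM_DOORBELL_EQ_AEQ_BASE_V2;

		doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
			   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
			   ((u64)index << QM_DB_INDEX_SHIFT_V2) |
			   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);

		writeq(doorbell, qm->io_base + dbase);
	}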
278 static int pf_qm_get_qp_num(struct hisi_qm *qm, int vf_id, u32 *rbase) in pf_qm_get_qp_num() argument
285 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in pf_qm_get_qp_num()
291 writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR); in pf_qm_get_qp_num()
293 writel(0x0, qm->io_base + QM_VFT_CFG_TYPE); in pf_qm_get_qp_num()
294 writel(vf_id, qm->io_base + QM_VFT_CFG); in pf_qm_get_qp_num()
296 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); in pf_qm_get_qp_num()
297 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); in pf_qm_get_qp_num()
299 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in pf_qm_get_qp_num()
305 sqc_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) | in pf_qm_get_qp_num()
306 ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << in pf_qm_get_qp_num()
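Note: pf_qm_get_qp_num() programs a VFT lookup for the given VF and then decodes the queue base and count from the data registers; a condensed sketch, with the poll constants and VFT mask/shift macros assumed:

	static int pf_qm_get_qp_num(struct hisi_qm *qm, int vf_id, u32 *rbase)
	{
		unsigned int val;
		u64 sqc_vft;
		u32 qp_num;
		int ret;

		/* Wait for the VFT configuration interface to be idle. */
		ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
						 val & BIT(0), MB_POLL_PERIOD_US,
						 MB_POLL_TIMEOUT_US);
		if (ret)
			return ret;

		/* Select the SQC VFT entry for this VF and kick off the operation. */
		writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR);
		writel(0x0, qm->io_base + QM_VFT_CFG_TYPE);
		writel(vf_id, qm->io_base + QM_VFT_CFG);
		writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
		writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);

		ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
						 val & BIT(0), MB_POLL_PERIOD_US,
						 MB_POLL_TIMEOUT_US);
		if (ret)
			return ret;

		sqc_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) |
			  ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) <<
			   QM_XQC_ADDR_OFFSET);
		*rbase = QM_SQC_VFT_BASE_MASK_V2 &
			 (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
		qp_num = (QM_SQC_VFT_NUM_MASK_V2 &
			  (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

		return qp_num;
	}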
316 static void qm_dev_cmd_init(struct hisi_qm *qm) in qm_dev_cmd_init() argument
319 writel(0x1, qm->io_base + QM_IFC_INT_SOURCE_V); in qm_dev_cmd_init()
322 writel(0x0, qm->io_base + QM_IFC_INT_MASK); in qm_dev_cmd_init()
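Note: qm_dev_cmd_init() is just the two matched writes; a sketch with comments describing their apparent intent:

	static void qm_dev_cmd_init(struct hisi_qm *qm)
	{
		/* Clear any stale PF/VF communication interrupt source. */
		writel(0x1, qm->io_base + QM_IFC_INT_SOURCE_V);

		/* Unmask the interrupt so PF/VF command traffic flows again. */
		writel(0x0, qm->io_base + QM_IFC_INT_MASK);
	}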
325 static int vf_qm_cache_wb(struct hisi_qm *qm) in vf_qm_cache_wb() argument
329 writel(0x1, qm->io_base + QM_CACHE_WB_START); in vf_qm_cache_wb()
330 if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE, in vf_qm_cache_wb()
333 dev_err(&qm->pdev->dev, "vf QM writeback sqc cache fail\n"); in vf_qm_cache_wb()
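Note: vf_qm_cache_wb() starts a cache write-back and polls for completion; a sketch, assuming bit 0 of QM_CACHE_WB_DONE is the completion flag and the usual poll constants:

	static int vf_qm_cache_wb(struct hisi_qm *qm)
	{
		unsigned int val;

		writel(0x1, qm->io_base + QM_CACHE_WB_START);
		if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
					       val, val & BIT(0),
					       MB_POLL_PERIOD_US,
					       MB_POLL_TIMEOUT_US)) {
			dev_err(&qm->pdev->dev, "vf QM writeback sqc cache fail\n");
			return -EINVAL;
		}

		return 0;
	}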
340 static void vf_qm_fun_reset(struct hisi_qm *qm) in vf_qm_fun_reset() argument
344 for (i = 0; i < qm->qp_num; i++) in vf_qm_fun_reset()
345 qm_db(qm, i, QM_DOORBELL_CMD_SQ, 0, 1); in vf_qm_fun_reset()
348 static int vf_qm_func_stop(struct hisi_qm *qm) in vf_qm_func_stop() argument
350 return hisi_qm_mb(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0); in vf_qm_func_stop()
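Note: the two stop/reset helpers are small enough to reconstruct almost entirely from the matches; vf_qm_fun_reset() rings the SQ doorbell for every queue pair and vf_qm_func_stop() pauses the QM through the mailbox:

	static void vf_qm_fun_reset(struct hisi_qm *qm)
	{
		int i;

		/* Ring each send-queue doorbell back to index 0. */
		for (i = 0; i < qm->qp_num; i++)
			qm_db(qm, i, QM_DOORBELL_CMD_SQ, 0, 1);
	}

	static int vf_qm_func_stop(struct hisi_qm *qm)
	{
		/* Ask the device to pause this function's QM via the mailbox. */
		return hisi_qm_mb(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0);
	}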
447 struct hisi_qm *qm = &hisi_acc_vdev->vf_qm; in vf_qm_load_data() local
448 struct device *dev = &qm->pdev->dev; in vf_qm_load_data()
459 qm->eqe_dma = vf_data->eqe_dma; in vf_qm_load_data()
460 qm->aeqe_dma = vf_data->aeqe_dma; in vf_qm_load_data()
461 qm->sqc_dma = vf_data->sqc_dma; in vf_qm_load_data()
462 qm->cqc_dma = vf_data->cqc_dma; in vf_qm_load_data()
464 qm->qp_base = vf_data->qp_base; in vf_qm_load_data()
465 qm->qp_num = vf_data->qp_num; in vf_qm_load_data()
467 ret = qm_set_regs(qm, vf_data); in vf_qm_load_data()
473 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); in vf_qm_load_data()
479 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); in vf_qm_load_data()
485 qm_dev_cmd_init(qm); in vf_qm_load_data()
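Note: taken together, the vf_qm_load_data() matches describe the restore path: copy the saved DMA addresses and queue layout into the VF's hisi_qm, write the saved registers back, re-program the SQC/CQC base tables through the mailbox, then re-enable PF/VF command handling. A condensed sketch of that ordering; the function signature, the migf parameter and its vf_data field, and the error strings are assumptions, and validation of the incoming migration data is elided:

	static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
				   struct hisi_acc_vf_migration_file *migf)
	{
		struct hisi_qm *qm = &hisi_acc_vdev->vf_qm;
		struct device *dev = &qm->pdev->dev;
		struct acc_vf_data *vf_data = &migf->vf_data;	/* assumed field */
		int ret;

		/* (validation of the saved state elided) */

		/* Restore the queue memory layout captured at save time. */
		qm->eqe_dma = vf_data->eqe_dma;
		qm->aeqe_dma = vf_data->aeqe_dma;
		qm->sqc_dma = vf_data->sqc_dma;
		qm->cqc_dma = vf_data->cqc_dma;
		qm->qp_base = vf_data->qp_base;
		qm->qp_num = vf_data->qp_num;

		ret = qm_set_regs(qm, vf_data);
		if (ret) {
			dev_err(dev, "failed to set VF regs\n");
			return ret;
		}

		/* Point the device at the restored SQC/CQC base tables. */
		ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
		if (ret) {
			dev_err(dev, "failed to set sqc\n");
			return ret;
		}

		ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
		if (ret) {
			dev_err(dev, "failed to set cqc\n");
			return ret;
		}

		qm_dev_cmd_init(qm);
		return 0;
	}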
556 struct hisi_qm *qm = hisi_acc_vdev->pf_qm; in hisi_acc_check_int_state() local
558 struct device *dev = &qm->pdev->dev; in hisi_acc_check_int_state()
562 state = qm_check_reg_state(qm, QM_ABNORMAL_INT_STATUS); in hisi_acc_check_int_state()
583 state = qm_check_reg_state(qm, SEC_CORE_INT_STATUS); in hisi_acc_check_int_state()
590 state = qm_check_reg_state(qm, HPRE_HAC_INT_STATUS); in hisi_acc_check_int_state()
597 state = qm_check_reg_state(qm, HZIP_CORE_INT_STATUS); in hisi_acc_check_int_state()
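Note: hisi_acc_check_int_state() checks the QM-level abnormal interrupt status on the parent PF first and then the engine-specific error status (SEC, HPRE or ZIP); a sketch of that flow, in which the device-ID dispatch, the PCI_DEVICE_ID_* macro names, the return codes and the error strings are all assumptions:

	static int hisi_acc_check_int_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
	{
		struct hisi_qm *qm = hisi_acc_vdev->pf_qm;
		struct device *dev = &qm->pdev->dev;
		u32 state;

		/* QM-level abnormal interrupt status is checked first. */
		state = qm_check_reg_state(qm, QM_ABNORMAL_INT_STATUS);
		if (state) {
			dev_err(dev, "QM abnormal interrupt state is set\n");
			return -EBUSY;
		}

		/* Then the engine-specific status, selected by PF type (assumed IDs). */
		switch (qm->pdev->device) {
		case PCI_DEVICE_ID_HUAWEI_SEC_PF:
			state = qm_check_reg_state(qm, SEC_CORE_INT_STATUS);
			break;
		case PCI_DEVICE_ID_HUAWEI_HPRE_PF:
			state = qm_check_reg_state(qm, HPRE_HAC_INT_STATUS);
			break;
		case PCI_DEVICE_ID_HUAWEI_ZIP_PF:
			state = qm_check_reg_state(qm, HZIP_CORE_INT_STATUS);
			break;
		default:
			return -EINVAL;
		}

		if (state) {
			dev_err(dev, "engine abnormal interrupt state is set\n");
			return -EBUSY;
		}

		return 0;
	}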