Lines Matching +full:async +full:- +full:enum
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.
In hclgevf_resp_to_errno():
	return resp_code ? -resp_code : 0;
In hclgevf_reset_mbx_resp_status():
	hdev->mbx_resp.received_resp = false;
	hdev->mbx_resp.origin_mbx_msg = 0;
	hdev->mbx_resp.resp_status = 0;
	hdev->mbx_resp.match_id++;
	...
	if (hdev->mbx_resp.match_id == 0)
		hdev->mbx_resp.match_id = HCLGEVF_MBX_MATCH_ID_START;
	memset(hdev->mbx_resp.additional_info, 0, HCLGE_MBX_MAX_RESP_DATA_SIZE);
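The match_id increment above is a 16-bit sequence number that is never allowed to be 0: the unconditional received_resp branch in hclgevf_handle_mbx_response() further down suggests 0 is reserved to mean "the PF did not echo an id back". A minimal sketch of the wrap-around rule, assuming HCLGEVF_MBX_MATCH_ID_START is 1 (its value is not part of the matched lines):

#include <stdint.h>
#include <stdio.h>

#define MATCH_ID_START 1	/* stand-in for HCLGEVF_MBX_MATCH_ID_START; assumed value */

/* Advance a 16-bit sequence id, skipping 0 on wrap-around so that 0 can
 * keep its reserved meaning of "no id". */
static uint16_t next_match_id(uint16_t id)
{
	id++;
	if (id == 0)		/* u16 wrapped around */
		id = MATCH_ID_START;
	return id;
}

int main(void)
{
	uint16_t id = 65534;

	for (int i = 0; i < 3; i++) {
		id = next_match_id(id);
		printf("%u\n", id);	/* prints 65535, 1, 2 */
	}
	return 0;
}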
In hclgevf_get_mbx_resp():
	/* kernel-doc: @code1: the message sub-opcode the VF sends to the PF. */
	...
	dev_err(&hdev->pdev->dev,
	...
	return -EINVAL;
	...
	while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
		if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
			     &hdev->hw.hw.comm_state))
			return -EIO;
	...
	dev_err(&hdev->pdev->dev,
	...
		code0, code1, hdev->mbx_resp.received_resp, i);
	return -EIO;
	...
	mbx_resp = &hdev->mbx_resp;
	r_code0 = (u16)(mbx_resp->origin_mbx_msg >> 16);
	r_code1 = (u16)(mbx_resp->origin_mbx_msg & 0xff);
	...
	if (mbx_resp->resp_status)
		return mbx_resp->resp_status;
	...
	memcpy(resp_data, &mbx_resp->additional_info[0], resp_len);
	...
	if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) {
		dev_err(&hdev->pdev->dev,
	...
			code0, code1, mbx_resp->resp_status);
		dev_err(&hdev->pdev->dev,
	...
		return -EIO;
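The while loop above is a bounded poll: spin until received_resp is set by the receive path, give up after HCLGEVF_MAX_TRY_TIMES iterations, and bail out early if the command queue has been disabled. A user-space sketch of the same shape, assuming a ~1 ms sleep per try (the driver's actual sleep line and retry count are not among the matched lines):

#include <errno.h>
#include <stdatomic.h>
#include <time.h>

#define MAX_TRY_TIMES 500	/* stand-in for HCLGEVF_MAX_TRY_TIMES; assumed value */

/* Wait for another context to set *received, checking *disabled on each
 * iteration so a torn-down channel fails fast instead of timing out. */
static int wait_for_resp(atomic_bool *received, atomic_bool *disabled)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000000 };	/* ~1 ms */
	int i = 0;

	while (!atomic_load(received) && i < MAX_TRY_TIMES) {
		if (atomic_load(disabled))
			return -EIO;	/* queue torn down: stop waiting */
		nanosleep(&ts, NULL);
		i++;
	}

	return atomic_load(received) ? 0 : -EIO;	/* -EIO on timeout */
}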
In hclgevf_send_mbx_msg():
	dev_err(&hdev->pdev->dev,
	...
	return -EINVAL;
	...
	hnae3_set_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B, 1);
	...
	memcpy(&req->msg, send_msg, sizeof(struct hclge_vf_to_pf_msg));
	...
	if (test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state))
	...
	mutex_lock(&hdev->mbx_resp.mbx_mutex);
	...
	req->match_id = cpu_to_le16(hdev->mbx_resp.match_id);
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	...
		dev_err(&hdev->pdev->dev,
	...
		mutex_unlock(&hdev->mbx_resp.mbx_mutex);
	...
	status = hclgevf_get_mbx_resp(hdev, send_msg->code,
				      send_msg->subcode, resp_data,
				      resp_len);
	mutex_unlock(&hdev->mbx_resp.mbx_mutex);
	...
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	...
		dev_err(&hdev->pdev->dev,
	...
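Everything between mutex_lock() and the final mutex_unlock() above is one synchronous transaction: stamp the request with the current match_id, send it, then wait for the reply. Holding mbx_resp.mbx_mutex across the whole span keeps at most one response outstanding, which is what makes the single hdev->mbx_resp slot safe. A pthreads sketch of the pattern; send and wait are hypothetical callbacks, not driver functions:

#include <pthread.h>
#include <stdint.h>

struct mbx_state {
	pthread_mutex_t lock;	/* plays the role of mbx_resp.mbx_mutex */
	uint16_t match_id;
};

/* Run one request/response exchange under the lock so a second caller
 * cannot overwrite the single response slot mid-transaction. */
static int send_and_wait(struct mbx_state *st,
			 int (*send)(uint16_t match_id),
			 int (*wait)(uint16_t match_id))
{
	int ret;

	pthread_mutex_lock(&st->lock);
	st->match_id++;			/* new id for this exchange */
	ret = send(st->match_id);
	if (!ret)
		ret = wait(st->match_id);
	pthread_mutex_unlock(&st->lock);
	return ret;
}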
In hclgevf_cmd_crq_empty():
	return tail == hw->hw.cmq.crq.next_to_use;
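hclgevf_cmd_crq_empty() is the standard ring-buffer emptiness test: the queue is drained when the consumer cursor (next_to_use) has caught up with the hardware-owned producer cursor (the tail register). In sketch form:

#include <stdbool.h>
#include <stdint.h>

/* Empty when the consumer has caught up with the producer; any gap means
 * the hardware has published descriptors we have not processed yet. */
static bool crq_empty(uint32_t hw_tail, uint32_t next_to_use)
{
	return hw_tail == next_to_use;
}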
In hclgevf_handle_mbx_response():
	u16 vf_mbx_msg_subcode = le16_to_cpu(req->msg.vf_mbx_msg_subcode);
	u16 vf_mbx_msg_code = le16_to_cpu(req->msg.vf_mbx_msg_code);
	struct hclgevf_mbx_resp_status *resp = &hdev->mbx_resp;
	u16 resp_status = le16_to_cpu(req->msg.resp_status);
	u16 match_id = le16_to_cpu(req->match_id);
	...
	if (resp->received_resp)
		dev_warn(&hdev->pdev->dev,
	...
	resp->origin_mbx_msg = (vf_mbx_msg_code << 16);
	resp->origin_mbx_msg |= vf_mbx_msg_subcode;
	resp->resp_status = hclgevf_resp_to_errno(resp_status);
	memcpy(resp->additional_info, req->msg.resp_data,
	...
	 * ignore the response; the driver will clear hdev->mbx_resp
	...
	if (match_id == resp->match_id)
		resp->received_resp = true;
	...
		resp->received_resp = true;
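The two received_resp assignments above implement stale-response filtering: a response carrying a non-zero match_id is accepted only when it matches the id of the currently outstanding request, while a zero match_id is taken to mean the PF does not support the mechanism, so the response is accepted unconditionally. A sketch of that decision, under the same zero-means-unsupported reading of the fallback branch:

#include <stdbool.h>
#include <stdint.h>

/* Decide whether an incoming response belongs to the outstanding request.
 * echoed_id == 0 means the peer never filled the field in, so the match
 * check has to be skipped (legacy-PF fallback). */
static bool response_is_current(uint16_t echoed_id, uint16_t outstanding_id)
{
	if (echoed_id == 0)
		return true;
	return echoed_id == outstanding_id;
}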
In hclgevf_handle_mbx_msg():
	/* drop the async msg if the ARQ is full
	...
	if (atomic_read(&hdev->arq.count) >=
	...
		dev_warn(&hdev->pdev->dev,
			 "Async Q full, dropping msg(%u)\n",
			 le16_to_cpu(req->msg.code));
	...
	/* queue the async message at the tail of the ARQ */
	memcpy(hdev->arq.msg_q[hdev->arq.tail], &req->msg,
	...
	hclge_mbx_tail_ptr_move_arq(hdev->arq);
	atomic_inc(&hdev->arq.count);
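The ARQ enqueue above is a classic bounded ring with a drop-on-full policy: these messages arrive on the receive path where blocking is not an option, so a full queue drops the new message with a warning instead. A self-contained sketch; ARQ_LEN and ARQ_MSG_WORDS are illustrative sizes, not the driver's constants:

#include <stdint.h>
#include <string.h>

#define ARQ_LEN		8	/* illustrative depth */
#define ARQ_MSG_WORDS	8	/* illustrative message size in u16 words */

struct arq {
	uint16_t msg_q[ARQ_LEN][ARQ_MSG_WORDS];
	int head, tail, count;
};

/* Copy a message into the tail slot and advance the tail, or drop it
 * when the ring is full -- the producer must never block. */
static int arq_push(struct arq *q, const uint16_t *msg)
{
	if (q->count >= ARQ_LEN)
		return -1;	/* full: caller logs and drops */
	memcpy(q->msg_q[q->tail], msg, sizeof(q->msg_q[0]));
	q->tail = (q->tail + 1) % ARQ_LEN;
	q->count++;
	return 0;
}

The driver keeps count in an atomic_t because the producer and the consumer (the async service task) run in different contexts; this sketch assumes a single-threaded caller.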
In hclgevf_mbx_handler():
	crq = &hdev->hw.hw.cmq.crq;

	while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
		if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
			     &hdev->hw.hw.comm_state)) {
			dev_info(&hdev->pdev->dev, "vf crq need init\n");
	...
		desc = &crq->desc[crq->next_to_use];
		req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;

		flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
		code = le16_to_cpu(req->msg.code);
	...
			dev_warn(&hdev->pdev->dev,
	...
			crq->desc[crq->next_to_use].flag = 0;
	...
	 * timeout and simultaneously queue the async messages for later
	...
			dev_err(&hdev->pdev->dev,
	...
		crq->desc[crq->next_to_use].flag = 0;
	...
	hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CRQ_HEAD_REG,
			  crq->next_to_use);
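The handler drains the CRQ in place and, once the loop exits, writes next_to_use to HCLGE_COMM_NIC_CRQ_HEAD_REG so the hardware learns which descriptors have been consumed and may reuse them. A sketch of that consume-then-publish shape; hw_write stands in for the MMIO head-pointer write:

#include <stdint.h>

#define RING_LEN 64	/* illustrative ring size */

struct ring {
	uint32_t next_to_use;	/* consumer cursor */
};

/* Process every descriptor up to the producer cursor, then publish the
 * new consumer position in one register write at the end -- cheaper
 * than acknowledging each descriptor individually. */
static void drain_and_ack(struct ring *r, uint32_t hw_tail,
			  void (*process)(uint32_t idx),
			  void (*hw_write)(uint32_t head))
{
	while (r->next_to_use != hw_tail) {
		process(r->next_to_use);
		r->next_to_use = (r->next_to_use + 1) % RING_LEN;
	}
	hw_write(r->next_to_use);	/* tell hardware how far we got */
}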
In hclgevf_parse_promisc_info():
	dev_info(&hdev->pdev->dev,
	...
In hclgevf_mbx_async_handler():
	enum hnae3_reset_type reset_type;
	...
	tail = hdev->arq.tail;

	/* process all the async queue messages */
	while (tail != hdev->arq.head) {
		if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
			     &hdev->hw.hw.comm_state)) {
			dev_info(&hdev->pdev->dev,
				 "vf crq need init in async\n");
	...
		msg_q = hdev->arq.msg_q[hdev->arq.head];
	...
			link_status = le16_to_cpu(link_info->link_status);
			speed = le32_to_cpu(link_info->speed);
			duplex = (u8)le16_to_cpu(link_info->duplex);
			flag = link_info->flag;
	...
				 &hdev->state);
	...
			idx = le16_to_cpu(link_mode->idx);
	...
			hdev->hw.mac.supported =
				le64_to_cpu(link_mode->link_mode);
	...
			hdev->hw.mac.advertising =
				le64_to_cpu(link_mode->link_mode);
	...
			 * eventually be re-initialized.
	...
			reset_type =
				(enum hnae3_reset_type)le16_to_cpu(msg_q[1]);
			set_bit(reset_type, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
	...
			state = le16_to_cpu(vlan_info->state);
	...
			dev_err(&hdev->pdev->dev,
	...
		hclge_mbx_head_ptr_move_arq(hdev->arq);
		atomic_dec(&hdev->arq.count);
		msg_q = hdev->arq.msg_q[hdev->arq.head];
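The consumer side mirrors the ARQ push shown earlier: snapshot the tail once, walk the head toward it, and re-check the disable bit on every message so a device teardown can interrupt a long drain. A sketch of the loop skeleton; handle() is a hypothetical per-message dispatch standing in for the switch on the message code:

#include <stdatomic.h>

#define ARQ_LEN 8	/* illustrative depth, matching the push sketch */

/* Consume messages from head up to a snapshotted tail, bailing out early
 * if the command queue is being torn down underneath us. */
static void arq_drain(int *head, int tail, atomic_bool *disabled,
		      void (*handle)(int idx))
{
	while (*head != tail) {
		if (atomic_load(disabled))
			return;		/* device going down: stop mid-drain */
		handle(*head);
		*head = (*head + 1) % ARQ_LEN;
	}
}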