// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2017 Intel Deutschland GmbH
 * Copyright (C) 2019-2021, 2024 Intel Corporation
 */
#include "iwl-drv.h"
#include "runtime.h"
#include "dbg.h"
#include "debugfs.h"

#include "fw/api/system.h"
#include "fw/api/commands.h"
#include "fw/api/rx.h"
#include "fw/api/datapath.h"

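/*
 * Zero-initialize the runtime context, wire it up to the transport,
 * firmware image and operation callbacks, and register its debugfs
 * entries.
 */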
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
			const struct iwl_fw *fw,
			const struct iwl_fw_runtime_ops *ops, void *ops_ctx,
			const struct iwl_dump_sanitize_ops *sanitize_ops,
			void *sanitize_ctx,
			struct dentry *dbgfs_dir)
{
	int i;

	memset(fwrt, 0, sizeof(*fwrt));
	fwrt->trans = trans;
	fwrt->fw = fw;
	fwrt->dev = trans->dev;
	fwrt->dump.conf = FW_DBG_INVALID;
	fwrt->ops = ops;
	fwrt->sanitize_ops = sanitize_ops;
	fwrt->sanitize_ctx = sanitize_ctx;
	fwrt->ops_ctx = ops_ctx;
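	/* Set up the deferred workers used to collect firmware error dumps */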
	for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++) {
		fwrt->dump.wks[i].idx = i;
		INIT_DELAYED_WORK(&fwrt->dump.wks[i].wk, iwl_fw_error_dump_wk);
	}
	iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir);
}
IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);

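/*
 * Stop the firmware timestamp marker work and synchronously collect any
 * debug data configured for the HOST_D3_START time point before the
 * device enters D3.
 *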
 * Assumes the appropriate lock is held by the caller.
 */
void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt)
{
	iwl_fw_suspend_timestamp(fwrt);
	iwl_dbg_tlv_time_point_sync(fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_START,
				    NULL);
}
IWL_EXPORT_SYMBOL(iwl_fw_runtime_suspend);

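/*
 * Resume-time counterpart of iwl_fw_runtime_suspend(): notify the debug
 * TLV code of the HOST_D3_END time point and resume the firmware
 * timestamp marker.
 */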
void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt)
{
	iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_END, NULL);
	iwl_fw_resume_timestamp(fwrt);
}
IWL_EXPORT_SYMBOL(iwl_fw_runtime_resume);

/* Set the device type (discrete vs. integrated) and the SoC latency */
int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
{
	struct iwl_soc_configuration_cmd cmd = {};
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(SYSTEM_GROUP, SOC_CONFIGURATION_CMD),
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	int ret;

	/*
	 * In VER_1 of this command, the discrete value is considered
	 * an integer; in VER_2, it's a bitmask.  Since we have only 2
	 * values in VER_1, this is backwards-compatible with VER_2,
	 * as long as we don't set any other bits.
	 */
	if (!fwrt->trans->trans_cfg->integrated)
		cmd.flags = cpu_to_le32(SOC_CONFIG_CMD_FLAGS_DISCRETE);

	BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_NONE !=
		     SOC_FLAGS_LTR_APPLY_DELAY_NONE);
	BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_200US !=
		     SOC_FLAGS_LTR_APPLY_DELAY_200);
	BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_2500US !=
		     SOC_FLAGS_LTR_APPLY_DELAY_2500);
	BUILD_BUG_ON(IWL_CFG_TRANS_LTR_DELAY_1820US !=
		     SOC_FLAGS_LTR_APPLY_DELAY_1820);

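	/*
	 * The LTR delay is only applicable to integrated devices (hence the
	 * WARN_ON); the BUILD_BUG_ONs above guarantee that the config values
	 * map directly onto the firmware's LTR delay flag values.
	 */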
	if (fwrt->trans->trans_cfg->ltr_delay != IWL_CFG_TRANS_LTR_DELAY_NONE &&
	    !WARN_ON(!fwrt->trans->trans_cfg->integrated))
		cmd.flags |= le32_encode_bits(fwrt->trans->trans_cfg->ltr_delay,
					      SOC_FLAGS_LTR_APPLY_DELAY_MASK);

	if (iwl_fw_lookup_cmd_ver(fwrt->fw, SCAN_REQ_UMAC,
				  IWL_FW_CMD_VER_UNKNOWN) >= 2 &&
	    fwrt->trans->trans_cfg->low_latency_xtal)
		cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY);

	cmd.latency = cpu_to_le32(fwrt->trans->trans_cfg->xtal_latency);

	ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
	if (ret)
		IWL_ERR(fwrt, "Failed to set soc latency: %d\n", ret);
	return ret;
}
IWL_EXPORT_SYMBOL(iwl_set_soc_latency);

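/*
 * Hand the DMA addresses of all RX queues except the default one to the
 * firmware through RFH_QUEUE_CONFIG_CMD so it can make use of them.
 */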
int iwl_configure_rxq(struct iwl_fw_runtime *fwrt)
{
	int i, num_queues, size, ret;
	struct iwl_rfh_queue_config *cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(DATA_PATH_GROUP, RFH_QUEUE_CONFIG_CMD),
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
	};

	/*
	 * The default queue is configured via context info, so if we
	 * have a single queue, there's nothing to do here.
	 */
	if (fwrt->trans->num_rx_queues == 1)
		return 0;

	if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22000)
		return 0;

	/* skip the default queue */
	num_queues = fwrt->trans->num_rx_queues - 1;

	size = struct_size(cmd, data, num_queues);

	cmd = kzalloc(size, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->num_queues = num_queues;

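	/* Queue 0 is the default queue; describe queues 1..num_queues here */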
	for (i = 0; i < num_queues; i++) {
		struct iwl_trans_rxq_dma_data data;

		cmd->data[i].q_num = i + 1;
		ret = iwl_trans_get_rxq_dma_data(fwrt->trans, i + 1, &data);
		if (ret)
			goto out;

		cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb);
		cmd->data[i].urbd_stts_wrptr =
			cpu_to_le64(data.urbd_stts_wrptr);
		cmd->data[i].ur_bd_cb = cpu_to_le64(data.ur_bd_cb);
		cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid);
	}

	hcmd.data[0] = cmd;
	hcmd.len[0] = size;

	ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);

out:
	kfree(cmd);

	if (ret)
		IWL_ERR(fwrt, "Failed to configure RX queues: %d\n", ret);

	return ret;
}
IWL_EXPORT_SYMBOL(iwl_configure_rxq);