// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#include "t7xx_hif_cldma.h"
#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_port_proxy.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"

#define FSM_DRM_DISABLE_DELAY_MS		200
#define FSM_EVENT_POLL_INTERVAL_MS		20
#define FSM_MD_EX_REC_OK_TIMEOUT_MS		10000
#define FSM_MD_EX_PASS_TIMEOUT_MS		45000
#define FSM_CMD_TIMEOUT_MS			2000

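/*
 * Poll the device stage reported in T7XX_PCIE_MISC_DEV_STATUS until the
 * device reaches the LK (bootloader) or Linux stage, checking every
 * 100 ms for up to 20 seconds.
 */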
#define wait_for_expected_dev_stage(status)	\
	read_poll_timeout(ioread32, status,	\
			  ((status & MISC_STAGE_MASK) == T7XX_DEV_STAGE_LINUX) ||	\
			  ((status & MISC_STAGE_MASK) == T7XX_DEV_STAGE_LK), 100000,	\
			  20000000, false, IREG_BASE(md->t7xx_dev) +	\
			  T7XX_PCIE_MISC_DEV_STATUS)

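/*
 * Add @notifier to the list of callbacks invoked by fsm_state_notify()
 * on every modem state change.
 */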
void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_add_tail(&notifier->entry, &ctl->notifier_list);
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

void t7xx_fsm_notifier_unregister(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
{
	struct t7xx_fsm_notifier *notifier_cur, *notifier_next;
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_for_each_entry_safe(notifier_cur, notifier_next, &ctl->notifier_list, entry) {
		if (notifier_cur == notifier)
			list_del(&notifier->entry);
	}
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

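/*
 * Walk the notifier list and invoke each callback with the new state.
 * The notifier lock is dropped around each callback so that callbacks
 * may sleep; entries must therefore not be freed while a broadcast is
 * in flight.
 */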
static void fsm_state_notify(struct t7xx_modem *md, enum md_state state)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	struct t7xx_fsm_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_for_each_entry(notifier, &ctl->notifier_list, entry) {
		spin_unlock_irqrestore(&ctl->notifier_lock, flags);
		if (notifier->notifier_fn)
			notifier->notifier_fn(state, notifier->data);

		spin_lock_irqsave(&ctl->notifier_lock, flags);
	}
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state)
{
	ctl->md_state = state;

	/* Update to port first, otherwise sending message on HS2 may fail */
	t7xx_port_proxy_md_status_notify(ctl->md->port_prox, state);
	fsm_state_notify(ctl->md, state);
}

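/*
 * Complete a command: if the issuer is waiting on the result, store
 * @result and wake it via the completion, then free the command.
 */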
static void fsm_finish_command(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, int result)
{
	if (cmd->flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		*cmd->ret = result;
		complete_all(cmd->done);
	}

	kfree(cmd);
}

static void fsm_del_kf_event(struct t7xx_fsm_event *event)
{
	list_del(&event->entry);
	kfree(event);
}

static void fsm_flush_event_cmd_qs(struct t7xx_fsm_ctl *ctl)
{
	struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_event *event, *evt_next;
	struct t7xx_fsm_command *cmd, *cmd_next;
	unsigned long flags;

	spin_lock_irqsave(&ctl->command_lock, flags);
	list_for_each_entry_safe(cmd, cmd_next, &ctl->command_queue, entry) {
		dev_warn(dev, "Unhandled command %d\n", cmd->cmd_id);
		list_del(&cmd->entry);
		fsm_finish_command(ctl, cmd, -EINVAL);
	}
	spin_unlock_irqrestore(&ctl->command_lock, flags);

	spin_lock_irqsave(&ctl->event_lock, flags);
	list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) {
		dev_warn(dev, "Unhandled event %d\n", event->event_id);
		fsm_del_kf_event(event);
	}
	spin_unlock_irqrestore(&ctl->event_lock, flags);
}

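/*
 * Poll the head of the event queue until @event_expected arrives or
 * @retries polling intervals elapse. An @event_ignore entry at the head
 * is consumed and discarded; any other event is left queued and the
 * thread sleeps for FSM_EVENT_POLL_INTERVAL_MS before retrying.
 */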
static void fsm_wait_for_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_expected,
			       enum t7xx_fsm_event_state event_ignore, int retries)
{
	struct t7xx_fsm_event *event;
	bool event_received = false;
	unsigned long flags;
	int cnt = 0;

	while (cnt++ < retries && !event_received) {
		bool sleep_required = true;

		if (kthread_should_stop())
			return;

		spin_lock_irqsave(&ctl->event_lock, flags);
		event = list_first_entry_or_null(&ctl->event_queue, struct t7xx_fsm_event, entry);
		if (event) {
			event_received = event->event_id == event_expected;
			if (event_received || event->event_id == event_ignore) {
				fsm_del_kf_event(event);
				sleep_required = false;
			}
		}
		spin_unlock_irqrestore(&ctl->event_lock, flags);

		if (sleep_required)
			msleep(FSM_EVENT_POLL_INTERVAL_MS);
	}
}

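/*
 * Handle a modem exception. Only valid in the READY or STARTING states.
 * For EXCEPTION_EVENT, broadcast the exception state, run the exception
 * handshake and wait for the MD_EX_REC_OK and MD_EX_PASS events.
 */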
static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd,
				  enum t7xx_ex_reason reason)
{
	struct device *dev = &ctl->md->t7xx_dev->pdev->dev;

	if (ctl->curr_state != FSM_STATE_READY && ctl->curr_state != FSM_STATE_STARTING) {
		if (cmd)
			fsm_finish_command(ctl, cmd, -EINVAL);

		return;
	}

	ctl->curr_state = FSM_STATE_EXCEPTION;

	switch (reason) {
	case EXCEPTION_HS_TIMEOUT:
		dev_err(dev, "Boot Handshake failure\n");
		break;

	case EXCEPTION_EVENT:
		dev_err(dev, "Exception event\n");
		t7xx_fsm_broadcast_state(ctl, MD_STATE_EXCEPTION);
		t7xx_pci_pm_exp_detected(ctl->md->t7xx_dev);
		t7xx_md_exception_handshake(ctl->md);

		fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_REC_OK, FSM_EVENT_MD_EX,
				   FSM_MD_EX_REC_OK_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS);
		fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_PASS, FSM_EVENT_INVALID,
				   FSM_MD_EX_PASS_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS);
		break;

	default:
		dev_err(dev, "Exception %d\n", reason);
		break;
	}

	if (cmd)
		fsm_finish_command(ctl, cmd, 0);
}

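/*
 * React to an event reported by the LK (bootloader) stage. Port-creation
 * events bring up the AP CLDMA with the dedicated queue configuration,
 * enable the first port of the early configuration and switch the device
 * into fastboot download or dump mode.
 */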
static void t7xx_lk_stage_event_handling(struct t7xx_fsm_ctl *ctl, unsigned int status)
{
	struct t7xx_modem *md = ctl->md;
	struct cldma_ctrl *md_ctrl;
	enum lk_event_id lk_event;
	struct device *dev;
	struct t7xx_port *port;

	dev = &md->t7xx_dev->pdev->dev;
	lk_event = FIELD_GET(MISC_LK_EVENT_MASK, status);
	switch (lk_event) {
	case LK_EVENT_NORMAL:
	case LK_EVENT_RESET:
		break;

	case LK_EVENT_CREATE_PD_PORT:
	case LK_EVENT_CREATE_POST_DL_PORT:
		md_ctrl = md->md_ctrl[CLDMA_ID_AP];
		t7xx_cldma_hif_hw_init(md_ctrl);
		t7xx_cldma_stop(md_ctrl);
		t7xx_cldma_switch_cfg(md_ctrl, CLDMA_DEDICATED_Q_CFG);

		port = &ctl->md->port_prox->ports[0];
		port->port_conf->ops->enable_chl(port);

		t7xx_cldma_start(md_ctrl);

		if (lk_event == LK_EVENT_CREATE_POST_DL_PORT)
			t7xx_mode_update(md->t7xx_dev, T7XX_FASTBOOT_DOWNLOAD);
		else
			t7xx_mode_update(md->t7xx_dev, T7XX_FASTBOOT_DUMP);
		break;

	default:
		dev_err(dev, "Invalid LK event %d\n", lk_event);
		break;
	}
}

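/*
 * Enter the STOPPED state. In fastboot download/dump mode the device is
 * left alone; otherwise broadcast MD_STATE_STOPPED and reset the modem.
 */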
static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl)
{
	enum t7xx_mode mode;

	ctl->curr_state = FSM_STATE_STOPPED;

	mode = READ_ONCE(ctl->md->t7xx_dev->mode);
	if (mode == T7XX_FASTBOOT_DOWNLOAD || mode == T7XX_FASTBOOT_DUMP)
		return 0;

	t7xx_fsm_broadcast_state(ctl, MD_STATE_STOPPED);
	return t7xx_md_reset(ctl->md->t7xx_dev);
}

static void fsm_routine_stopped(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
	if (ctl->curr_state == FSM_STATE_STOPPED) {
		fsm_finish_command(ctl, cmd, -EINVAL);
		return;
	}

	fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl));
}

static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
	struct cldma_ctrl *md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD];
	struct t7xx_pci_dev *t7xx_dev = ctl->md->t7xx_dev;

	if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) {
		fsm_finish_command(ctl, cmd, -EINVAL);
		return;
	}

	ctl->curr_state = FSM_STATE_STOPPING;
	t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP);
	t7xx_cldma_stop(md_ctrl);

	t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP);
	/* Wait for the DRM disable to take effect */
	msleep(FSM_DRM_DISABLE_DELAY_MS);

	fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl));
}

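/*
 * Promote the modem state from WAITING_FOR_HS2 to READY. Note the
 * ordering: unlike t7xx_fsm_broadcast_state(), the FSM notifiers run
 * before the port proxy is told.
 */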
static void t7xx_fsm_broadcast_ready_state(struct t7xx_fsm_ctl *ctl)
{
	if (ctl->md_state != MD_STATE_WAITING_FOR_HS2)
		return;

	ctl->md_state = MD_STATE_READY;

	fsm_state_notify(ctl->md, MD_STATE_READY);
	t7xx_port_proxy_md_status_notify(ctl->md->port_prox, MD_STATE_READY);
}

static void fsm_routine_ready(struct t7xx_fsm_ctl *ctl)
{
	struct t7xx_modem *md = ctl->md;

	ctl->curr_state = FSM_STATE_READY;
	t7xx_fsm_broadcast_ready_state(ctl);
	t7xx_mode_update(md->t7xx_dev, T7XX_READY);
	t7xx_md_event_notify(md, FSM_READY);
}

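/*
 * Run the handshake phase of modem start-up: broadcast WAITING_FOR_HS1,
 * then wait up to 60 seconds for both the MD and AP cores to report
 * ready. A handshake timeout is escalated through fsm_routine_exception()
 * with EXCEPTION_HS_TIMEOUT.
 */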
static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl)
{
	struct t7xx_modem *md = ctl->md;
	struct device *dev;

	ctl->curr_state = FSM_STATE_STARTING;

	t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS1);
	t7xx_md_event_notify(md, FSM_START);

	wait_event_interruptible_timeout(ctl->async_hk_wq,
					 (md->core_md.ready && md->core_ap.ready) ||
					  ctl->exp_flg, HZ * 60);
	dev = &md->t7xx_dev->pdev->dev;

	if (ctl->exp_flg)
		dev_err(dev, "MD exception is captured during handshake\n");

	if (!md->core_md.ready) {
		dev_err(dev, "MD handshake timeout\n");
		if (md->core_md.handshake_ongoing)
			t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0);

		fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
		return -ETIMEDOUT;
	} else if (!md->core_ap.ready) {
		dev_err(dev, "AP handshake timeout\n");
		if (md->core_ap.handshake_ongoing)
			t7xx_fsm_append_event(ctl, FSM_EVENT_AP_HS2_EXIT, NULL, 0);

		fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
		return -ETIMEDOUT;
	}

	t7xx_pci_pm_init_late(md->t7xx_dev);
	fsm_routine_ready(ctl);
	return 0;
}

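/*
 * Start (or resume) the modem boot flow. Poll the device stage and,
 * depending on whether the device sits in the BROM, LK or Linux stage,
 * either re-queue the start command, set up the early ports, or run the
 * full handshake via fsm_routine_starting().
 */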
static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
	struct t7xx_modem *md = ctl->md;
	struct device *dev;
	u32 status;
	int ret;

	if (!md)
		return;

	if (ctl->curr_state != FSM_STATE_INIT && ctl->curr_state != FSM_STATE_PRE_START &&
	    ctl->curr_state != FSM_STATE_STOPPED) {
		fsm_finish_command(ctl, cmd, -EINVAL);
		return;
	}

	dev = &md->t7xx_dev->pdev->dev;
	ctl->curr_state = FSM_STATE_PRE_START;
	t7xx_md_event_notify(md, FSM_PRE_START);

	ret = wait_for_expected_dev_stage(status);

	if (ret) {
		dev_err(dev, "read poll timeout %d\n", ret);
		goto finish_command;
	}

	if (status != ctl->status || cmd->flag != 0) {
		u32 stage = FIELD_GET(MISC_STAGE_MASK, status);

		switch (stage) {
		case T7XX_DEV_STAGE_INIT:
		case T7XX_DEV_STAGE_BROM_PRE:
		case T7XX_DEV_STAGE_BROM_POST:
			dev_dbg(dev, "BROM_STAGE Entered\n");
			ret = t7xx_fsm_append_cmd(ctl, FSM_CMD_START, 0);
			break;

		case T7XX_DEV_STAGE_LK:
			dev_dbg(dev, "LK_STAGE Entered\n");
			t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_EARLY);
			t7xx_lk_stage_event_handling(ctl, status);

			break;

		case T7XX_DEV_STAGE_LINUX:
			dev_dbg(dev, "LINUX_STAGE Entered\n");
			t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM |
					     D2H_INT_ASYNC_MD_HK | D2H_INT_ASYNC_AP_HK);
			if (cmd->flag == 0)
				break;
			t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]);
			t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
			t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_NORMAL);
			ret = fsm_routine_starting(ctl);
			break;

		default:
			break;
		}
		ctl->status = status;
	}

finish_command:
	if (ret)
		t7xx_mode_update(md->t7xx_dev, T7XX_UNKNOWN);

	fsm_finish_command(ctl, cmd, ret);
}

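/*
 * Main FSM worker: sleep until a command is queued, then dequeue and
 * dispatch it. An unknown command fails with -EINVAL and flushes both
 * the command and event queues.
 */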
static int fsm_main_thread(void *data)
{
	struct t7xx_fsm_ctl *ctl = data;
	struct t7xx_fsm_command *cmd;
	unsigned long flags;

	while (!kthread_should_stop()) {
		if (wait_event_interruptible(ctl->command_wq, !list_empty(&ctl->command_queue) ||
					     kthread_should_stop()))
			continue;

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&ctl->command_lock, flags);
		cmd = list_first_entry(&ctl->command_queue, struct t7xx_fsm_command, entry);
		list_del(&cmd->entry);
		spin_unlock_irqrestore(&ctl->command_lock, flags);

		switch (cmd->cmd_id) {
		case FSM_CMD_START:
			fsm_routine_start(ctl, cmd);
			break;

		case FSM_CMD_EXCEPTION:
			fsm_routine_exception(ctl, cmd, FIELD_GET(FSM_CMD_EX_REASON, cmd->flag));
			break;

		case FSM_CMD_PRE_STOP:
			fsm_routine_stopping(ctl, cmd);
			break;

		case FSM_CMD_STOP:
			fsm_routine_stopped(ctl, cmd);
			break;

		default:
			fsm_finish_command(ctl, cmd, -EINVAL);
			fsm_flush_event_cmd_qs(ctl);
			break;
		}
	}

	return 0;
}

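/*
 * Allocate and queue a command for the FSM thread. With
 * FSM_CMD_FLAG_IN_INTERRUPT the allocation is atomic; with
 * FSM_CMD_FLAG_WAIT_FOR_COMPLETION the caller blocks for up to
 * FSM_CMD_TIMEOUT_MS and gets the command's result back. A minimal
 * usage sketch, assuming process context:
 *
 *	ret = t7xx_fsm_append_cmd(ctl, FSM_CMD_STOP,
 *				  FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
 */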
int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, unsigned int flag)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct t7xx_fsm_command *cmd;
	unsigned long flags;
	int ret;

	cmd = kzalloc(sizeof(*cmd), flag & FSM_CMD_FLAG_IN_INTERRUPT ? GFP_ATOMIC : GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	INIT_LIST_HEAD(&cmd->entry);
	cmd->cmd_id = cmd_id;
	cmd->flag = flag;
	if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		cmd->done = &done;
		cmd->ret = &ret;
	}

	spin_lock_irqsave(&ctl->command_lock, flags);
	list_add_tail(&cmd->entry, &ctl->command_queue);
	spin_unlock_irqrestore(&ctl->command_lock, flags);

	wake_up(&ctl->command_wq);

	if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		unsigned long wait_ret;

		wait_ret = wait_for_completion_timeout(&done,
						       msecs_to_jiffies(FSM_CMD_TIMEOUT_MS));
		if (!wait_ret)
			return -ETIMEDOUT;

		return ret;
	}

	return 0;
}

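/*
 * Allocate an event (atomically when called from interrupt context),
 * copy in @data and queue it for consumption by fsm_wait_for_event().
 */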
int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id,
			  unsigned char *data, unsigned int length)
{
	struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_event *event;
	unsigned long flags;

	if (event_id <= FSM_EVENT_INVALID || event_id >= FSM_EVENT_MAX) {
		dev_err(dev, "Invalid event %d\n", event_id);
		return -EINVAL;
	}

	event = kmalloc(struct_size(event, data, length),
			in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	INIT_LIST_HEAD(&event->entry);
	event->event_id = event_id;
	event->length = length;

	if (data && length)
		memcpy(event->data, data, length);

	spin_lock_irqsave(&ctl->event_lock, flags);
	list_add_tail(&event->entry, &ctl->event_queue);
	spin_unlock_irqrestore(&ctl->event_lock, flags);

	wake_up_all(&ctl->event_wq);
	return 0;
}

void t7xx_fsm_clr_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id)
{
	struct t7xx_fsm_event *event, *evt_next;
	unsigned long flags;

	spin_lock_irqsave(&ctl->event_lock, flags);
	list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) {
		if (event->event_id == event_id)
			fsm_del_kf_event(event);
	}
	spin_unlock_irqrestore(&ctl->event_lock, flags);
}

enum md_state t7xx_fsm_get_md_state(struct t7xx_fsm_ctl *ctl)
{
	if (ctl)
		return ctl->md_state;

	return MD_STATE_INVALID;
}

unsigned int t7xx_fsm_get_ctl_state(struct t7xx_fsm_ctl *ctl)
{
	if (ctl)
		return ctl->curr_state;

	return FSM_STATE_STOPPED;
}

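/*
 * Translate a modem interrupt into FSM work from IRQ context: a port
 * enumeration IRQ (re)starts the boot flow, while a CCIF exception wakes
 * any handshake waiter and queues an exception command.
 */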
int t7xx_fsm_recv_md_intr(struct t7xx_fsm_ctl *ctl, enum t7xx_md_irq_type type)
{
	unsigned int cmd_flags = FSM_CMD_FLAG_IN_INTERRUPT;

	if (type == MD_IRQ_PORT_ENUM) {
		return t7xx_fsm_append_cmd(ctl, FSM_CMD_START, cmd_flags);
	} else if (type == MD_IRQ_CCIF_EX) {
		ctl->exp_flg = true;
		wake_up(&ctl->async_hk_wq);
		cmd_flags |= FIELD_PREP(FSM_CMD_EX_REASON, EXCEPTION_EVENT);
		return t7xx_fsm_append_cmd(ctl, FSM_CMD_EXCEPTION, cmd_flags);
	}

	return -EINVAL;
}

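/* Return the FSM to a clean STOPPED state, discarding any queued work */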
void t7xx_fsm_reset(struct t7xx_modem *md)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	fsm_flush_event_cmd_qs(ctl);
	ctl->curr_state = FSM_STATE_STOPPED;
	ctl->exp_flg = false;
	ctl->status = T7XX_DEV_STAGE_INIT;
}

int t7xx_fsm_init(struct t7xx_modem *md)
{
	struct device *dev = &md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_ctl *ctl;

	ctl = devm_kzalloc(dev, sizeof(*ctl), GFP_KERNEL);
	if (!ctl)
		return -ENOMEM;

	md->fsm_ctl = ctl;
	ctl->md = md;
	ctl->curr_state = FSM_STATE_INIT;
	INIT_LIST_HEAD(&ctl->command_queue);
	INIT_LIST_HEAD(&ctl->event_queue);
	init_waitqueue_head(&ctl->async_hk_wq);
	init_waitqueue_head(&ctl->event_wq);
	INIT_LIST_HEAD(&ctl->notifier_list);
	init_waitqueue_head(&ctl->command_wq);
	spin_lock_init(&ctl->event_lock);
	spin_lock_init(&ctl->command_lock);
	ctl->exp_flg = false;
	spin_lock_init(&ctl->notifier_lock);

	ctl->fsm_thread = kthread_run(fsm_main_thread, ctl, "t7xx_fsm");
	return PTR_ERR_OR_ZERO(ctl->fsm_thread);
}

void t7xx_fsm_uninit(struct t7xx_modem *md)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	if (!ctl)
		return;

	if (ctl->fsm_thread)
		kthread_stop(ctl->fsm_thread);

	fsm_flush_event_cmd_qs(ctl);
}