// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"
#include "trace.h"

/*
 * Not all MHI state transitions are synchronous. Transitions like Linkdown,
 * SYS_ERR, and shutdown can happen anytime asynchronously. This function will
 * transition to a new state only if we're allowed to.
 *
 * Priority increases as we go down. For instance, from any state in L0, the
 * transition can be made to states in L1, L2 and L3. A notable exception to
 * this rule is state DISABLE. From DISABLE state we can only transition to
 * POR state. Also, while in an L2 state, the host cannot jump back to the
 * previous L1 or L0 states.
 *
 * Valid transitions:
 * L0: DISABLE <--> POR
 *     POR <--> POR
 *     POR -> M0 -> M2 --> M0
 *     POR -> FW_DL_ERR
 *     FW_DL_ERR <--> FW_DL_ERR
 *     M0 <--> M0
 *     M0 -> FW_DL_ERR
 *     M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
 * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS
 *     SYS_ERR_PROCESS -> SYS_ERR_FAIL
 *     SYS_ERR_FAIL -> SYS_ERR_DETECT
 *     SYS_ERR_PROCESS --> POR
 * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
 *     SHUTDOWN_PROCESS -> DISABLE
 * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
 *     LD_ERR_FATAL_DETECT -> DISABLE
 */
static const struct mhi_pm_transitions dev_state_transitions[] = {
	/* L0 States */
	{
		MHI_PM_DISABLE,
		MHI_PM_POR
	},
	{
		MHI_PM_POR,
		MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M0,
		MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M2,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_ENTER,
		MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3,
		MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_EXIT,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_FW_DL_ERR,
		MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L1 States */
	{
		MHI_PM_SYS_ERR_DETECT,
		MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_SYS_ERR_PROCESS,
		MHI_PM_POR | MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_SYS_ERR_FAIL,
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L2 States */
	{
		MHI_PM_SHUTDOWN_PROCESS,
		MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L3 States */
	{
		MHI_PM_LD_ERR_FATAL_DETECT,
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE
	},
};

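/*
 * Attempt to move pm_state to @state. Each MHI_PM_* state is a single bit, so
 * find_last_bit() on the current state yields its row in dev_state_transitions[]
 * (the from_state check below guards against a mismatch). Every caller in this
 * file holds pm_lock for writing around this read-modify-write of pm_state.
 */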
enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
						   enum mhi_pm_state state)
{
	unsigned long cur_state = mhi_cntrl->pm_state;
	int index = find_last_bit(&cur_state, 32);

	if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
		return cur_state;

	if (unlikely(dev_state_transitions[index].from_state != cur_state))
		return cur_state;

	if (unlikely(!(dev_state_transitions[index].to_states & state)))
		return cur_state;

	trace_mhi_tryset_pm_state(mhi_cntrl, state);
	mhi_cntrl->pm_state = state;
	return mhi_cntrl->pm_state;
}

void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	if (state == MHI_STATE_RESET) {
		ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
					  MHICTRL_RESET_MASK, 1);
	} else {
		ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
					  MHICTRL_MHISTATE_MASK, state);
	}

	if (ret)
		dev_err(dev, "Failed to set MHI state to: %s\n",
			mhi_state_str(state));
}

/* NOP for backward compatibility, host allowed to ring DB in M2 state */
static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
{
}

static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
{
	mhi_cntrl->wake_get(mhi_cntrl, false);
	mhi_cntrl->wake_put(mhi_cntrl, true);
}

/* Handle device ready state transition */
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event;
	enum mhi_pm_state cur_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 interval_us = 25000; /* poll register field every 25 milliseconds */
	u32 timeout_ms;
	int ret, i;

	/* Check if device entered error state */
	if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device link is not accessible\n");
		return -EIO;
	}

	/* Wait for RESET to be cleared and READY bit to be set by the device */
	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				 MHICTRL_RESET_MASK, 0, interval_us,
				 mhi_cntrl->timeout_ms);
	if (ret) {
		dev_err(dev, "Device failed to clear MHI Reset\n");
		return ret;
	}

	timeout_ms = mhi_cntrl->ready_timeout_ms ?
		mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms;
	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
				 MHISTATUS_READY_MASK, 1, interval_us,
				 timeout_ms);
	if (ret) {
		dev_err(dev, "Device failed to enter MHI Ready\n");
		return ret;
	}

	dev_dbg(dev, "Device in READY State\n");
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
	mhi_cntrl->dev_state = MHI_STATE_READY;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_POR) {
		dev_err(dev, "Error moving to state %s from %s\n",
			to_mhi_pm_state_str(MHI_PM_POR),
			to_mhi_pm_state_str(cur_state));
		return -EIO;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device registers not accessible\n");
		goto error_mmio;
	}

	/* Configure MMIO registers */
	ret = mhi_init_mmio(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Error configuring MMIO registers\n");
		goto error_mmio;
	}

	/* Add elements to all SW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if this is an offload or HW event */
		if (mhi_event->offload_ev || mhi_event->hw_ring)
			continue;

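		/*
		 * Make the whole ring available to the device: point the local
		 * write pointer at the last element and mirror it into the
		 * shared ring context before ringing the doorbell below.
		 */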
		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
		/* Update all cores */
		smp_wmb();

		/* Ring the event ring db */
		spin_lock_irq(&mhi_event->lock);
		mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	/* Set MHI to M0 state */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;

error_mmio:
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return -EIO;
}

int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_chan *mhi_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M0;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_M0)) {
		dev_err(dev, "Unable to transition to M0 state\n");
		return -EIO;
	}
	mhi_cntrl->M0++;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, true);

	/* Ring all event rings and CMD ring only if we're in mission mode */
	if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
		struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
		struct mhi_cmd *mhi_cmd =
			&mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];

		for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
			if (mhi_event->offload_ev)
				continue;

			spin_lock_irq(&mhi_event->lock);
			mhi_ring_er_db(mhi_event);
			spin_unlock_irq(&mhi_event->lock);
		}

		/* Only ring primary cmd ring if ring is not empty */
		spin_lock_irq(&mhi_cmd->lock);
		if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
			mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
		spin_unlock_irq(&mhi_cmd->lock);
	}

	/* Ring channel DB registers */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

		if (mhi_chan->db_cfg.reset_req) {
			write_lock_irq(&mhi_chan->lock);
			mhi_chan->db_cfg.db_mode = true;
			write_unlock_irq(&mhi_chan->lock);
		}

		read_lock_irq(&mhi_chan->lock);

		/* Only ring DB if ring is not empty */
		if (tre_ring->base && tre_ring->wp != tre_ring->rp &&
		    mhi_chan->ch_state == MHI_CH_STATE_ENABLED)
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_irq(&mhi_chan->lock);
	}

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/*
 * After receiving the MHI state change event from the device indicating the
 * transition to M1 state, the host can transition the device to M2 state
 * for keeping it in low power state.
 */
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
	if (state == MHI_PM_M2) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
		mhi_cntrl->dev_state = MHI_STATE_M2;

		write_unlock_irq(&mhi_cntrl->pm_lock);

		mhi_cntrl->M2++;
		wake_up_all(&mhi_cntrl->state_event);

		/* If there are any pending resources, exit M2 immediately */
		if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
			     atomic_read(&mhi_cntrl->dev_wake))) {
			dev_dbg(dev,
				"Exiting M2, pending_pkts: %d dev_wake: %d\n",
				atomic_read(&mhi_cntrl->pending_pkts),
				atomic_read(&mhi_cntrl->dev_wake));
			read_lock_bh(&mhi_cntrl->pm_lock);
			mhi_cntrl->wake_get(mhi_cntrl, true);
			mhi_cntrl->wake_put(mhi_cntrl, true);
			read_unlock_bh(&mhi_cntrl->pm_lock);
		} else {
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
		}
	} else {
		write_unlock_irq(&mhi_cntrl->pm_lock);
	}
}

/* MHI M3 completion handler */
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M3;
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (state != MHI_PM_M3) {
		dev_err(dev, "Unable to transition to M3 state\n");
		return -EIO;
	}

	mhi_cntrl->M3++;
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/* Handle device Mission Mode transition */
static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
	int i, ret;

	dev_dbg(dev, "Processing Mission Mode transition\n");

	write_lock_irq(&mhi_cntrl->pm_lock);
	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
		ee = mhi_get_exec_env(mhi_cntrl);

	if (!MHI_IN_MISSION_MODE(ee)) {
		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
		write_unlock_irq(&mhi_cntrl->pm_lock);
		wake_up_all(&mhi_cntrl->state_event);
		return -EIO;
	}
	mhi_cntrl->ee = ee;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	wake_up_all(&mhi_cntrl->state_event);

	device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
			      mhi_destroy_device);
	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);

	/* Force MHI to be in M0 state before continuing */
	ret = __mhi_device_get_sync(mhi_cntrl);
	if (ret)
		return ret;

	read_lock_bh(&mhi_cntrl->pm_lock);

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		ret = -EIO;
		goto error_mission_mode;
	}

	/* Add elements to all HW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev || !mhi_event->hw_ring)
			continue;

		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
		/* Update to all cores */
		smp_wmb();

		spin_lock_irq(&mhi_event->lock);
		if (MHI_DB_ACCESS_VALID(mhi_cntrl))
			mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	read_unlock_bh(&mhi_cntrl->pm_lock);

	/*
	 * The MHI devices are only created when the client device switches its
	 * Execution Environment (EE) to either SBL or AMSS states
	 */
	mhi_create_devices(mhi_cntrl);

	read_lock_bh(&mhi_cntrl->pm_lock);

error_mission_mode:
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return ret;
}

/* Handle shutdown transitions */
static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
				      bool destroy_device)
{
	enum mhi_pm_state cur_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	dev_dbg(dev, "Processing disable transition with PM state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state));

	mutex_lock(&mhi_cntrl->pm_mutex);

	/* Trigger MHI RESET so that the device will not access host memory */
	if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		/* Skip MHI RESET if in RDDM state */
		if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM)
			goto skip_mhi_reset;

		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				 MHICTRL_RESET_MASK, 0, 25000, mhi_cntrl->timeout_ms);
		if (ret)
			dev_err(dev, "Device failed to clear MHI Reset\n");

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);

		if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
			/* wait for ready to be set */
			ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs,
						 MHISTATUS, MHISTATUS_READY_MASK,
						 1, 25000, mhi_cntrl->timeout_ms);
			if (ret)
				dev_err(dev, "Device failed to enter READY state\n");
		}
	}

skip_mhi_reset:
	dev_dbg(dev,
		 "Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		disable_irq(mhi_cntrl->irq[mhi_event->irq]);
		tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);
	dev_dbg(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);

	/*
	 * Only destroy the 'struct device' for channels if indicated by the
	 * 'destroy_device' flag. During system suspend or hibernation, there
	 * is no need to destroy the 'struct device' as the endpoint device is
	 * still physically attached to the machine.
	 */
	if (destroy_device) {
		dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
		device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);
	}

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	/* Move to disable state */
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_DISABLE))
		dev_err(dev, "Error moving from PM state: %s to: %s\n",
			to_mhi_pm_state_str(cur_state),
			to_mhi_pm_state_str(MHI_PM_DISABLE));

	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		mhi_state_str(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Handle system error transitions */
static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state, prev_state;
	enum dev_st_transition next_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));

	/* We must notify MHI control driver so it can clean up first */
	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	prev_state = mhi_cntrl->pm_state;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_SYS_ERR_PROCESS) {
		dev_err(dev, "Failed to transition from PM state: %s to: %s\n",
			to_mhi_pm_state_str(cur_state),
			to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
		goto exit_sys_error_transition;
	}

	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
	mhi_cntrl->dev_state = MHI_STATE_RESET;

	/* Wake up threads waiting for state transition */
	wake_up_all(&mhi_cntrl->state_event);

	/* Trigger MHI RESET so that the device will not access host memory */
	if (MHI_REG_ACCESS_VALID(prev_state)) {
		u32 in_reset = -1;
		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);

		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = wait_event_timeout(mhi_cntrl->state_event,
					 mhi_read_reg_field(mhi_cntrl,
							    mhi_cntrl->regs,
							    MHICTRL,
							    MHICTRL_RESET_MASK,
							    &in_reset) ||
					!in_reset, timeout);
		if (!ret || in_reset) {
			dev_err(dev, "Device failed to exit MHI Reset state\n");
			write_lock_irq(&mhi_cntrl->pm_lock);
			cur_state = mhi_tryset_pm_state(mhi_cntrl,
							MHI_PM_SYS_ERR_FAIL);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			/* Shutdown may have occurred, otherwise cleanup now */
			if (cur_state != MHI_PM_SYS_ERR_FAIL)
				goto exit_sys_error_transition;
		}

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	dev_dbg(dev,
		"Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);
	dev_dbg(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);

	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
	     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	/* Transition to next state */
	if (MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
		write_lock_irq(&mhi_cntrl->pm_lock);
		cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
		write_unlock_irq(&mhi_cntrl->pm_lock);
		if (cur_state != MHI_PM_POR) {
			dev_err(dev, "Error moving to state %s from %s\n",
				to_mhi_pm_state_str(MHI_PM_POR),
				to_mhi_pm_state_str(cur_state));
			goto exit_sys_error_transition;
		}
		next_state = DEV_ST_TRANSITION_PBL;
	} else {
		next_state = DEV_ST_TRANSITION_READY;
	}

	mhi_queue_state_transition(mhi_cntrl, next_state);

exit_sys_error_transition:
	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		mhi_state_str(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Queue a new work item and schedule work */
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
			       enum dev_st_transition state)
{
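	/* GFP_ATOMIC so a transition can be queued from atomic (non-sleeping) context */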
	struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
	unsigned long flags;

	if (!item)
		return -ENOMEM;

	item->state = state;
	spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
	list_add_tail(&item->node, &mhi_cntrl->transition_list);
	spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);

	queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker);

	return 0;
}

/* SYS_ERR worker */
void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	/* skip if controller supports RDDM */
	if (mhi_cntrl->rddm_image) {
		dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
		return;
	}

	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
}

/* Device State Transition worker */
void mhi_pm_st_worker(struct work_struct *work)
{
	struct state_transition *itr, *tmp;
	LIST_HEAD(head);
	struct mhi_controller *mhi_cntrl = container_of(work,
							struct mhi_controller,
							st_worker);

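	/* Splice the queued transitions onto a local list so new ones can be queued while we process */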
	spin_lock_irq(&mhi_cntrl->transition_lock);
	list_splice_tail_init(&mhi_cntrl->transition_list, &head);
	spin_unlock_irq(&mhi_cntrl->transition_lock);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);

		trace_mhi_pm_st_transition(mhi_cntrl, itr->state);

		switch (itr->state) {
		case DEV_ST_TRANSITION_PBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
				mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			mhi_fw_load_handler(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			mhi_cntrl->ee = MHI_EE_SBL;
			write_unlock_irq(&mhi_cntrl->pm_lock);
			/*
			 * The MHI devices are only created when the client
			 * device switches its Execution Environment (EE) to
			 * either SBL or AMSS states
			 */
			mhi_create_devices(mhi_cntrl);
			if (mhi_cntrl->fbc_download)
				mhi_download_amss_image(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_MISSION_MODE:
			mhi_pm_mission_mode_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_FP:
			write_lock_irq(&mhi_cntrl->pm_lock);
			mhi_cntrl->ee = MHI_EE_FP;
			write_unlock_irq(&mhi_cntrl->pm_lock);
			mhi_create_devices(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_READY:
			mhi_ready_state_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SYS_ERR:
			mhi_pm_sys_error_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_DISABLE:
			mhi_pm_disable_transition(mhi_cntrl, false);
			break;
		case DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE:
			mhi_pm_disable_transition(mhi_cntrl, true);
			break;
		default:
			break;
		}
		kfree(itr);
	}
}

int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state new_state;
	int ret;

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return -EINVAL;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	/* Return busy if there are any pending resources */
	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts))
		return -EBUSY;

	/* Take MHI out of M2 state */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 mhi_cntrl->dev_state == MHI_STATE_M1 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Could not enter M0/M1 state");
		return -EIO;
	}

	write_lock_irq(&mhi_cntrl->pm_lock);

	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts)) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		return -EBUSY;
	}

	dev_dbg(dev, "Allowing M3 transition\n");
	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
	if (new_state != MHI_PM_M3_ENTER) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		dev_err(dev,
			"Error setting to PM state: %s from: %s\n",
			to_mhi_pm_state_str(MHI_PM_M3_ENTER),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M3 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	dev_dbg(dev, "Waiting for M3 completion\n");

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M3 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Did not enter M3 state, MHI state: %s, PM state: %s\n",
			mhi_state_str(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Notify clients about entering LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
		mutex_unlock(&itr->mutex);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_pm_suspend);

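/* If @force is set, resume even when the device is not currently reporting M3 */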
static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state cur_state;
	int ret;

	dev_dbg(dev, "Entered with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		mhi_state_str(mhi_cntrl->dev_state));

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return 0;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) {
		dev_warn(dev, "Resuming from non M3 state (%s)\n",
			 mhi_state_str(mhi_get_mhi_state(mhi_cntrl)));
		if (!force)
			return -EINVAL;
	}

	/* Notify clients about exiting LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
		mutex_unlock(&itr->mutex);
	}

	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
	if (cur_state != MHI_PM_M3_EXIT) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		dev_info(dev,
			 "Error setting to PM state: %s from: %s\n",
			 to_mhi_pm_state_str(MHI_PM_M3_EXIT),
			 to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M0 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 mhi_cntrl->dev_state == MHI_STATE_M2 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Did not enter M0 state, MHI state: %s, PM state: %s\n",
			mhi_state_str(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	return 0;
}

int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
{
	return __mhi_pm_resume(mhi_cntrl, false);
}
EXPORT_SYMBOL_GPL(mhi_pm_resume);

int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl)
{
	return __mhi_pm_resume(mhi_cntrl, true);
}
EXPORT_SYMBOL_GPL(mhi_pm_resume_force);

int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
	int ret;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}
	mhi_cntrl->wake_get(mhi_cntrl, true);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->pm_state == MHI_PM_M0 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_lock_bh(&mhi_cntrl->pm_lock);
		mhi_cntrl->wake_put(mhi_cntrl, false);
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}

	return 0;
}

/* Assert device wake db */
static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
{
	unsigned long flags;

	/*
	 * If force flag is set, then increment the wake count value and
	 * ring wake db
	 */
	if (unlikely(force)) {
		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		atomic_inc(&mhi_cntrl->dev_wake);
		if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	} else {
		/*
		 * If resources are already requested, then just increment
		 * the wake count value and return
		 */
		if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
			return;

		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
		    MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	}
}

/* De-assert device wake db */
static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
				  bool override)
{
	unsigned long flags;

	/*
	 * Only continue if there is a single resource, else just decrement
	 * and return
	 */
	if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
		return;

	spin_lock_irqsave(&mhi_cntrl->wlock, flags);
	if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
	    MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
	    mhi_cntrl->wake_set) {
		mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
		mhi_cntrl->wake_set = false;
	}
	spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
}

int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
	enum mhi_state state;
	enum mhi_ee_type current_ee;
	enum dev_st_transition next_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 interval_us = 25000; /* poll register field every 25 milliseconds */
	int ret, i;

	dev_info(dev, "Requested to power ON\n");

	/* Supply default wake routines if not provided by controller driver */
	if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
	    !mhi_cntrl->wake_toggle) {
		mhi_cntrl->wake_get = mhi_assert_dev_wake;
		mhi_cntrl->wake_put = mhi_deassert_dev_wake;
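		/* Controllers that retain doorbell access in M2 do not need to toggle device wake */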
		mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
			mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
	}

	mutex_lock(&mhi_cntrl->pm_mutex);
	mhi_cntrl->pm_state = MHI_PM_DISABLE;

	/* Setup BHI INTVEC */
	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	mhi_cntrl->pm_state = MHI_PM_POR;
	mhi_cntrl->ee = MHI_EE_MAX;
	current_ee = mhi_get_exec_env(mhi_cntrl);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* Confirm that the device is in valid exec env */
	if (!MHI_POWER_UP_CAPABLE(current_ee)) {
		dev_err(dev, "%s is not a valid EE for power on\n",
			TO_MHI_EXEC_STR(current_ee));
		ret = -EIO;
		goto error_exit;
	}

	state = mhi_get_mhi_state(mhi_cntrl);
	dev_dbg(dev, "Attempting power on with EE: %s, state: %s\n",
		TO_MHI_EXEC_STR(current_ee), mhi_state_str(state));

	if (state == MHI_STATE_SYS_ERR) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
		ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				 MHICTRL_RESET_MASK, 0, interval_us,
				 mhi_cntrl->timeout_ms);
		if (ret) {
			dev_info(dev, "Failed to reset MHI due to syserr state\n");
			goto error_exit;
		}

		/*
		 * Device clears BHI_INTVEC as part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	/* IRQs have been requested during probe, so we just need to enable them. */
	enable_irq(mhi_cntrl->irq[0]);

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		enable_irq(mhi_cntrl->irq[mhi_event->irq]);
	}

	/* Transition to next state */
	next_state = MHI_IN_PBL(current_ee) ?
		DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;

	mhi_queue_state_transition(mhi_cntrl, next_state);

	mutex_unlock(&mhi_cntrl->pm_mutex);

	dev_info(dev, "Power on setup success\n");

	return 0;

error_exit:
	mhi_cntrl->pm_state = MHI_PM_DISABLE;
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_async_power_up);

static void __mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful,
			     bool destroy_device)
{
	enum mhi_pm_state cur_state, transition_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_cntrl->pm_state;
	if (cur_state == MHI_PM_DISABLE) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		mutex_unlock(&mhi_cntrl->pm_mutex);
		return; /* Already powered down */
	}

	/* If it's not a graceful shutdown, force MHI to linkdown state */
	transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS :
			   MHI_PM_LD_ERR_FATAL_DETECT;

	cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
	if (cur_state != transition_state) {
		dev_err(dev, "Failed to move to state: %s from: %s\n",
			to_mhi_pm_state_str(transition_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		/* Force link down or error fatal detected state */
		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
	}

	/* mark device inactive to avoid any further host processing */
	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
	mhi_cntrl->dev_state = MHI_STATE_RESET;

	wake_up_all(&mhi_cntrl->state_event);

	write_unlock_irq(&mhi_cntrl->pm_lock);
	mutex_unlock(&mhi_cntrl->pm_mutex);

	if (destroy_device)
		mhi_queue_state_transition(mhi_cntrl,
					   DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE);
	else
		mhi_queue_state_transition(mhi_cntrl,
					   DEV_ST_TRANSITION_DISABLE);

	/* Wait for shutdown to complete */
	flush_work(&mhi_cntrl->st_worker);

	disable_irq(mhi_cntrl->irq[0]);
}

void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
{
	__mhi_power_down(mhi_cntrl, graceful, true);
}
EXPORT_SYMBOL_GPL(mhi_power_down);

void mhi_power_down_keep_dev(struct mhi_controller *mhi_cntrl,
			       bool graceful)
{
	__mhi_power_down(mhi_cntrl, graceful, false);
}
EXPORT_SYMBOL_GPL(mhi_power_down_keep_dev);

int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
{
	int ret = mhi_async_power_up(mhi_cntrl);
	u32 timeout_ms;

	if (ret)
		return ret;

	/* Some devices need more time to set ready during power up */
	timeout_ms = mhi_cntrl->ready_timeout_ms ?
		mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms;
	wait_event_timeout(mhi_cntrl->state_event,
			   MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
			   msecs_to_jiffies(timeout_ms));

	ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
	if (ret)
		mhi_power_down(mhi_cntrl, false);

	return ret;
}
EXPORT_SYMBOL(mhi_sync_power_up);

int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	/* Check if device is already in RDDM */
	if (mhi_cntrl->ee == MHI_EE_RDDM)
		return 0;

	dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);

	/* Wait for RDDM event */
	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->ee == MHI_EE_RDDM,
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
	ret = ret ? 0 : -EIO;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);

void mhi_device_get(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake++;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	mhi_cntrl->wake_get(mhi_cntrl, true);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_get);

int mhi_device_get_sync(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int ret;

	ret = __mhi_device_get_sync(mhi_cntrl);
	if (!ret)
		mhi_dev->dev_wake++;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_device_get_sync);

void mhi_device_put(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake--;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_put);