1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
4   *
5   */
6  
7  #include <linux/delay.h>
8  #include <linux/device.h>
9  #include <linux/dma-direction.h>
10  #include <linux/dma-mapping.h>
11  #include <linux/interrupt.h>
12  #include <linux/list.h>
13  #include <linux/mhi.h>
14  #include <linux/module.h>
15  #include <linux/skbuff.h>
16  #include <linux/slab.h>
17  #include "internal.h"
18  #include "trace.h"
19  
int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
			      void __iomem *base, u32 offset, u32 *out)
22  {
23  	return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
24  }
25  
int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
				    void __iomem *base, u32 offset,
				    u32 mask, u32 *out)
29  {
30  	u32 tmp;
31  	int ret;
32  
33  	ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
34  	if (ret)
35  		return ret;
36  
37  	*out = (tmp & mask) >> __ffs(mask);
38  
39  	return 0;
40  }
41  
int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
				    void __iomem *base, u32 offset,
				    u32 mask, u32 val, u32 delayus,
				    u32 timeout_ms)
46  {
47  	int ret;
48  	u32 out, retry = (timeout_ms * 1000) / delayus;
49  
50  	while (retry--) {
51  		ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out);
52  		if (ret)
53  			return ret;
54  
55  		if (out == val)
56  			return 0;
57  
58  		fsleep(delayus);
59  	}
60  
61  	return -ETIMEDOUT;
62  }
63  
void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
		   u32 offset, u32 val)
66  {
67  	mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
68  }
69  
int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
				     void __iomem *base, u32 offset, u32 mask,
				     u32 val)
73  {
74  	int ret;
75  	u32 tmp;
76  
77  	ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
78  	if (ret)
79  		return ret;
80  
81  	tmp &= ~mask;
82  	tmp |= (val << __ffs(mask));
83  	mhi_write_reg(mhi_cntrl, base, offset, tmp);
84  
85  	return 0;
86  }
87  
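/* Write a 64-bit doorbell value as two 32-bit register writes, upper word first */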
void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
		  dma_addr_t db_val)
90  {
91  	mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
92  	mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
93  }
94  
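/*
 * Burst mode doorbell: ring the doorbell only while db_mode is set (e.g. after
 * an OOB/DB_MODE completion event), then clear db_mode so further writes are
 * skipped until the mode is requested again.
 */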
void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
		     struct db_cfg *db_cfg,
		     void __iomem *db_addr,
		     dma_addr_t db_val)
99  {
100  	if (db_cfg->db_mode) {
101  		db_cfg->db_val = db_val;
102  		mhi_write_db(mhi_cntrl, db_addr, db_val);
103  		db_cfg->db_mode = 0;
104  	}
105  }
106  
void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
			     struct db_cfg *db_cfg,
			     void __iomem *db_addr,
			     dma_addr_t db_val)
111  {
112  	db_cfg->db_val = db_val;
113  	mhi_write_db(mhi_cntrl, db_addr, db_val);
114  }
115  
void mhi_ring_er_db(struct mhi_event *mhi_event)
117  {
118  	struct mhi_ring *ring = &mhi_event->ring;
119  
120  	mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
121  				     ring->db_addr, le64_to_cpu(*ring->ctxt_wp));
122  }
123  
void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
125  {
126  	dma_addr_t db;
127  	struct mhi_ring *ring = &mhi_cmd->ring;
128  
129  	db = ring->iommu_base + (ring->wp - ring->base);
130  	*ring->ctxt_wp = cpu_to_le64(db);
131  	mhi_write_db(mhi_cntrl, ring->db_addr, db);
132  }
133  
void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
		      struct mhi_chan *mhi_chan)
136  {
137  	struct mhi_ring *ring = &mhi_chan->tre_ring;
138  	dma_addr_t db;
139  
140  	db = ring->iommu_base + (ring->wp - ring->base);
141  
	/*
	 * Writes to the new ring element must be visible to the hardware
	 * before letting the hardware know there is a new element to fetch.
	 */
146  	dma_wmb();
147  	*ring->ctxt_wp = cpu_to_le64(db);
148  
149  	mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
150  				    ring->db_addr, db);
151  }
152  
enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
154  {
155  	u32 exec;
156  	int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
157  
158  	return (ret) ? MHI_EE_MAX : exec;
159  }
160  EXPORT_SYMBOL_GPL(mhi_get_exec_env);
161  
enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
163  {
164  	u32 state;
165  	int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
166  				     MHISTATUS_MHISTATE_MASK, &state);
167  	return ret ? MHI_STATE_MAX : state;
168  }
169  EXPORT_SYMBOL_GPL(mhi_get_mhi_state);
170  
void mhi_soc_reset(struct mhi_controller *mhi_cntrl)
172  {
173  	if (mhi_cntrl->reset) {
174  		mhi_cntrl->reset(mhi_cntrl);
175  		return;
176  	}
177  
178  	/* Generic MHI SoC reset */
179  	mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET,
180  		      MHI_SOC_RESET_REQ);
181  }
182  EXPORT_SYMBOL_GPL(mhi_soc_reset);
183  
int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
			 struct mhi_buf_info *buf_info)
186  {
187  	buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
188  					  buf_info->v_addr, buf_info->len,
189  					  buf_info->dir);
190  	if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
191  		return -ENOMEM;
192  
193  	return 0;
194  }
195  
int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
			  struct mhi_buf_info *buf_info)
198  {
199  	void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
200  				       &buf_info->p_addr, GFP_ATOMIC);
201  
202  	if (!buf)
203  		return -ENOMEM;
204  
205  	if (buf_info->dir == DMA_TO_DEVICE)
206  		memcpy(buf, buf_info->v_addr, buf_info->len);
207  
208  	buf_info->bb_addr = buf;
209  
210  	return 0;
211  }
212  
void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
			    struct mhi_buf_info *buf_info)
215  {
216  	dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
217  			 buf_info->dir);
218  }
219  
void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
			     struct mhi_buf_info *buf_info)
222  {
223  	if (buf_info->dir == DMA_FROM_DEVICE)
224  		memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);
225  
226  	dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
227  			  buf_info->bb_addr, buf_info->p_addr);
228  }
229  
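/*
 * Number of free elements between the write pointer and the read pointer. One
 * element is always kept unused so that a full ring can be distinguished from
 * an empty one (wp == rp). For example, in a four-element ring with rp at
 * element 0 and wp at element 2, one element is free.
 */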
static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
				      struct mhi_ring *ring)
232  {
233  	int nr_el;
234  
235  	if (ring->wp < ring->rp) {
236  		nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
237  	} else {
238  		nr_el = (ring->rp - ring->base) / ring->el_size;
239  		nr_el += ((ring->base + ring->len - ring->wp) /
240  			  ring->el_size) - 1;
241  	}
242  
243  	return nr_el;
244  }
245  
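/* Translate a ring address as seen by the device (IOMMU address) into a host virtual address */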
static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
247  {
248  	return (addr - ring->iommu_base) + ring->base;
249  }
250  
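/* Advance the ring write pointer by one element, wrapping around at the end of the ring */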
static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
				 struct mhi_ring *ring)
253  {
254  	ring->wp += ring->el_size;
255  	if (ring->wp >= (ring->base + ring->len))
256  		ring->wp = ring->base;
	/* Make the ring pointer update visible to other CPUs */
258  	smp_wmb();
259  }
260  
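/* Advance the ring read pointer by one element, wrapping around at the end of the ring */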
static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
				 struct mhi_ring *ring)
263  {
264  	ring->rp += ring->el_size;
265  	if (ring->rp >= (ring->base + ring->len))
266  		ring->rp = ring->base;
	/* Make the ring pointer update visible to other CPUs */
268  	smp_wmb();
269  }
270  
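/*
 * A ring pointer reported by the device is valid only if it falls within the
 * ring's IOMMU address range and is aligned to the ring element size.
 */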
static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
272  {
273  	return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len &&
274  			!(addr & (sizeof(struct mhi_ring_element) - 1));
275  }
276  
int mhi_destroy_device(struct device *dev, void *data)
278  {
279  	struct mhi_chan *ul_chan, *dl_chan;
280  	struct mhi_device *mhi_dev;
281  	struct mhi_controller *mhi_cntrl;
282  	enum mhi_ee_type ee = MHI_EE_MAX;
283  
284  	if (dev->bus != &mhi_bus_type)
285  		return 0;
286  
287  	mhi_dev = to_mhi_device(dev);
288  	mhi_cntrl = mhi_dev->mhi_cntrl;
289  
	/* Only destroy virtual devices that are attached to the bus */
291  	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
292  		return 0;
293  
294  	ul_chan = mhi_dev->ul_chan;
295  	dl_chan = mhi_dev->dl_chan;
296  
	/*
	 * If an execution environment is specified, remove only those devices
	 * that were started in it, based on the ee_mask of the channels, as we
	 * move on to a different execution environment.
	 */
302  	if (data)
303  		ee = *(enum mhi_ee_type *)data;
304  
305  	/*
306  	 * For the suspend and resume case, this function will get called
307  	 * without mhi_unregister_controller(). Hence, we need to drop the
308  	 * references to mhi_dev created for ul and dl channels. We can
309  	 * be sure that there will be no instances of mhi_dev left after
310  	 * this.
311  	 */
312  	if (ul_chan) {
313  		if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
314  			return 0;
315  
316  		put_device(&ul_chan->mhi_dev->dev);
317  	}
318  
319  	if (dl_chan) {
320  		if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
321  			return 0;
322  
323  		put_device(&dl_chan->mhi_dev->dev);
324  	}
325  
326  	dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
327  		 mhi_dev->name);
328  
329  	/* Notify the client and remove the device from MHI bus */
330  	device_del(dev);
331  	put_device(dev);
332  
333  	return 0;
334  }
335  
int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
				enum dma_data_direction dir)
338  {
339  	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
340  	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
341  		mhi_dev->ul_chan : mhi_dev->dl_chan;
342  	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
343  
344  	return get_nr_avail_ring_elements(mhi_cntrl, tre_ring);
345  }
346  EXPORT_SYMBOL_GPL(mhi_get_free_desc_count);
347  
void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
349  {
350  	struct mhi_driver *mhi_drv;
351  
352  	if (!mhi_dev->dev.driver)
353  		return;
354  
355  	mhi_drv = to_mhi_driver(mhi_dev->dev.driver);
356  
357  	if (mhi_drv->status_cb)
358  		mhi_drv->status_cb(mhi_dev, cb_reason);
359  }
360  EXPORT_SYMBOL_GPL(mhi_notify);
361  
362  /* Bind MHI channels to MHI devices */
void mhi_create_devices(struct mhi_controller *mhi_cntrl)
364  {
365  	struct mhi_chan *mhi_chan;
366  	struct mhi_device *mhi_dev;
367  	struct device *dev = &mhi_cntrl->mhi_dev->dev;
368  	int i, ret;
369  
370  	mhi_chan = mhi_cntrl->mhi_chan;
371  	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
372  		if (!mhi_chan->configured || mhi_chan->mhi_dev ||
373  		    !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
374  			continue;
375  		mhi_dev = mhi_alloc_device(mhi_cntrl);
376  		if (IS_ERR(mhi_dev))
377  			return;
378  
379  		mhi_dev->dev_type = MHI_DEVICE_XFER;
380  		switch (mhi_chan->dir) {
381  		case DMA_TO_DEVICE:
382  			mhi_dev->ul_chan = mhi_chan;
383  			mhi_dev->ul_chan_id = mhi_chan->chan;
384  			break;
385  		case DMA_FROM_DEVICE:
386  			/* We use dl_chan as offload channels */
387  			mhi_dev->dl_chan = mhi_chan;
388  			mhi_dev->dl_chan_id = mhi_chan->chan;
389  			break;
390  		default:
391  			dev_err(dev, "Direction not supported\n");
392  			put_device(&mhi_dev->dev);
393  			return;
394  		}
395  
396  		get_device(&mhi_dev->dev);
397  		mhi_chan->mhi_dev = mhi_dev;
398  
399  		/* Check next channel if it matches */
400  		if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
401  			if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
402  				i++;
403  				mhi_chan++;
404  				if (mhi_chan->dir == DMA_TO_DEVICE) {
405  					mhi_dev->ul_chan = mhi_chan;
406  					mhi_dev->ul_chan_id = mhi_chan->chan;
407  				} else {
408  					mhi_dev->dl_chan = mhi_chan;
409  					mhi_dev->dl_chan_id = mhi_chan->chan;
410  				}
411  				get_device(&mhi_dev->dev);
412  				mhi_chan->mhi_dev = mhi_dev;
413  			}
414  		}
415  
		/* Channel name is the same for both UL and DL */
417  		mhi_dev->name = mhi_chan->name;
418  		dev_set_name(&mhi_dev->dev, "%s_%s",
419  			     dev_name(&mhi_cntrl->mhi_dev->dev),
420  			     mhi_dev->name);
421  
422  		/* Init wakeup source if available */
423  		if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
424  			device_init_wakeup(&mhi_dev->dev, true);
425  
426  		ret = device_add(&mhi_dev->dev);
427  		if (ret)
428  			put_device(&mhi_dev->dev);
429  	}
430  }
431  
irqreturn_t mhi_irq_handler(int irq_number, void *dev)
433  {
434  	struct mhi_event *mhi_event = dev;
435  	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
436  	struct mhi_event_ctxt *er_ctxt;
437  	struct mhi_ring *ev_ring = &mhi_event->ring;
438  	dma_addr_t ptr;
439  	void *dev_rp;
440  
	/*
	 * If CONFIG_DEBUG_SHIRQ is set, the IRQ handler will get invoked during
	 * __free_irq() and by that time mhi_ctxt would have been freed. So check
	 * for the existence of mhi_ctxt before handling the IRQs.
	 */
446  	if (!mhi_cntrl->mhi_ctxt) {
447  		dev_dbg(&mhi_cntrl->mhi_dev->dev,
448  			"mhi_ctxt has been freed\n");
449  		return IRQ_HANDLED;
450  	}
451  
452  	er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
453  	ptr = le64_to_cpu(er_ctxt->rp);
454  
455  	if (!is_valid_ring_ptr(ev_ring, ptr)) {
456  		dev_err(&mhi_cntrl->mhi_dev->dev,
457  			"Event ring rp points outside of the event ring\n");
458  		return IRQ_HANDLED;
459  	}
460  
461  	dev_rp = mhi_to_virtual(ev_ring, ptr);
462  
463  	/* Only proceed if event ring has pending events */
464  	if (ev_ring->rp == dev_rp)
465  		return IRQ_HANDLED;
466  
	/* For a client-managed event ring, notify the client of pending data */
468  	if (mhi_event->cl_manage) {
469  		struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
470  		struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
471  
472  		if (mhi_dev)
473  			mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
474  	} else {
475  		tasklet_schedule(&mhi_event->task);
476  	}
477  
478  	return IRQ_HANDLED;
479  }
480  
irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
482  {
483  	struct mhi_controller *mhi_cntrl = priv;
484  	struct device *dev = &mhi_cntrl->mhi_dev->dev;
485  	enum mhi_state state;
486  	enum mhi_pm_state pm_state = 0;
487  	enum mhi_ee_type ee;
488  
489  	write_lock_irq(&mhi_cntrl->pm_lock);
490  	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
491  		write_unlock_irq(&mhi_cntrl->pm_lock);
492  		goto exit_intvec;
493  	}
494  
495  	state = mhi_get_mhi_state(mhi_cntrl);
496  	ee = mhi_get_exec_env(mhi_cntrl);
497  
498  	trace_mhi_intvec_states(mhi_cntrl, ee, state);
499  	if (state == MHI_STATE_SYS_ERR) {
500  		dev_dbg(dev, "System error detected\n");
501  		pm_state = mhi_tryset_pm_state(mhi_cntrl,
502  					       MHI_PM_SYS_ERR_DETECT);
503  	}
504  	write_unlock_irq(&mhi_cntrl->pm_lock);
505  
506  	if (pm_state != MHI_PM_SYS_ERR_DETECT)
507  		goto exit_intvec;
508  
509  	switch (ee) {
510  	case MHI_EE_RDDM:
511  		/* proceed if power down is not already in progress */
512  		if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
513  			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
514  			mhi_cntrl->ee = ee;
515  			wake_up_all(&mhi_cntrl->state_event);
516  		}
517  		break;
518  	case MHI_EE_PBL:
519  	case MHI_EE_EDL:
520  	case MHI_EE_PTHRU:
521  		mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
522  		mhi_cntrl->ee = ee;
523  		wake_up_all(&mhi_cntrl->state_event);
524  		mhi_pm_sys_err_handler(mhi_cntrl);
525  		break;
526  	default:
527  		wake_up_all(&mhi_cntrl->state_event);
528  		mhi_pm_sys_err_handler(mhi_cntrl);
529  		break;
530  	}
531  
532  exit_intvec:
533  
534  	return IRQ_HANDLED;
535  }
536  
irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
538  {
539  	struct mhi_controller *mhi_cntrl = dev;
540  
541  	/* Wake up events waiting for state change */
542  	wake_up_all(&mhi_cntrl->state_event);
543  
544  	return IRQ_WAKE_THREAD;
545  }
546  
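/*
 * Event ring elements are consumed and immediately handed back to the device:
 * advance both the local read and write pointers and publish the new write
 * pointer through the event ring context.
 */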
static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
					struct mhi_ring *ring)
549  {
550  	/* Update the WP */
551  	ring->wp += ring->el_size;
552  
553  	if (ring->wp >= (ring->base + ring->len))
554  		ring->wp = ring->base;
555  
556  	*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + (ring->wp - ring->base));
557  
558  	/* Update the RP */
559  	ring->rp += ring->el_size;
560  	if (ring->rp >= (ring->base + ring->len))
561  		ring->rp = ring->base;
562  
	/* Make the ring pointer updates visible to all CPUs */
564  	smp_wmb();
565  }
566  
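/*
 * Process a transfer completion event for a channel: complete the buffers the
 * event covers, invoke the client's xfer callback, and handle OOB/DB_MODE
 * doorbell mode changes.
 */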
static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
			    struct mhi_ring_element *event,
			    struct mhi_chan *mhi_chan)
570  {
571  	struct mhi_ring *buf_ring, *tre_ring;
572  	struct device *dev = &mhi_cntrl->mhi_dev->dev;
573  	struct mhi_result result;
574  	unsigned long flags = 0;
575  	u32 ev_code;
576  
577  	ev_code = MHI_TRE_GET_EV_CODE(event);
578  	buf_ring = &mhi_chan->buf_ring;
579  	tre_ring = &mhi_chan->tre_ring;
580  
581  	result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
582  		-EOVERFLOW : 0;
583  
	/*
	 * If it's a DB event, we need to grab the lock as a writer with
	 * preemption disabled, because we have to update the doorbell
	 * register and another thread could be doing the same.
	 */
590  	if (ev_code >= MHI_EV_CC_OOB)
591  		write_lock_irqsave(&mhi_chan->lock, flags);
592  	else
593  		read_lock_bh(&mhi_chan->lock);
594  
595  	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
596  		goto end_process_tx_event;
597  
598  	switch (ev_code) {
599  	case MHI_EV_CC_OVERFLOW:
600  	case MHI_EV_CC_EOB:
601  	case MHI_EV_CC_EOT:
602  	{
603  		dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
604  		struct mhi_ring_element *local_rp, *ev_tre;
605  		void *dev_rp;
606  		struct mhi_buf_info *buf_info;
607  		u16 xfer_len;
608  
609  		if (!is_valid_ring_ptr(tre_ring, ptr)) {
610  			dev_err(&mhi_cntrl->mhi_dev->dev,
611  				"Event element points outside of the tre ring\n");
612  			break;
613  		}
		/* Get the TRE this event points to */
615  		ev_tre = mhi_to_virtual(tre_ring, ptr);
616  
617  		dev_rp = ev_tre + 1;
618  		if (dev_rp >= (tre_ring->base + tre_ring->len))
619  			dev_rp = tre_ring->base;
620  
621  		result.dir = mhi_chan->dir;
622  
623  		local_rp = tre_ring->rp;
624  		while (local_rp != dev_rp) {
625  			buf_info = buf_ring->rp;
626  			/* If it's the last TRE, get length from the event */
627  			if (local_rp == ev_tre)
628  				xfer_len = MHI_TRE_GET_EV_LEN(event);
629  			else
630  				xfer_len = buf_info->len;
631  
632  			/* Unmap if it's not pre-mapped by client */
633  			if (likely(!buf_info->pre_mapped))
634  				mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
635  
636  			result.buf_addr = buf_info->cb_buf;
637  
638  			/* truncate to buf len if xfer_len is larger */
639  			result.bytes_xferd =
640  				min_t(u16, xfer_len, buf_info->len);
641  			mhi_del_ring_element(mhi_cntrl, buf_ring);
642  			mhi_del_ring_element(mhi_cntrl, tre_ring);
643  			local_rp = tre_ring->rp;
644  
645  			read_unlock_bh(&mhi_chan->lock);
646  
647  			/* notify client */
648  			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
649  
650  			if (mhi_chan->dir == DMA_TO_DEVICE) {
651  				atomic_dec(&mhi_cntrl->pending_pkts);
652  				/* Release the reference got from mhi_queue() */
653  				mhi_cntrl->runtime_put(mhi_cntrl);
654  			}
655  
			/*
			 * Recycle the buffer if it is pre-allocated. If there
			 * is an error, there is not much we can do apart from
			 * dropping the packet.
			 */
661  			if (mhi_chan->pre_alloc) {
662  				if (mhi_queue_buf(mhi_chan->mhi_dev,
663  						  mhi_chan->dir,
664  						  buf_info->cb_buf,
665  						  buf_info->len, MHI_EOT)) {
666  					dev_err(dev,
667  						"Error recycling buffer for chan:%d\n",
668  						mhi_chan->chan);
669  					kfree(buf_info->cb_buf);
670  				}
671  			}
672  
673  			read_lock_bh(&mhi_chan->lock);
674  		}
675  		break;
676  	} /* CC_EOT */
677  	case MHI_EV_CC_OOB:
678  	case MHI_EV_CC_DB_MODE:
679  	{
680  		unsigned long pm_lock_flags;
681  
682  		mhi_chan->db_cfg.db_mode = 1;
683  		read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags);
684  		if (tre_ring->wp != tre_ring->rp &&
685  		    MHI_DB_ACCESS_VALID(mhi_cntrl)) {
686  			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
687  		}
688  		read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);
689  		break;
690  	}
691  	case MHI_EV_CC_BAD_TRE:
692  	default:
693  		dev_err(dev, "Unknown event 0x%x\n", ev_code);
694  		break;
695  	} /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */
696  
697  end_process_tx_event:
698  	if (ev_code >= MHI_EV_CC_OOB)
699  		write_unlock_irqrestore(&mhi_chan->lock, flags);
700  	else
701  		read_unlock_bh(&mhi_chan->lock);
702  
703  	return 0;
704  }
705  
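/*
 * Process an RSC completion event for a channel. The cookie carried in the
 * event is the offset of the corresponding buffer descriptor within the
 * buffer ring.
 */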
static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
			   struct mhi_ring_element *event,
			   struct mhi_chan *mhi_chan)
709  {
710  	struct mhi_ring *buf_ring, *tre_ring;
711  	struct mhi_buf_info *buf_info;
712  	struct mhi_result result;
713  	int ev_code;
714  	u32 cookie; /* offset to local descriptor */
715  	u16 xfer_len;
716  
717  	buf_ring = &mhi_chan->buf_ring;
718  	tre_ring = &mhi_chan->tre_ring;
719  
720  	ev_code = MHI_TRE_GET_EV_CODE(event);
721  	cookie = MHI_TRE_GET_EV_COOKIE(event);
722  	xfer_len = MHI_TRE_GET_EV_LEN(event);
723  
	/* Warn if we received an out-of-bounds cookie */
725  	WARN_ON(cookie >= buf_ring->len);
726  
727  	buf_info = buf_ring->base + cookie;
728  
729  	result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
730  		-EOVERFLOW : 0;
731  
732  	/* truncate to buf len if xfer_len is larger */
733  	result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
734  	result.buf_addr = buf_info->cb_buf;
735  	result.dir = mhi_chan->dir;
736  
737  	read_lock_bh(&mhi_chan->lock);
738  
739  	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
740  		goto end_process_rsc_event;
741  
742  	WARN_ON(!buf_info->used);
743  
744  	/* notify the client */
745  	mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
746  
	/*
	 * Note: We're arbitrarily incrementing RP even though the completion
	 * packet we processed might not be the one RP points to. We can do
	 * this because the device is guaranteed to cache descriptors in the
	 * order it receives them, so even though the completion event is for
	 * a different descriptor we can re-use all descriptors in between.
	 * Example:
	 * Transfer Ring has descriptors: A, B, C, D
	 * The last descriptor the host queued is D (WP) and the first
	 * descriptor the host queued is A (RP).
	 * The completion event we just serviced is for descriptor C.
	 * Then we can safely queue descriptors to replace A, B, and C
	 * even though the host did not receive completions for A and B.
	 */
761  	mhi_del_ring_element(mhi_cntrl, tre_ring);
762  	buf_info->used = false;
763  
764  end_process_rsc_event:
765  	read_unlock_bh(&mhi_chan->lock);
766  
767  	return 0;
768  }
769  
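/*
 * Handle a command completion event: locate the command ring element it refers
 * to, record the completion code for the channel and wake up the waiter.
 */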
static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
				       struct mhi_ring_element *tre)
772  {
773  	dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
774  	struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
775  	struct mhi_ring *mhi_ring = &cmd_ring->ring;
776  	struct mhi_ring_element *cmd_pkt;
777  	struct mhi_chan *mhi_chan;
778  	u32 chan;
779  
780  	if (!is_valid_ring_ptr(mhi_ring, ptr)) {
781  		dev_err(&mhi_cntrl->mhi_dev->dev,
782  			"Event element points outside of the cmd ring\n");
783  		return;
784  	}
785  
786  	cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
787  
788  	chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
789  
790  	if (chan < mhi_cntrl->max_chan &&
791  	    mhi_cntrl->mhi_chan[chan].configured) {
792  		mhi_chan = &mhi_cntrl->mhi_chan[chan];
793  		write_lock_bh(&mhi_chan->lock);
794  		mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
795  		complete(&mhi_chan->completion);
796  		write_unlock_bh(&mhi_chan->lock);
797  	} else {
798  		dev_err(&mhi_cntrl->mhi_dev->dev,
799  			"Completion packet for invalid channel ID: %d\n", chan);
800  	}
801  
802  	mhi_del_ring_element(mhi_cntrl, mhi_ring);
803  }
804  
int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
			     struct mhi_event *mhi_event,
			     u32 event_quota)
808  {
809  	struct mhi_ring_element *dev_rp, *local_rp;
810  	struct mhi_ring *ev_ring = &mhi_event->ring;
811  	struct mhi_event_ctxt *er_ctxt =
812  		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
813  	struct mhi_chan *mhi_chan;
814  	struct device *dev = &mhi_cntrl->mhi_dev->dev;
815  	u32 chan;
816  	int count = 0;
817  	dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
818  
	/*
	 * This is a quick check to avoid unnecessary event processing
	 * in case MHI is already in the error state, but it's still possible
	 * to transition to the error state while processing events.
	 */
824  	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
825  		return -EIO;
826  
827  	if (!is_valid_ring_ptr(ev_ring, ptr)) {
828  		dev_err(&mhi_cntrl->mhi_dev->dev,
829  			"Event ring rp points outside of the event ring\n");
830  		return -EIO;
831  	}
832  
833  	dev_rp = mhi_to_virtual(ev_ring, ptr);
834  	local_rp = ev_ring->rp;
835  
836  	while (dev_rp != local_rp) {
837  		enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
838  
839  		trace_mhi_ctrl_event(mhi_cntrl, local_rp);
840  
841  		switch (type) {
842  		case MHI_PKT_TYPE_BW_REQ_EVENT:
843  		{
844  			struct mhi_link_info *link_info;
845  
846  			link_info = &mhi_cntrl->mhi_link_info;
847  			write_lock_irq(&mhi_cntrl->pm_lock);
848  			link_info->target_link_speed =
849  				MHI_TRE_GET_EV_LINKSPEED(local_rp);
850  			link_info->target_link_width =
851  				MHI_TRE_GET_EV_LINKWIDTH(local_rp);
852  			write_unlock_irq(&mhi_cntrl->pm_lock);
853  			dev_dbg(dev, "Received BW_REQ event\n");
854  			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
855  			break;
856  		}
857  		case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
858  		{
859  			enum mhi_state new_state;
860  
861  			new_state = MHI_TRE_GET_EV_STATE(local_rp);
862  
863  			dev_dbg(dev, "State change event to state: %s\n",
864  				mhi_state_str(new_state));
865  
866  			switch (new_state) {
867  			case MHI_STATE_M0:
868  				mhi_pm_m0_transition(mhi_cntrl);
869  				break;
870  			case MHI_STATE_M1:
871  				mhi_pm_m1_transition(mhi_cntrl);
872  				break;
873  			case MHI_STATE_M3:
874  				mhi_pm_m3_transition(mhi_cntrl);
875  				break;
876  			case MHI_STATE_SYS_ERR:
877  			{
878  				enum mhi_pm_state pm_state;
879  
880  				dev_dbg(dev, "System error detected\n");
881  				write_lock_irq(&mhi_cntrl->pm_lock);
882  				pm_state = mhi_tryset_pm_state(mhi_cntrl,
883  							MHI_PM_SYS_ERR_DETECT);
884  				write_unlock_irq(&mhi_cntrl->pm_lock);
885  				if (pm_state == MHI_PM_SYS_ERR_DETECT)
886  					mhi_pm_sys_err_handler(mhi_cntrl);
887  				break;
888  			}
889  			default:
890  				dev_err(dev, "Invalid state: %s\n",
891  					mhi_state_str(new_state));
892  			}
893  
894  			break;
895  		}
896  		case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
897  			mhi_process_cmd_completion(mhi_cntrl, local_rp);
898  			break;
899  		case MHI_PKT_TYPE_EE_EVENT:
900  		{
901  			enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
902  			enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);
903  
904  			dev_dbg(dev, "Received EE event: %s\n",
905  				TO_MHI_EXEC_STR(event));
906  			switch (event) {
907  			case MHI_EE_SBL:
908  				st = DEV_ST_TRANSITION_SBL;
909  				break;
910  			case MHI_EE_WFW:
911  			case MHI_EE_AMSS:
912  				st = DEV_ST_TRANSITION_MISSION_MODE;
913  				break;
914  			case MHI_EE_FP:
915  				st = DEV_ST_TRANSITION_FP;
916  				break;
917  			case MHI_EE_RDDM:
918  				mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
919  				write_lock_irq(&mhi_cntrl->pm_lock);
920  				mhi_cntrl->ee = event;
921  				write_unlock_irq(&mhi_cntrl->pm_lock);
922  				wake_up_all(&mhi_cntrl->state_event);
923  				break;
924  			default:
925  				dev_err(dev,
926  					"Unhandled EE event: 0x%x\n", type);
927  			}
928  			if (st != DEV_ST_TRANSITION_MAX)
929  				mhi_queue_state_transition(mhi_cntrl, st);
930  
931  			break;
932  		}
933  		case MHI_PKT_TYPE_TX_EVENT:
934  			chan = MHI_TRE_GET_EV_CHID(local_rp);
935  
936  			WARN_ON(chan >= mhi_cntrl->max_chan);
937  
938  			/*
939  			 * Only process the event ring elements whose channel
940  			 * ID is within the maximum supported range.
941  			 */
942  			if (chan < mhi_cntrl->max_chan) {
943  				mhi_chan = &mhi_cntrl->mhi_chan[chan];
944  				if (!mhi_chan->configured)
945  					break;
946  				parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
947  			}
948  			break;
949  		default:
950  			dev_err(dev, "Unhandled event type: %d\n", type);
951  			break;
952  		}
953  
954  		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
955  		local_rp = ev_ring->rp;
956  
957  		ptr = le64_to_cpu(er_ctxt->rp);
958  		if (!is_valid_ring_ptr(ev_ring, ptr)) {
959  			dev_err(&mhi_cntrl->mhi_dev->dev,
960  				"Event ring rp points outside of the event ring\n");
961  			return -EIO;
962  		}
963  
964  		dev_rp = mhi_to_virtual(ev_ring, ptr);
965  		count++;
966  	}
967  
968  	read_lock_bh(&mhi_cntrl->pm_lock);
969  
970  	/* Ring EV DB only if there is any pending element to process */
971  	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count)
972  		mhi_ring_er_db(mhi_event);
973  	read_unlock_bh(&mhi_cntrl->pm_lock);
974  
975  	return count;
976  }
977  
int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
				struct mhi_event *mhi_event,
				u32 event_quota)
981  {
982  	struct mhi_ring_element *dev_rp, *local_rp;
983  	struct mhi_ring *ev_ring = &mhi_event->ring;
984  	struct mhi_event_ctxt *er_ctxt =
985  		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
986  	int count = 0;
987  	u32 chan;
988  	struct mhi_chan *mhi_chan;
989  	dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
990  
991  	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
992  		return -EIO;
993  
994  	if (!is_valid_ring_ptr(ev_ring, ptr)) {
995  		dev_err(&mhi_cntrl->mhi_dev->dev,
996  			"Event ring rp points outside of the event ring\n");
997  		return -EIO;
998  	}
999  
1000  	dev_rp = mhi_to_virtual(ev_ring, ptr);
1001  	local_rp = ev_ring->rp;
1002  
1003  	while (dev_rp != local_rp && event_quota > 0) {
1004  		enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
1005  
1006  		trace_mhi_data_event(mhi_cntrl, local_rp);
1007  
1008  		chan = MHI_TRE_GET_EV_CHID(local_rp);
1009  
1010  		WARN_ON(chan >= mhi_cntrl->max_chan);
1011  
1012  		/*
1013  		 * Only process the event ring elements whose channel
1014  		 * ID is within the maximum supported range.
1015  		 */
1016  		if (chan < mhi_cntrl->max_chan &&
1017  		    mhi_cntrl->mhi_chan[chan].configured) {
1018  			mhi_chan = &mhi_cntrl->mhi_chan[chan];
1019  
1020  			if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
1021  				parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
1022  				event_quota--;
1023  			} else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
1024  				parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
1025  				event_quota--;
1026  			}
1027  		}
1028  
1029  		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
1030  		local_rp = ev_ring->rp;
1031  
1032  		ptr = le64_to_cpu(er_ctxt->rp);
1033  		if (!is_valid_ring_ptr(ev_ring, ptr)) {
1034  			dev_err(&mhi_cntrl->mhi_dev->dev,
1035  				"Event ring rp points outside of the event ring\n");
1036  			return -EIO;
1037  		}
1038  
1039  		dev_rp = mhi_to_virtual(ev_ring, ptr);
1040  		count++;
1041  	}
1042  	read_lock_bh(&mhi_cntrl->pm_lock);
1043  
1044  	/* Ring EV DB only if there is any pending element to process */
1045  	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count)
1046  		mhi_ring_er_db(mhi_event);
1047  	read_unlock_bh(&mhi_cntrl->pm_lock);
1048  
1049  	return count;
1050  }
1051  
void mhi_ev_task(unsigned long data)
1053  {
1054  	struct mhi_event *mhi_event = (struct mhi_event *)data;
1055  	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
1056  
1057  	/* process all pending events */
1058  	spin_lock_bh(&mhi_event->lock);
1059  	mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
1060  	spin_unlock_bh(&mhi_event->lock);
1061  }
1062  
void mhi_ctrl_ev_task(unsigned long data)
1064  {
1065  	struct mhi_event *mhi_event = (struct mhi_event *)data;
1066  	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
1067  	struct device *dev = &mhi_cntrl->mhi_dev->dev;
1068  	enum mhi_state state;
1069  	enum mhi_pm_state pm_state = 0;
1070  	int ret;
1071  
	/*
	 * We can check the PM state without a lock here because there is no
	 * way the PM state can change from "register access valid" to "no
	 * access" while this thread is executing.
	 */
1077  	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		/*
		 * We may have a pending event but are not allowed to process
		 * it since we are probably in a suspended state, so trigger a
		 * resume.
		 */
1083  		mhi_trigger_resume(mhi_cntrl);
1084  
1085  		return;
1086  	}
1087  
1088  	/* Process ctrl events */
1089  	ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
1090  
	/*
	 * We received an IRQ but have no events to process. Maybe the device
	 * went to the SYS_ERR state? Check the state to confirm.
	 */
1095  	if (!ret) {
1096  		write_lock_irq(&mhi_cntrl->pm_lock);
1097  		state = mhi_get_mhi_state(mhi_cntrl);
1098  		if (state == MHI_STATE_SYS_ERR) {
1099  			dev_dbg(dev, "System error detected\n");
1100  			pm_state = mhi_tryset_pm_state(mhi_cntrl,
1101  						       MHI_PM_SYS_ERR_DETECT);
1102  		}
1103  		write_unlock_irq(&mhi_cntrl->pm_lock);
1104  		if (pm_state == MHI_PM_SYS_ERR_DETECT)
1105  			mhi_pm_sys_err_handler(mhi_cntrl);
1106  	}
1107  }
1108  
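/* The ring is full when advancing the write pointer by one element would hit the read pointer */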
static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
			     struct mhi_ring *ring)
1111  {
1112  	void *tmp = ring->wp + ring->el_size;
1113  
1114  	if (tmp >= (ring->base + ring->len))
1115  		tmp = ring->base;
1116  
1117  	return (tmp == ring->rp);
1118  }
1119  
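/*
 * Common queueing path: generate a TRE for the buffer, take runtime PM and
 * device wake references, and ring the channel doorbell if register access is
 * currently allowed.
 */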
static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
		     enum dma_data_direction dir, enum mhi_flags mflags)
1122  {
1123  	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1124  	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1125  							     mhi_dev->dl_chan;
1126  	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
1127  	unsigned long flags;
1128  	int ret;
1129  
1130  	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
1131  		return -EIO;
1132  
1133  	ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
1134  	if (unlikely(ret))
1135  		return -EAGAIN;
1136  
1137  	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
1138  	if (unlikely(ret))
1139  		return ret;
1140  
1141  	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
1142  
	/*
	 * The packet is queued, so take a usage ref to exit M3 if necessary.
	 * For a host->device buffer the balanced put is done on buffer
	 * completion; for a device->host buffer it is done after ringing the DB.
	 */
1147  	mhi_cntrl->runtime_get(mhi_cntrl);
1148  
	/* Assert dev_wake (to exit/prevent M1/M2) */
1150  	mhi_cntrl->wake_toggle(mhi_cntrl);
1151  
1152  	if (mhi_chan->dir == DMA_TO_DEVICE)
1153  		atomic_inc(&mhi_cntrl->pending_pkts);
1154  
1155  	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
1156  		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1157  
1158  	if (dir == DMA_FROM_DEVICE)
1159  		mhi_cntrl->runtime_put(mhi_cntrl);
1160  
1161  	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
1162  
1163  	return ret;
1164  }
1165  
int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
1168  {
1169  	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1170  							     mhi_dev->dl_chan;
1171  	struct mhi_buf_info buf_info = { };
1172  
1173  	buf_info.v_addr = skb->data;
1174  	buf_info.cb_buf = skb;
1175  	buf_info.len = len;
1176  
1177  	if (unlikely(mhi_chan->pre_alloc))
1178  		return -EINVAL;
1179  
1180  	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
1181  }
1182  EXPORT_SYMBOL_GPL(mhi_queue_skb);
1183  
int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
1186  {
1187  	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1188  							     mhi_dev->dl_chan;
1189  	struct mhi_buf_info buf_info = { };
1190  
1191  	buf_info.p_addr = mhi_buf->dma_addr;
1192  	buf_info.cb_buf = mhi_buf;
1193  	buf_info.pre_mapped = true;
1194  	buf_info.len = len;
1195  
1196  	if (unlikely(mhi_chan->pre_alloc))
1197  		return -EINVAL;
1198  
1199  	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
1200  }
1201  EXPORT_SYMBOL_GPL(mhi_queue_dma);
1202  
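/*
 * Fill in the next transfer ring element (TRE) and the matching buffer ring
 * entry for the given buffer, mapping it for DMA unless the client pre-mapped
 * it, then advance both ring write pointers.
 */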
int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
			struct mhi_buf_info *info, enum mhi_flags flags)
1205  {
1206  	struct mhi_ring *buf_ring, *tre_ring;
1207  	struct mhi_ring_element *mhi_tre;
1208  	struct mhi_buf_info *buf_info;
1209  	int eot, eob, chain, bei;
1210  	int ret;
1211  
1212  	/* Protect accesses for reading and incrementing WP */
1213  	write_lock_bh(&mhi_chan->lock);
1214  
1215  	buf_ring = &mhi_chan->buf_ring;
1216  	tre_ring = &mhi_chan->tre_ring;
1217  
1218  	buf_info = buf_ring->wp;
1219  	WARN_ON(buf_info->used);
1220  	buf_info->pre_mapped = info->pre_mapped;
1221  	if (info->pre_mapped)
1222  		buf_info->p_addr = info->p_addr;
1223  	else
1224  		buf_info->v_addr = info->v_addr;
1225  	buf_info->cb_buf = info->cb_buf;
1226  	buf_info->wp = tre_ring->wp;
1227  	buf_info->dir = mhi_chan->dir;
1228  	buf_info->len = info->len;
1229  
1230  	if (!info->pre_mapped) {
1231  		ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
1232  		if (ret) {
1233  			write_unlock_bh(&mhi_chan->lock);
1234  			return ret;
1235  		}
1236  	}
1237  
1238  	eob = !!(flags & MHI_EOB);
1239  	eot = !!(flags & MHI_EOT);
1240  	chain = !!(flags & MHI_CHAIN);
1241  	bei = !!(mhi_chan->intmod);
1242  
1243  	mhi_tre = tre_ring->wp;
1244  	mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
1245  	mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
1246  	mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
1247  
1248  	trace_mhi_gen_tre(mhi_cntrl, mhi_chan, mhi_tre);
1249  	/* increment WP */
1250  	mhi_add_ring_element(mhi_cntrl, tre_ring);
1251  	mhi_add_ring_element(mhi_cntrl, buf_ring);
1252  
1253  	write_unlock_bh(&mhi_chan->lock);
1254  
1255  	return 0;
1256  }
1257  
int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  void *buf, size_t len, enum mhi_flags mflags)
1260  {
1261  	struct mhi_buf_info buf_info = { };
1262  
1263  	buf_info.v_addr = buf;
1264  	buf_info.cb_buf = buf;
1265  	buf_info.len = len;
1266  
1267  	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
1268  }
1269  EXPORT_SYMBOL_GPL(mhi_queue_buf);
1270  
bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir)
1272  {
1273  	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1274  	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
1275  					mhi_dev->ul_chan : mhi_dev->dl_chan;
1276  	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
1277  
1278  	return mhi_is_ring_full(mhi_cntrl, tre_ring);
1279  }
1280  EXPORT_SYMBOL_GPL(mhi_queue_is_full);
1281  
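/*
 * Queue a channel command (reset/stop/start) on the primary command ring and
 * ring the command doorbell if register access is currently allowed.
 */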
int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
		 struct mhi_chan *mhi_chan,
		 enum mhi_cmd_type cmd)
1285  {
1286  	struct mhi_ring_element *cmd_tre = NULL;
1287  	struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
1288  	struct mhi_ring *ring = &mhi_cmd->ring;
1289  	struct device *dev = &mhi_cntrl->mhi_dev->dev;
1290  	int chan = 0;
1291  
1292  	if (mhi_chan)
1293  		chan = mhi_chan->chan;
1294  
1295  	spin_lock_bh(&mhi_cmd->lock);
1296  	if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
1297  		spin_unlock_bh(&mhi_cmd->lock);
1298  		return -ENOMEM;
1299  	}
1300  
1301  	/* prepare the cmd tre */
1302  	cmd_tre = ring->wp;
1303  	switch (cmd) {
1304  	case MHI_CMD_RESET_CHAN:
1305  		cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
1306  		cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
1307  		cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
1308  		break;
1309  	case MHI_CMD_STOP_CHAN:
1310  		cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR;
1311  		cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0;
1312  		cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan);
1313  		break;
1314  	case MHI_CMD_START_CHAN:
1315  		cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
1316  		cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
1317  		cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
1318  		break;
1319  	default:
1320  		dev_err(dev, "Command not supported\n");
1321  		break;
1322  	}
1323  
1324  	/* queue to hardware */
1325  	mhi_add_ring_element(mhi_cntrl, ring);
1326  	read_lock_bh(&mhi_cntrl->pm_lock);
1327  	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
1328  		mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
1329  	read_unlock_bh(&mhi_cntrl->pm_lock);
1330  	spin_unlock_bh(&mhi_cmd->lock);
1331  
1332  	return 0;
1333  }
1334  
static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
				    struct mhi_chan *mhi_chan,
				    enum mhi_ch_state_type to_state)
1338  {
1339  	struct device *dev = &mhi_chan->mhi_dev->dev;
1340  	enum mhi_cmd_type cmd = MHI_CMD_NOP;
1341  	int ret;
1342  
1343  	trace_mhi_channel_command_start(mhi_cntrl, mhi_chan, to_state, TPS("Updating"));
1344  	switch (to_state) {
1345  	case MHI_CH_STATE_TYPE_RESET:
1346  		write_lock_irq(&mhi_chan->lock);
1347  		if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
1348  		    mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
1349  		    mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
1350  			write_unlock_irq(&mhi_chan->lock);
1351  			return -EINVAL;
1352  		}
1353  		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
1354  		write_unlock_irq(&mhi_chan->lock);
1355  
1356  		cmd = MHI_CMD_RESET_CHAN;
1357  		break;
1358  	case MHI_CH_STATE_TYPE_STOP:
1359  		if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
1360  			return -EINVAL;
1361  
1362  		cmd = MHI_CMD_STOP_CHAN;
1363  		break;
1364  	case MHI_CH_STATE_TYPE_START:
1365  		if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
1366  		    mhi_chan->ch_state != MHI_CH_STATE_DISABLED)
1367  			return -EINVAL;
1368  
1369  		cmd = MHI_CMD_START_CHAN;
1370  		break;
1371  	default:
1372  		dev_err(dev, "%d: Channel state update to %s not allowed\n",
1373  			mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
1374  		return -EINVAL;
1375  	}
1376  
1377  	/* bring host and device out of suspended states */
1378  	ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
1379  	if (ret)
1380  		return ret;
1381  	mhi_cntrl->runtime_get(mhi_cntrl);
1382  
1383  	reinit_completion(&mhi_chan->completion);
1384  	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd);
1385  	if (ret) {
1386  		dev_err(dev, "%d: Failed to send %s channel command\n",
1387  			mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
1388  		goto exit_channel_update;
1389  	}
1390  
1391  	ret = wait_for_completion_timeout(&mhi_chan->completion,
1392  				       msecs_to_jiffies(mhi_cntrl->timeout_ms));
1393  	if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
1394  		dev_err(dev,
1395  			"%d: Failed to receive %s channel command completion\n",
1396  			mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
1397  		ret = -EIO;
1398  		goto exit_channel_update;
1399  	}
1400  
1401  	ret = 0;
1402  
1403  	if (to_state != MHI_CH_STATE_TYPE_RESET) {
1404  		write_lock_irq(&mhi_chan->lock);
1405  		mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ?
1406  				      MHI_CH_STATE_ENABLED : MHI_CH_STATE_STOP;
1407  		write_unlock_irq(&mhi_chan->lock);
1408  	}
1409  
1410  	trace_mhi_channel_command_end(mhi_cntrl, mhi_chan, to_state, TPS("Updated"));
1411  exit_channel_update:
1412  	mhi_cntrl->runtime_put(mhi_cntrl);
1413  	mhi_device_put(mhi_cntrl->mhi_dev);
1414  
1415  	return ret;
1416  }
1417  
static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
				  struct mhi_chan *mhi_chan)
1420  {
1421  	int ret;
1422  	struct device *dev = &mhi_chan->mhi_dev->dev;
1423  
1424  	mutex_lock(&mhi_chan->mutex);
1425  
1426  	if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
1427  		dev_dbg(dev, "Current EE: %s Required EE Mask: 0x%x\n",
1428  			TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
1429  		goto exit_unprepare_channel;
1430  	}
1431  
	/* No more event processing for this channel */
1433  	ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
1434  				       MHI_CH_STATE_TYPE_RESET);
1435  	if (ret)
1436  		dev_err(dev, "%d: Failed to reset channel, still resetting\n",
1437  			mhi_chan->chan);
1438  
1439  exit_unprepare_channel:
1440  	write_lock_irq(&mhi_chan->lock);
1441  	mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
1442  	write_unlock_irq(&mhi_chan->lock);
1443  
1444  	if (!mhi_chan->offload_ch) {
1445  		mhi_reset_chan(mhi_cntrl, mhi_chan);
1446  		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1447  	}
1448  	dev_dbg(dev, "%d: successfully reset\n", mhi_chan->chan);
1449  
1450  	mutex_unlock(&mhi_chan->mutex);
1451  }
1452  
int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
			struct mhi_chan *mhi_chan, unsigned int flags)
1455  {
1456  	int ret = 0;
1457  	struct device *dev = &mhi_chan->mhi_dev->dev;
1458  
1459  	if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
1460  		dev_err(dev, "Current EE: %s Required EE Mask: 0x%x\n",
1461  			TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
1462  		return -ENOTCONN;
1463  	}
1464  
1465  	mutex_lock(&mhi_chan->mutex);
1466  
	/* The client manages the channel context for offload channels */
1468  	if (!mhi_chan->offload_ch) {
1469  		ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
1470  		if (ret)
1471  			goto error_init_chan;
1472  	}
1473  
1474  	ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
1475  				       MHI_CH_STATE_TYPE_START);
1476  	if (ret)
1477  		goto error_pm_state;
1478  
1479  	if (mhi_chan->dir == DMA_FROM_DEVICE)
1480  		mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);
1481  
1482  	/* Pre-allocate buffer for xfer ring */
1483  	if (mhi_chan->pre_alloc) {
1484  		int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
1485  						       &mhi_chan->tre_ring);
1486  		size_t len = mhi_cntrl->buffer_len;
1487  
1488  		while (nr_el--) {
1489  			void *buf;
1490  			struct mhi_buf_info info = { };
1491  
1492  			buf = kmalloc(len, GFP_KERNEL);
1493  			if (!buf) {
1494  				ret = -ENOMEM;
1495  				goto error_pre_alloc;
1496  			}
1497  
1498  			/* Prepare transfer descriptors */
1499  			info.v_addr = buf;
1500  			info.cb_buf = buf;
1501  			info.len = len;
1502  			ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
1503  			if (ret) {
1504  				kfree(buf);
1505  				goto error_pre_alloc;
1506  			}
1507  		}
1508  
1509  		read_lock_bh(&mhi_cntrl->pm_lock);
1510  		if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
1511  			read_lock_irq(&mhi_chan->lock);
1512  			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1513  			read_unlock_irq(&mhi_chan->lock);
1514  		}
1515  		read_unlock_bh(&mhi_cntrl->pm_lock);
1516  	}
1517  
1518  	mutex_unlock(&mhi_chan->mutex);
1519  
1520  	return 0;
1521  
1522  error_pm_state:
1523  	if (!mhi_chan->offload_ch)
1524  		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1525  
1526  error_init_chan:
1527  	mutex_unlock(&mhi_chan->mutex);
1528  
1529  	return ret;
1530  
1531  error_pre_alloc:
1532  	mutex_unlock(&mhi_chan->mutex);
1533  	mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1534  
1535  	return ret;
1536  }
1537  
static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
				  struct mhi_event *mhi_event,
				  struct mhi_event_ctxt *er_ctxt,
				  int chan)
1542  
1543  {
1544  	struct mhi_ring_element *dev_rp, *local_rp;
1545  	struct mhi_ring *ev_ring;
1546  	struct device *dev = &mhi_cntrl->mhi_dev->dev;
1547  	unsigned long flags;
1548  	dma_addr_t ptr;
1549  
1550  	dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
1551  
1552  	ev_ring = &mhi_event->ring;
1553  
	/* Mark all events related to this channel as STALE events */
1555  	spin_lock_irqsave(&mhi_event->lock, flags);
1556  
1557  	ptr = le64_to_cpu(er_ctxt->rp);
1558  	if (!is_valid_ring_ptr(ev_ring, ptr)) {
1559  		dev_err(&mhi_cntrl->mhi_dev->dev,
1560  			"Event ring rp points outside of the event ring\n");
1561  		dev_rp = ev_ring->rp;
1562  	} else {
1563  		dev_rp = mhi_to_virtual(ev_ring, ptr);
1564  	}
1565  
1566  	local_rp = ev_ring->rp;
1567  	while (dev_rp != local_rp) {
1568  		if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
1569  		    chan == MHI_TRE_GET_EV_CHID(local_rp))
1570  			local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
1571  					MHI_PKT_TYPE_STALE_EVENT);
1572  		local_rp++;
1573  		if (local_rp == (ev_ring->base + ev_ring->len))
1574  			local_rp = ev_ring->base;
1575  	}
1576  
1577  	dev_dbg(dev, "Finished marking events as stale events\n");
1578  	spin_unlock_irqrestore(&mhi_event->lock, flags);
1579  }
1580  
static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
				struct mhi_chan *mhi_chan)
1583  {
1584  	struct mhi_ring *buf_ring, *tre_ring;
1585  	struct mhi_result result;
1586  
1587  	/* Reset any pending buffers */
1588  	buf_ring = &mhi_chan->buf_ring;
1589  	tre_ring = &mhi_chan->tre_ring;
1590  	result.transaction_status = -ENOTCONN;
1591  	result.bytes_xferd = 0;
1592  	while (tre_ring->rp != tre_ring->wp) {
1593  		struct mhi_buf_info *buf_info = buf_ring->rp;
1594  
1595  		if (mhi_chan->dir == DMA_TO_DEVICE) {
1596  			atomic_dec(&mhi_cntrl->pending_pkts);
1597  			/* Release the reference got from mhi_queue() */
1598  			mhi_cntrl->runtime_put(mhi_cntrl);
1599  		}
1600  
1601  		if (!buf_info->pre_mapped)
1602  			mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
1603  
1604  		mhi_del_ring_element(mhi_cntrl, buf_ring);
1605  		mhi_del_ring_element(mhi_cntrl, tre_ring);
1606  
1607  		if (mhi_chan->pre_alloc) {
1608  			kfree(buf_info->cb_buf);
1609  		} else {
1610  			result.buf_addr = buf_info->cb_buf;
1611  			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
1612  		}
1613  	}
1614  }
1615  
void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
1617  {
1618  	struct mhi_event *mhi_event;
1619  	struct mhi_event_ctxt *er_ctxt;
1620  	int chan = mhi_chan->chan;
1621  
1622  	/* Nothing to reset, client doesn't queue buffers */
1623  	if (mhi_chan->offload_ch)
1624  		return;
1625  
1626  	read_lock_bh(&mhi_cntrl->pm_lock);
1627  	mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
1628  	er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
1629  
1630  	mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
1631  
1632  	mhi_reset_data_chan(mhi_cntrl, mhi_chan);
1633  
1634  	read_unlock_bh(&mhi_cntrl->pm_lock);
1635  }
1636  
static int __mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags)
1638  {
1639  	int ret, dir;
1640  	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1641  	struct mhi_chan *mhi_chan;
1642  
1643  	for (dir = 0; dir < 2; dir++) {
1644  		mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
1645  		if (!mhi_chan)
1646  			continue;
1647  
1648  		ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
1649  		if (ret)
1650  			goto error_open_chan;
1651  	}
1652  
1653  	return 0;
1654  
1655  error_open_chan:
1656  	for (--dir; dir >= 0; dir--) {
1657  		mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
1658  		if (!mhi_chan)
1659  			continue;
1660  
1661  		mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1662  	}
1663  
1664  	return ret;
1665  }
1666  
int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
1668  {
1669  	return __mhi_prepare_for_transfer(mhi_dev, 0);
1670  }
1671  EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);
1672  
int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev)
1674  {
1675  	return __mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
1676  }
1677  EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer_autoqueue);
1678  
void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
1680  {
1681  	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1682  	struct mhi_chan *mhi_chan;
1683  	int dir;
1684  
1685  	for (dir = 0; dir < 2; dir++) {
1686  		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
1687  		if (!mhi_chan)
1688  			continue;
1689  
1690  		mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1691  	}
1692  }
1693  EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
1694  
int mhi_get_channel_doorbell_offset(struct mhi_controller *mhi_cntrl, u32 *chdb_offset)
1696  {
1697  	struct device *dev = &mhi_cntrl->mhi_dev->dev;
1698  	void __iomem *base = mhi_cntrl->regs;
1699  	int ret;
1700  
1701  	ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, chdb_offset);
1702  	if (ret) {
1703  		dev_err(dev, "Unable to read CHDBOFF register\n");
1704  		return -EIO;
1705  	}
1706  
1707  	return 0;
1708  }
1709  EXPORT_SYMBOL_GPL(mhi_get_channel_doorbell_offset);
1710