1  /* SPDX-License-Identifier: GPL-2.0 */
2  /*
3   * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
4   *
5   */
6  #ifndef _MHI_H_
7  #define _MHI_H_
8  
9  #include <linux/device.h>
10  #include <linux/dma-direction.h>
11  #include <linux/mutex.h>
12  #include <linux/skbuff.h>
13  #include <linux/slab.h>
14  #include <linux/spinlock.h>
15  #include <linux/wait.h>
16  #include <linux/workqueue.h>
17  
/* Maximum number of OEM PK hash segments a controller may expose */
#define MHI_MAX_OEM_PK_HASH_SEGMENTS 16

/* Forward declarations of structures private to the MHI core */
struct mhi_chan;
struct mhi_event;
struct mhi_ctxt;
struct mhi_cmd;
struct mhi_buf_info;
25  
/**
 * enum mhi_callback - MHI callback
 * @MHI_CB_IDLE: MHI entered idle state
 * @MHI_CB_PENDING_DATA: New data available for client to process
 * @MHI_CB_LPM_ENTER: MHI host entered low power mode
 * @MHI_CB_LPM_EXIT: MHI host about to exit low power mode
 * @MHI_CB_EE_RDDM: MHI device entered RDDM exec env
 * @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode exec env
 * @MHI_CB_SYS_ERROR: MHI device entered error state (may recover)
 * @MHI_CB_FATAL_ERROR: MHI device entered fatal error state
 * @MHI_CB_BW_REQ: Received a bandwidth switch request from device
 *
 * These reasons are delivered through the status_cb() callbacks in
 * struct mhi_controller and struct mhi_driver.
 */
enum mhi_callback {
	MHI_CB_IDLE,
	MHI_CB_PENDING_DATA,
	MHI_CB_LPM_ENTER,
	MHI_CB_LPM_EXIT,
	MHI_CB_EE_RDDM,
	MHI_CB_EE_MISSION_MODE,
	MHI_CB_SYS_ERROR,
	MHI_CB_FATAL_ERROR,
	MHI_CB_BW_REQ,
};
49  
/**
 * enum mhi_flags - Transfer flags
 * @MHI_EOB: End of buffer for bulk transfer
 * @MHI_EOT: End of transfer
 * @MHI_CHAIN: Linked transfer
 *
 * The flags are single-bit values so that they may be OR-ed together
 * when passed to the mhi_queue_*() APIs.
 */
enum mhi_flags {
	MHI_EOB = BIT(0),
	MHI_EOT = BIT(1),
	MHI_CHAIN = BIT(2),
};
61  
/**
 * enum mhi_device_type - Device types
 * @MHI_DEVICE_XFER: Handles data transfer
 * @MHI_DEVICE_CONTROLLER: Control device
 */
enum mhi_device_type {
	MHI_DEVICE_XFER,
	MHI_DEVICE_CONTROLLER,
};
71  
/**
 * enum mhi_ch_type - Channel types
 * @MHI_CH_TYPE_INVALID: Invalid channel type
 * @MHI_CH_TYPE_OUTBOUND: Outbound channel to the device
 * @MHI_CH_TYPE_INBOUND: Inbound channel from the device
 * @MHI_CH_TYPE_INBOUND_COALESCED: Coalesced channel for the device to combine
 *				   multiple packets and send them as a single
 *				   large packet to reduce CPU consumption
 *
 * The OUTBOUND/INBOUND values deliberately alias enum dma_data_direction so
 * that a channel's type can also be used as its DMA direction.
 */
enum mhi_ch_type {
	MHI_CH_TYPE_INVALID = 0,
	MHI_CH_TYPE_OUTBOUND = DMA_TO_DEVICE,
	MHI_CH_TYPE_INBOUND = DMA_FROM_DEVICE,
	MHI_CH_TYPE_INBOUND_COALESCED = 3,
};
87  
/**
 * struct image_info - Firmware and RDDM table
 * @mhi_buf: Buffer for firmware and RDDM table
 * @entries: # of entries in table
 */
struct image_info {
	struct mhi_buf *mhi_buf;
	/* private: from internal.h, owned by the MHI core */
	struct bhi_vec_entry *bhi_vec;
	/* public: */
	u32 entries;
};
100  
/**
 * struct mhi_link_info - BW requirement
 * @target_link_speed: Link speed as defined by TLS bits in LinkControl reg
 * @target_link_width: Link width as defined by NLW bits in LinkStatus reg
 */
struct mhi_link_info {
	unsigned int target_link_speed;
	unsigned int target_link_width;
};
110  
/**
 * enum mhi_ee_type - Execution environment types
 * @MHI_EE_PBL: Primary Bootloader
 * @MHI_EE_SBL: Secondary Bootloader
 * @MHI_EE_AMSS: Modem, aka the primary runtime EE
 * @MHI_EE_RDDM: Ram dump download mode
 * @MHI_EE_WFW: WLAN firmware mode
 * @MHI_EE_PTHRU: Passthrough
 * @MHI_EE_EDL: Embedded downloader
 * @MHI_EE_FP: Flash Programmer Environment
 * @MHI_EE_MAX_SUPPORTED: Highest EE value defined by the MHI spec
 * @MHI_EE_DISABLE_TRANSITION: Local EE, not related to MHI spec
 * @MHI_EE_NOT_SUPPORTED: EE not supported by the host
 * @MHI_EE_MAX: Sentinel, number of EE values
 */
enum mhi_ee_type {
	MHI_EE_PBL,
	MHI_EE_SBL,
	MHI_EE_AMSS,
	MHI_EE_RDDM,
	MHI_EE_WFW,
	MHI_EE_PTHRU,
	MHI_EE_EDL,
	MHI_EE_FP,
	MHI_EE_MAX_SUPPORTED = MHI_EE_FP,
	MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */
	MHI_EE_NOT_SUPPORTED,
	MHI_EE_MAX,
};
136  
/**
 * enum mhi_state - MHI states
 * @MHI_STATE_RESET: Reset state
 * @MHI_STATE_READY: Ready state
 * @MHI_STATE_M0: M0 state
 * @MHI_STATE_M1: M1 state
 * @MHI_STATE_M2: M2 state
 * @MHI_STATE_M3: M3 state
 * @MHI_STATE_M3_FAST: M3 Fast state
 * @MHI_STATE_BHI: BHI state
 * @MHI_STATE_SYS_ERR: System Error state
 * @MHI_STATE_MAX: Sentinel, one past the largest state value
 */
enum mhi_state {
	MHI_STATE_RESET = 0x0,
	MHI_STATE_READY = 0x1,
	MHI_STATE_M0 = 0x2,
	MHI_STATE_M1 = 0x3,
	MHI_STATE_M2 = 0x4,
	MHI_STATE_M3 = 0x5,
	MHI_STATE_M3_FAST = 0x6,
	MHI_STATE_BHI = 0x7,
	MHI_STATE_SYS_ERR = 0xFF,
	MHI_STATE_MAX,
};
161  
/**
 * enum mhi_ch_ee_mask - Execution environment mask for channel
 * @MHI_CH_EE_PBL: Allow channel to be used in PBL EE
 * @MHI_CH_EE_SBL: Allow channel to be used in SBL EE
 * @MHI_CH_EE_AMSS: Allow channel to be used in AMSS EE
 * @MHI_CH_EE_RDDM: Allow channel to be used in RDDM EE
 * @MHI_CH_EE_PTHRU: Allow channel to be used in PTHRU EE
 * @MHI_CH_EE_WFW: Allow channel to be used in WFW EE
 * @MHI_CH_EE_EDL: Allow channel to be used in EDL EE
 *
 * Bit positions match enum mhi_ee_type; the masks may be OR-ed together
 * in struct mhi_channel_config::ee_mask.
 */
enum mhi_ch_ee_mask {
	MHI_CH_EE_PBL = BIT(MHI_EE_PBL),
	MHI_CH_EE_SBL = BIT(MHI_EE_SBL),
	MHI_CH_EE_AMSS = BIT(MHI_EE_AMSS),
	MHI_CH_EE_RDDM = BIT(MHI_EE_RDDM),
	MHI_CH_EE_PTHRU = BIT(MHI_EE_PTHRU),
	MHI_CH_EE_WFW = BIT(MHI_EE_WFW),
	MHI_CH_EE_EDL = BIT(MHI_EE_EDL),
};
181  
/**
 * enum mhi_er_data_type - Event ring data types
 * @MHI_ER_DATA: Only client data over this ring
 * @MHI_ER_CTRL: MHI control data and client data
 */
enum mhi_er_data_type {
	MHI_ER_DATA,
	MHI_ER_CTRL,
};
191  
/**
 * enum mhi_db_brst_mode - Doorbell mode
 * @MHI_DB_BRST_DISABLE: Burst mode disable
 * @MHI_DB_BRST_ENABLE: Burst mode enable
 */
enum mhi_db_brst_mode {
	MHI_DB_BRST_DISABLE = 0x2,
	MHI_DB_BRST_ENABLE = 0x3,
};
201  
/**
 * struct mhi_channel_config - Channel configuration structure for controller
 * @name: The name of this channel
 * @num: The number assigned to this channel
 * @num_elements: The number of elements that can be queued to this channel
 * @local_elements: The local ring length of the channel
 * @event_ring: The event ring index that services this channel
 * @dir: Direction that data may flow on this channel
 * @type: Channel type
 * @ee_mask: Execution Environment mask for this channel
 * @pollcfg: Polling configuration for burst mode. 0 is default. Milliseconds
 *	     for UL channels, multiple of 8 ring elements for DL channels
 * @doorbell: Doorbell mode
 * @lpm_notify: The channel master requires low power mode notifications
 * @offload_channel: The client manages the channel completely
 * @doorbell_mode_switch: Channel switches to doorbell mode on M0 transition
 * @auto_queue: Framework will automatically queue buffers for DL traffic
 * @wake_capable: Channel capable of waking up the system
 */
struct mhi_channel_config {
	char *name;
	u32 num;
	u32 num_elements;
	u32 local_elements;
	u32 event_ring;
	enum dma_data_direction dir;
	enum mhi_ch_type type;
	u32 ee_mask;
	u32 pollcfg;
	enum mhi_db_brst_mode doorbell;
	bool lpm_notify;
	bool offload_channel;
	bool doorbell_mode_switch;
	bool auto_queue;
	bool wake_capable;
};
238  
/**
 * struct mhi_event_config - Event ring configuration structure for controller
 * @num_elements: The number of elements that can be queued to this ring
 * @irq_moderation_ms: Delay irq for additional events to be aggregated
 * @irq: IRQ associated with this ring
 * @channel: Dedicated channel number. U32_MAX indicates a non-dedicated ring
 * @priority: Priority of this ring. Use 1 for now
 * @mode: Doorbell mode
 * @data_type: Type of data this ring will process
 * @hardware_event: This ring is associated with hardware channels
 * @client_managed: This ring is client managed
 * @offload_channel: This ring is associated with an offloaded channel
 */
struct mhi_event_config {
	u32 num_elements;
	u32 irq_moderation_ms;
	u32 irq;
	u32 channel;
	u32 priority;
	enum mhi_db_brst_mode mode;
	enum mhi_er_data_type data_type;
	bool hardware_event;
	bool client_managed;
	bool offload_channel;
};
264  
/**
 * struct mhi_controller_config - Root MHI controller configuration
 * @max_channels: Maximum number of channels supported
 * @timeout_ms: Timeout value for operations. 0 means use default
 * @ready_timeout_ms: Timeout value for waiting device to be ready (optional)
 * @buf_len: Size of automatically allocated buffers. 0 means use default
 * @num_channels: Number of channels defined in @ch_cfg
 * @ch_cfg: Array of defined channels
 * @num_events: Number of event rings defined in @event_cfg
 * @event_cfg: Array of defined event rings
 * @use_bounce_buf: Use a bounce buffer pool due to limited DDR access
 * @m2_no_db: Host is not allowed to ring DB in M2 state
 */
struct mhi_controller_config {
	u32 max_channels;
	u32 timeout_ms;
	u32 ready_timeout_ms;
	u32 buf_len;
	u32 num_channels;
	const struct mhi_channel_config *ch_cfg;
	u32 num_events;
	struct mhi_event_config *event_cfg;
	bool use_bounce_buf;
	bool m2_no_db;
};
290  
/**
 * struct mhi_controller - Master MHI controller structure
 * @name: Device name of the MHI controller
 * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
 *             controller (required)
 * @mhi_dev: MHI device instance for the controller
 * @debugfs_dentry: MHI controller debugfs directory
 * @regs: Base address of MHI MMIO register space (required)
 * @bhi: Points to base of MHI BHI register space
 * @bhie: Points to base of MHI BHIe register space
 * @wake_db: MHI WAKE doorbell register address
 * @iova_start: IOMMU starting address for data (required)
 * @iova_stop: IOMMU stop address for data (required)
 * @fw_image: Firmware image name for normal booting (optional)
 * @fw_data: Firmware image data content for normal booting, used only
 *           if fw_image is NULL and fbc_download is true (optional)
 * @fw_sz: Firmware image data size for normal booting, used only if fw_image
 *         is NULL and fbc_download is true (optional)
 * @edl_image: Firmware image name for emergency download mode (optional)
 * @rddm_size: RAM dump size that host should allocate for debugging purpose
 * @sbl_size: SBL image size downloaded through BHIe (optional)
 * @seg_len: BHIe vector size (optional)
 * @reg_len: Length of the MHI MMIO region (required)
 * @fbc_image: Points to firmware image buffer
 * @rddm_image: Points to RAM dump buffer
 * @mhi_chan: Points to the channel configuration table
 * @lpm_chans: List of channels that require LPM notifications
 * @irq: base irq # to request (required)
 * @max_chan: Maximum number of channels the controller supports
 * @total_ev_rings: Total # of event rings allocated
 * @hw_ev_rings: Number of hardware event rings
 * @sw_ev_rings: Number of software event rings
 * @nr_irqs: Number of IRQ allocated by bus master (required)
 * @serial_number: MHI controller serial number obtained from BHI
 * @mhi_event: MHI event ring configurations table
 * @mhi_cmd: MHI command ring configurations table
 * @mhi_ctxt: MHI device context, shared memory between host and device
 * @pm_mutex: Mutex for suspend/resume operation
 * @pm_lock: Lock for protecting MHI power management state
 * @timeout_ms: Timeout in ms for state transitions
 * @ready_timeout_ms: Timeout in ms for waiting device to be ready (optional)
 * @pm_state: MHI power management state
 * @db_access: DB access states
 * @ee: MHI device execution environment
 * @dev_state: MHI device state
 * @dev_wake: Device wakeup count
 * @pending_pkts: Pending packets for the controller
 * @M0, M2, M3: Counters to track number of device MHI state changes
 * @transition_list: List of MHI state transitions
 * @transition_lock: Lock for protecting MHI state transition list
 * @wlock: Lock for protecting device wakeup
 * @mhi_link_info: Device bandwidth info
 * @st_worker: State transition worker
 * @hiprio_wq: High priority workqueue for MHI work such as state transitions
 * @state_event: State change event
 * @status_cb: CB function to notify power states of the device (required)
 * @wake_get: CB function to assert device wake (optional)
 * @wake_put: CB function to de-assert device wake (optional)
 * @wake_toggle: CB function to assert and de-assert device wake (optional)
 * @runtime_get: CB function to request controller runtime resume (required)
 * @runtime_put: CB function to decrement pm usage (required)
 * @map_single: CB function to create TRE buffer
 * @unmap_single: CB function to destroy TRE buffer
 * @read_reg: Read a MHI register via the physical link (required)
 * @write_reg: Write a MHI register via the physical link (required)
 * @reset: Controller specific reset function (optional)
 * @edl_trigger: CB function to trigger EDL mode (optional)
 * @buffer_len: Bounce buffer length
 * @index: Index of the MHI controller instance
 * @bounce_buf: Use of bounce buffer
 * @fbc_download: MHI host needs to do complete image transfer (optional)
 * @wake_set: Device wakeup set flag
 * @irq_flags: irq flags passed to request_irq (optional)
 * @mru: the default MRU for the MHI device
 *
 * Fields marked as (required) need to be populated by the controller driver
 * before calling mhi_register_controller(). For the fields marked as (optional)
 * they can be populated depending on the usecase.
 */
struct mhi_controller {
	const char *name;
	struct device *cntrl_dev;
	struct mhi_device *mhi_dev;
	struct dentry *debugfs_dentry;
	void __iomem *regs;
	void __iomem *bhi;
	void __iomem *bhie;
	void __iomem *wake_db;

	dma_addr_t iova_start;
	dma_addr_t iova_stop;
	const char *fw_image;
	const u8 *fw_data;
	size_t fw_sz;
	const char *edl_image;
	size_t rddm_size;
	size_t sbl_size;
	size_t seg_len;
	size_t reg_len;
	struct image_info *fbc_image;
	struct image_info *rddm_image;
	struct mhi_chan *mhi_chan;
	struct list_head lpm_chans;
	int *irq;
	u32 max_chan;
	u32 total_ev_rings;
	u32 hw_ev_rings;
	u32 sw_ev_rings;
	u32 nr_irqs;
	u32 serial_number;

	struct mhi_event *mhi_event;
	struct mhi_cmd *mhi_cmd;
	struct mhi_ctxt *mhi_ctxt;

	struct mutex pm_mutex;
	rwlock_t pm_lock;
	u32 timeout_ms;
	u32 ready_timeout_ms;
	u32 pm_state;
	u32 db_access;
	enum mhi_ee_type ee;
	enum mhi_state dev_state;
	atomic_t dev_wake;
	atomic_t pending_pkts;
	u32 M0, M2, M3;
	struct list_head transition_list;
	spinlock_t transition_lock;
	spinlock_t wlock;
	struct mhi_link_info mhi_link_info;
	struct work_struct st_worker;
	struct workqueue_struct *hiprio_wq;
	wait_queue_head_t state_event;

	void (*status_cb)(struct mhi_controller *mhi_cntrl,
			  enum mhi_callback cb);
	void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override);
	void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override);
	void (*wake_toggle)(struct mhi_controller *mhi_cntrl);
	int (*runtime_get)(struct mhi_controller *mhi_cntrl);
	void (*runtime_put)(struct mhi_controller *mhi_cntrl);
	int (*map_single)(struct mhi_controller *mhi_cntrl,
			  struct mhi_buf_info *buf);
	void (*unmap_single)(struct mhi_controller *mhi_cntrl,
			     struct mhi_buf_info *buf);
	int (*read_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr,
			u32 *out);
	void (*write_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr,
			  u32 val);
	void (*reset)(struct mhi_controller *mhi_cntrl);
	int (*edl_trigger)(struct mhi_controller *mhi_cntrl);

	size_t buffer_len;
	int index;
	bool bounce_buf;
	bool fbc_download;
	bool wake_set;
	unsigned long irq_flags;
	u32 mru;
};
451  
/**
 * struct mhi_device - Structure representing an MHI device which binds
 *                     to channels or is associated with controllers
 * @id: Pointer to MHI device ID struct
 * @name: Name of the associated MHI device
 * @mhi_cntrl: Controller the device belongs to
 * @ul_chan: UL channel for the device
 * @dl_chan: DL channel for the device
 * @dev: Driver model device node for the MHI device
 * @dev_type: MHI device type
 * @ul_chan_id: MHI channel id for UL transfer
 * @dl_chan_id: MHI channel id for DL transfer
 * @dev_wake: Device wakeup counter
 */
struct mhi_device {
	const struct mhi_device_id *id;
	const char *name;
	struct mhi_controller *mhi_cntrl;
	struct mhi_chan *ul_chan;
	struct mhi_chan *dl_chan;
	struct device dev;
	enum mhi_device_type dev_type;
	int ul_chan_id;
	int dl_chan_id;
	u32 dev_wake;
};
478  
/**
 * struct mhi_result - Completed buffer information
 * @buf_addr: Address of data buffer
 * @bytes_xferd: Number of bytes transferred
 * @dir: Channel direction
 * @transaction_status: Status of last transaction
 */
struct mhi_result {
	void *buf_addr;
	size_t bytes_xferd;
	enum dma_data_direction dir;
	int transaction_status;
};
492  
/**
 * struct mhi_buf - MHI Buffer description
 * @buf: Virtual address of the buffer
 * @name: Buffer label. For offload channel, configurations name must be:
 *        ECA - Event context array data
 *        CCA - Channel context array data
 * @dma_addr: IOMMU address of the buffer
 * @len: Length of the buffer in bytes
 */
struct mhi_buf {
	void *buf;
	const char *name;
	dma_addr_t dma_addr;
	size_t len;
};
508  
/**
 * struct mhi_driver - Structure representing a MHI client driver
 * @id_table: Pointer to MHI device ID table this driver binds against
 * @probe: CB function for client driver probe function
 * @remove: CB function for client driver remove function
 * @ul_xfer_cb: CB function for UL data transfer
 * @dl_xfer_cb: CB function for DL data transfer
 * @status_cb: CB functions for asynchronous status
 * @driver: Device driver model driver
 */
struct mhi_driver {
	const struct mhi_device_id *id_table;
	int (*probe)(struct mhi_device *mhi_dev,
		     const struct mhi_device_id *id);
	void (*remove)(struct mhi_device *mhi_dev);
	void (*ul_xfer_cb)(struct mhi_device *mhi_dev,
			   struct mhi_result *result);
	void (*dl_xfer_cb)(struct mhi_device *mhi_dev,
			   struct mhi_result *result);
	void (*status_cb)(struct mhi_device *mhi_dev, enum mhi_callback mhi_cb);
	struct device_driver driver;
};
530  
/* Convert an embedded device_driver / device back to its MHI wrapper */
#define to_mhi_driver(drv) container_of_const(drv, struct mhi_driver, driver)
#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev)
533  
/**
 * mhi_alloc_controller - Allocate the MHI Controller structure
 *
 * Allocate the mhi_controller structure using zero initialized memory
 */
struct mhi_controller *mhi_alloc_controller(void);

/**
 * mhi_free_controller - Free the MHI Controller structure
 * @mhi_cntrl: MHI controller to free
 *
 * Free the mhi_controller structure which was previously allocated
 */
void mhi_free_controller(struct mhi_controller *mhi_cntrl);

/**
 * mhi_register_controller - Register MHI controller
 * @mhi_cntrl: MHI controller to register
 * @config: Configuration to use for the controller
 */
int mhi_register_controller(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config);

/**
 * mhi_unregister_controller - Unregister MHI controller
 * @mhi_cntrl: MHI controller to unregister
 */
void mhi_unregister_controller(struct mhi_controller *mhi_cntrl);
559  
/*
 * module_mhi_driver() - Helper macro for drivers that don't do
 * anything special other than using default mhi_driver_register() and
 * mhi_driver_unregister().  This eliminates a lot of boilerplate.
 * Each module may only use this macro once.
 */
#define module_mhi_driver(mhi_drv) \
	module_driver(mhi_drv, mhi_driver_register, \
		      mhi_driver_unregister)

/*
 * Macro to avoid include chaining to get THIS_MODULE
 */
#define mhi_driver_register(mhi_drv) \
	__mhi_driver_register(mhi_drv, THIS_MODULE)

/**
 * __mhi_driver_register - Register driver with MHI framework
 * @mhi_drv: Driver associated with the device
 * @owner: The module owner
 */
int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner);

/**
 * mhi_driver_unregister - Unregister a driver for mhi_devices
 * @mhi_drv: Driver associated with the device
 */
void mhi_driver_unregister(struct mhi_driver *mhi_drv);
588  
/**
 * mhi_set_mhi_state - Set MHI device state
 * @mhi_cntrl: MHI controller
 * @state: State to set
 */
void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
		       enum mhi_state state);

/**
 * mhi_notify - Notify the MHI client driver about client device status
 * @mhi_dev: MHI device instance
 * @cb_reason: MHI callback reason
 */
void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason);

/**
 * mhi_get_free_desc_count - Get transfer ring length
 * @mhi_dev: Device associated with the channels
 * @dir: Direction of the channel
 *
 * Get # of TD available to queue buffers
 */
int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
				enum dma_data_direction dir);
612  
/**
 * mhi_prepare_for_power_up - Do pre-initialization before power up.
 * @mhi_cntrl: MHI controller
 *
 * This is optional, call this before power up if the controller does not
 * want bus framework to automatically free any allocated memory during
 * shutdown process.
 */
int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl);

/**
 * mhi_async_power_up - Start MHI power up sequence
 * @mhi_cntrl: MHI controller
 */
int mhi_async_power_up(struct mhi_controller *mhi_cntrl);

/**
 * mhi_sync_power_up - Start MHI power up sequence and wait till the device
 *                     enters valid EE state
 * @mhi_cntrl: MHI controller
 */
int mhi_sync_power_up(struct mhi_controller *mhi_cntrl);

/**
 * mhi_power_down - Power down the MHI device and also destroy the
 *                  'struct device' for the channels associated with it.
 * @mhi_cntrl: MHI controller
 * @graceful: Link is still accessible, so do a graceful shutdown process
 *
 * See also mhi_power_down_keep_dev() which is a variant of this API that
 * keeps the 'struct device' for channels (useful during suspend/hibernation).
 */
void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful);

/**
 * mhi_power_down_keep_dev - Power down the MHI device but keep the 'struct
 *                           device' for the channels associated with it.
 * @mhi_cntrl: MHI controller
 * @graceful: Link is still accessible, so do a graceful shutdown process
 *
 * This is a variant of 'mhi_power_down()' and useful in scenarios such as
 * suspend/hibernation where destroying of the 'struct device' is not needed.
 */
void mhi_power_down_keep_dev(struct mhi_controller *mhi_cntrl, bool graceful);

/**
 * mhi_unprepare_after_power_down - Free any allocated memory after power down
 * @mhi_cntrl: MHI controller
 */
void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl);
664  
/**
 * mhi_pm_suspend - Move MHI into a suspended state
 * @mhi_cntrl: MHI controller
 */
int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);

/**
 * mhi_pm_resume - Resume MHI from suspended state
 * @mhi_cntrl: MHI controller
 */
int mhi_pm_resume(struct mhi_controller *mhi_cntrl);

/**
 * mhi_pm_resume_force - Force resume MHI from suspended state
 * @mhi_cntrl: MHI controller
 *
 * Resume the device irrespective of its MHI state. As per the MHI spec, devices
 * have to be in M3 state during resume. But some devices seem to be in a
 * different MHI state other than M3 but they continue working fine if allowed.
 * This API is intended to be used for such devices.
 *
 * Return: 0 if the resume succeeds, a negative error code otherwise
 */
int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl);

/**
 * mhi_download_rddm_image - Download ramdump image from device for
 *                           debugging purpose.
 * @mhi_cntrl: MHI controller
 * @in_panic: Download rddm image during kernel panic
 */
int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic);

/**
 * mhi_force_rddm_mode - Force device into rddm mode
 * @mhi_cntrl: MHI controller
 */
int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl);

/**
 * mhi_get_exec_env - Get BHI execution environment of the device
 * @mhi_cntrl: MHI controller
 */
enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl);

/**
 * mhi_get_mhi_state - Get MHI state of the device
 * @mhi_cntrl: MHI controller
 */
enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl);

/**
 * mhi_soc_reset - Trigger a device reset. This can be used as a last resort
 *		   to reset and recover a device.
 * @mhi_cntrl: MHI controller
 */
void mhi_soc_reset(struct mhi_controller *mhi_cntrl);
722  
/**
 * mhi_device_get - Disable device low power mode
 * @mhi_dev: Device associated with the channel
 */
void mhi_device_get(struct mhi_device *mhi_dev);

/**
 * mhi_device_get_sync - Disable device low power mode. Synchronously
 *                       take the controller out of suspended state
 * @mhi_dev: Device associated with the channel
 */
int mhi_device_get_sync(struct mhi_device *mhi_dev);

/**
 * mhi_device_put - Re-enable device low power mode
 * @mhi_dev: Device associated with the channel
 */
void mhi_device_put(struct mhi_device *mhi_dev);

/**
 * mhi_prepare_for_transfer - Setup UL and DL channels for data transfer.
 * @mhi_dev: Device associated with the channels
 *
 * Allocate and initialize the channel context and also issue the START channel
 * command to both channels. Channels can be started only if both host and
 * device execution environments match and channels are in a DISABLED state.
 */
int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);

/**
 * mhi_prepare_for_transfer_autoqueue - Setup UL and DL channels with auto queue
 *                                      buffers for DL traffic
 * @mhi_dev: Device associated with the channels
 *
 * Allocate and initialize the channel context and also issue the START channel
 * command to both channels. Channels can be started only if both host and
 * device execution environments match and channels are in a DISABLED state.
 * The MHI core will automatically allocate and queue buffers for the DL traffic.
 */
int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev);
763  
/**
 * mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer.
 * @mhi_dev: Device associated with the channels
 *
 * Issue the RESET channel command and let the device clean-up the context so
 * no incoming transfers are seen on the host. Free memory associated with the
 * context on host. If device is unresponsive, only perform a host side
 * clean-up. Channels can be reset only if both host and device execution
 * environments match and channels are in an ENABLED, STOPPED or SUSPENDED
 * state.
 */
void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev);

/**
 * mhi_queue_dma - Send or receive DMA mapped buffers from client device
 *                 over MHI channel
 * @mhi_dev: Device associated with the channels
 * @dir: DMA direction for the channel
 * @mhi_buf: Buffer for holding the DMA mapped data
 * @len: Buffer length
 * @mflags: MHI transfer flags used for the transfer
 */
int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags);

/**
 * mhi_queue_buf - Send or receive raw buffers from client device over MHI
 *                 channel
 * @mhi_dev: Device associated with the channels
 * @dir: DMA direction for the channel
 * @buf: Buffer for holding the data
 * @len: Buffer length
 * @mflags: MHI transfer flags used for the transfer
 */
int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  void *buf, size_t len, enum mhi_flags mflags);

/**
 * mhi_queue_skb - Send or receive SKBs from client device over MHI channel
 * @mhi_dev: Device associated with the channels
 * @dir: DMA direction for the channel
 * @skb: Buffer for holding SKBs
 * @len: Buffer length
 * @mflags: MHI transfer flags used for the transfer
 */
int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  struct sk_buff *skb, size_t len, enum mhi_flags mflags);

/**
 * mhi_queue_is_full - Determine whether queueing new elements is possible
 * @mhi_dev: Device associated with the channels
 * @dir: DMA direction for the channel
 */
bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir);

/**
 * mhi_get_channel_doorbell_offset - Get the channel doorbell offset
 * @mhi_cntrl: MHI controller
 * @chdb_offset: Read channel doorbell offset
 *
 * Return: 0 if the read succeeds, a negative error code otherwise
 */
int mhi_get_channel_doorbell_offset(struct mhi_controller *mhi_cntrl, u32 *chdb_offset);
829  
830  #endif /* _MHI_H_ */
831