/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#ifndef _MHI_INT_H
#define _MHI_INT_H

#include "../common.h"

extern const struct bus_type mhi_bus_type;

/* Host request register */
#define MHI_SOC_RESET_REQ_OFFSET	0xb0
#define MHI_SOC_RESET_REQ		BIT(0)

struct mhi_ctxt {
	struct mhi_event_ctxt *er_ctxt;
	struct mhi_chan_ctxt *chan_ctxt;
	struct mhi_cmd_ctxt *cmd_ctxt;
	dma_addr_t er_ctxt_addr;
	dma_addr_t chan_ctxt_addr;
	dma_addr_t cmd_ctxt_addr;
};

struct bhi_vec_entry {
	__le64 dma_addr;
	__le64 size;
};

enum mhi_fw_load_type {
	MHI_FW_LOAD_BHI, /* BHI only in PBL */
	MHI_FW_LOAD_BHIE, /* BHIe only in PBL */
	MHI_FW_LOAD_FBC, /* BHI in PBL followed by BHIe in SBL */
	MHI_FW_LOAD_MAX,
};

enum mhi_ch_state_type {
	MHI_CH_STATE_TYPE_RESET,
	MHI_CH_STATE_TYPE_STOP,
	MHI_CH_STATE_TYPE_START,
	MHI_CH_STATE_TYPE_MAX,
};

#define MHI_CH_STATE_TYPE_LIST			\
	ch_state_type(RESET, "RESET")		\
	ch_state_type(STOP, "STOP")		\
	ch_state_type_end(START, "START")

extern const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX];
#define TO_CH_STATE_TYPE_STR(state) (((state) >= MHI_CH_STATE_TYPE_MAX) ? \
				     "INVALID_STATE" : \
				     mhi_ch_state_type_str[(state)])
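
/*
 * Illustrative note (not part of the upstream header): the X-macro list
 * above is expanded by the core code to build the string table declared
 * above, roughly along these lines:
 *
 *	#define ch_state_type(a, b)		[MHI_CH_STATE_TYPE_##a] = b,
 *	#define ch_state_type_end(a, b)	[MHI_CH_STATE_TYPE_##a] = b
 *
 *	const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = {
 *		MHI_CH_STATE_TYPE_LIST
 *	};
 *
 * The MHI_EE_LIST, DEV_ST_TRANSITION_LIST and MHI_PM_STATE_LIST macros
 * below follow the same pattern with their own mhi_ee()/dev_st_trans()/
 * mhi_pm_state() expansions.
 */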

#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \
				    mode != MHI_DB_BRST_ENABLE)

#define MHI_EE_LIST				\
	mhi_ee(PBL, "PRIMARY BOOTLOADER")	\
	mhi_ee(SBL, "SECONDARY BOOTLOADER")	\
	mhi_ee(AMSS, "MISSION MODE")		\
	mhi_ee(RDDM, "RAMDUMP DOWNLOAD MODE")	\
	mhi_ee(WFW, "WLAN FIRMWARE")		\
	mhi_ee(PTHRU, "PASS THROUGH")		\
	mhi_ee(EDL, "EMERGENCY DOWNLOAD")	\
	mhi_ee(FP, "FLASH PROGRAMMER")		\
	mhi_ee(DISABLE_TRANSITION, "DISABLE")	\
	mhi_ee_end(NOT_SUPPORTED, "NOT SUPPORTED")

extern const char * const mhi_ee_str[MHI_EE_MAX];
#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \
			     "INVALID_EE" : mhi_ee_str[ee])

#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \
			ee == MHI_EE_EDL)
#define MHI_POWER_UP_CAPABLE(ee) (MHI_IN_PBL(ee) || ee == MHI_EE_AMSS)
#define MHI_FW_LOAD_CAPABLE(ee) (ee == MHI_EE_PBL || ee == MHI_EE_EDL)
#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW || \
				 ee == MHI_EE_FP)

enum dev_st_transition {
	DEV_ST_TRANSITION_PBL,
	DEV_ST_TRANSITION_READY,
	DEV_ST_TRANSITION_SBL,
	DEV_ST_TRANSITION_MISSION_MODE,
	DEV_ST_TRANSITION_FP,
	DEV_ST_TRANSITION_SYS_ERR,
	DEV_ST_TRANSITION_DISABLE,
	DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE,
	DEV_ST_TRANSITION_MAX,
};

#define DEV_ST_TRANSITION_LIST				\
	dev_st_trans(PBL, "PBL")			\
	dev_st_trans(READY, "READY")			\
	dev_st_trans(SBL, "SBL")			\
	dev_st_trans(MISSION_MODE, "MISSION MODE")	\
	dev_st_trans(FP, "FLASH PROGRAMMER")		\
	dev_st_trans(SYS_ERR, "SYS ERROR")		\
	dev_st_trans(DISABLE, "DISABLE")		\
	dev_st_trans_end(DISABLE_DESTROY_DEVICE, "DISABLE (DESTROY DEVICE)")

extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX];
#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \
				       "INVALID_STATE" : dev_state_tran_str[state])

/* internal power states */
enum mhi_pm_state {
	MHI_PM_STATE_DISABLE,
	MHI_PM_STATE_POR,
	MHI_PM_STATE_M0,
	MHI_PM_STATE_M2,
	MHI_PM_STATE_M3_ENTER,
	MHI_PM_STATE_M3,
	MHI_PM_STATE_M3_EXIT,
	MHI_PM_STATE_FW_DL_ERR,
	MHI_PM_STATE_SYS_ERR_DETECT,
	MHI_PM_STATE_SYS_ERR_PROCESS,
	MHI_PM_STATE_SYS_ERR_FAIL,
	MHI_PM_STATE_SHUTDOWN_PROCESS,
	MHI_PM_STATE_LD_ERR_FATAL_DETECT,
	MHI_PM_STATE_MAX
};

#define MHI_PM_STATE_LIST						\
	mhi_pm_state(DISABLE, "DISABLE")				\
	mhi_pm_state(POR, "POWER ON RESET")				\
	mhi_pm_state(M0, "M0")						\
	mhi_pm_state(M2, "M2")						\
	mhi_pm_state(M3_ENTER, "M?->M3")				\
	mhi_pm_state(M3, "M3")						\
	mhi_pm_state(M3_EXIT, "M3->M0")					\
	mhi_pm_state(FW_DL_ERR, "Firmware Download Error")		\
	mhi_pm_state(SYS_ERR_DETECT, "SYS ERROR Detect")		\
	mhi_pm_state(SYS_ERR_PROCESS, "SYS ERROR Process")		\
	mhi_pm_state(SYS_ERR_FAIL, "SYS ERROR Failure")			\
	mhi_pm_state(SHUTDOWN_PROCESS, "SHUTDOWN Process")		\
	mhi_pm_state_end(LD_ERR_FATAL_DETECT, "Linkdown or Error Fatal Detect")

#define MHI_PM_DISABLE			BIT(0)
#define MHI_PM_POR			BIT(1)
#define MHI_PM_M0			BIT(2)
#define MHI_PM_M2			BIT(3)
#define MHI_PM_M3_ENTER			BIT(4)
#define MHI_PM_M3			BIT(5)
#define MHI_PM_M3_EXIT			BIT(6)
/* firmware download failure state */
#define MHI_PM_FW_DL_ERR		BIT(7)
#define MHI_PM_SYS_ERR_DETECT		BIT(8)
#define MHI_PM_SYS_ERR_PROCESS		BIT(9)
#define MHI_PM_SYS_ERR_FAIL		BIT(10)
#define MHI_PM_SHUTDOWN_PROCESS		BIT(11)
/* link not accessible */
#define MHI_PM_LD_ERR_FATAL_DETECT	BIT(12)
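
/*
 * Illustrative note (assumption, not upstream documentation): each MHI_PM_*
 * mask above is BIT() of the matching enum mhi_pm_state value, e.g.
 * MHI_PM_M0 == BIT(MHI_PM_STATE_M0). pm_state holds exactly one of these
 * bits at a time, so a mask can be turned back into an enum index (for
 * instance to look up its name string) with something like:
 *
 *	unsigned int index = __fls(pm_state);
 *
 * which is essentially what to_mhi_pm_state_str() relies on.
 */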

#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
				MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
				MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
				MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS | \
				MHI_PM_FW_DL_ERR)))
#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR)
#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT)
#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & mhi_cntrl->db_access)
#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \
						MHI_PM_M2 | MHI_PM_M3_EXIT))
#define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2)
#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state)
#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \
					    MHI_PM_IN_ERROR_STATE(pm_state))
#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \
					   (MHI_PM_M3_ENTER | MHI_PM_M3))
#define MHI_PM_FATAL_ERROR(pm_state) ((pm_state == MHI_PM_FW_DL_ERR) || \
				      (pm_state >= MHI_PM_SYS_ERR_FAIL))
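
/*
 * Illustrative sketch (assumption, not a verbatim copy of the core code):
 * callers sample pm_state under pm_lock and gate register or doorbell
 * access on the predicates above, e.g.:
 *
 *	read_lock_bh(&mhi_cntrl->pm_lock);
 *	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
 *		mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, offset, val);
 *	read_unlock_bh(&mhi_cntrl->pm_lock);
 */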

#define NR_OF_CMD_RINGS			1
#define CMD_EL_PER_RING			128
#define PRIMARY_CMD_RING		0
#define MHI_DEV_WAKE_DB			127
#define MHI_MAX_MTU			0xffff
#define MHI_RANDOM_U32_NONZERO(bmsk)	(get_random_u32_inclusive(1, bmsk))

enum mhi_er_type {
	MHI_ER_TYPE_INVALID = 0x0,
	MHI_ER_TYPE_VALID = 0x1,
};

struct db_cfg {
	bool reset_req;
	bool db_mode;
	u32 pollcfg;
	enum mhi_db_brst_mode brstmode;
	dma_addr_t db_val;
	void (*process_db)(struct mhi_controller *mhi_cntrl,
			   struct db_cfg *db_cfg, void __iomem *io_addr,
			   dma_addr_t db_val);
};

struct mhi_pm_transitions {
	enum mhi_pm_state from_state;
	u32 to_states;
};
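
/*
 * Illustrative sketch (assumption): the core keeps a table of these entries,
 * one per current state, with every legal target state OR-ed into to_states,
 * e.g.:
 *
 *	{ MHI_PM_M0, MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_SYS_ERR_DETECT |
 *		     MHI_PM_SHUTDOWN_PROCESS },
 *
 * mhi_tryset_pm_state() only moves pm_state to the requested value when the
 * matching bit is set in to_states for the current from_state.
 */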

struct state_transition {
	struct list_head node;
	enum dev_st_transition state;
};

struct mhi_ring {
	dma_addr_t dma_handle;
	dma_addr_t iommu_base;
	__le64 *ctxt_wp; /* point to ctxt wp */
	void *pre_aligned;
	void *base;
	void *rp;
	void *wp;
	size_t el_size;
	size_t len;
	size_t elements;
	size_t alloc_size;
	void __iomem *db_addr;
};
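
/*
 * Illustrative sketch (assumption): rp and wp are virtual addresses inside
 * [base, base + len), and advancing a ring pointer wraps at the end of the
 * ring:
 *
 *	ring->wp += ring->el_size;
 *	if (ring->wp >= (ring->base + ring->len))
 *		ring->wp = ring->base;
 *
 * The device-visible write pointer is then mirrored through ctxt_wp as a
 * little-endian DMA address, offset from iommu_base by (wp - base).
 */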

struct mhi_cmd {
	struct mhi_ring ring;
	spinlock_t lock;
};

struct mhi_buf_info {
	void *v_addr;
	void *bb_addr;
	void *wp;
	void *cb_buf;
	dma_addr_t p_addr;
	size_t len;
	enum dma_data_direction dir;
	bool used; /* Indicates whether the buffer is used or not */
	bool pre_mapped; /* Already pre-mapped by client */
};

struct mhi_event {
	struct mhi_controller *mhi_cntrl;
	struct mhi_chan *mhi_chan; /* dedicated to channel */
	u32 er_index;
	u32 intmod;
	u32 irq;
	int chan; /* this event ring is dedicated to a channel (optional) */
	u32 priority;
	enum mhi_er_data_type data_type;
	struct mhi_ring ring;
	struct db_cfg db_cfg;
	struct tasklet_struct task;
	spinlock_t lock;
	int (*process_event)(struct mhi_controller *mhi_cntrl,
			     struct mhi_event *mhi_event,
			     u32 event_quota);
	bool hw_ring;
	bool cl_manage;
	bool offload_ev; /* managed by a device driver */
};
struct mhi_chan {
	const char *name;
	/*
	 * Important: When consuming, increment tre_ring first and when
	 * releasing, decrement buf_ring first. If tre_ring has space, buf_ring
	 * is guaranteed to have space so we do not need to check both rings
	 * (see the illustrative note after this struct).
	 */
	struct mhi_ring buf_ring;
	struct mhi_ring tre_ring;
	u32 chan;
	u32 er_index;
	u32 intmod;
	enum mhi_ch_type type;
	enum dma_data_direction dir;
	struct db_cfg db_cfg;
	enum mhi_ch_ee_mask ee_mask;
	enum mhi_ch_state ch_state;
	enum mhi_ev_ccs ccs;
	struct mhi_device *mhi_dev;
	void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result);
	struct mutex mutex;
	struct completion completion;
	rwlock_t lock;
	struct list_head node;
	bool lpm_notify;
	bool configured;
	bool offload_ch;
	bool pre_alloc;
	bool wake_capable;
};
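
/*
 * Illustrative sketch (assumption; the ring-full helper name is taken from
 * the core implementation) of the ordering rule documented inside
 * struct mhi_chan: both rings hold the same number of elements, so a free
 * slot in tre_ring implies a free slot in buf_ring and only tre_ring needs
 * to be checked when queueing:
 *
 *	if (mhi_is_ring_full(mhi_cntrl, &mhi_chan->tre_ring))
 *		return -EAGAIN;
 *	...write the TRE, then advance tre_ring followed by buf_ring...
 *
 * On completion the buf_ring element is released first, so tre_ring never
 * reports more free space than buf_ring actually has.
 */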

/* Default MHI timeout */
#define MHI_TIMEOUT_MS (1000)

/* debugfs related functions */
#ifdef CONFIG_MHI_BUS_DEBUG
void mhi_create_debugfs(struct mhi_controller *mhi_cntrl);
void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl);
void mhi_debugfs_init(void);
void mhi_debugfs_exit(void);
#else
static inline void mhi_create_debugfs(struct mhi_controller *mhi_cntrl)
{
}

static inline void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl)
{
}

static inline void mhi_debugfs_init(void)
{
}

static inline void mhi_debugfs_exit(void)
{
}
#endif

struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl);

int mhi_destroy_device(struct device *dev, void *data);
void mhi_create_devices(struct mhi_controller *mhi_cntrl);

int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
			 struct image_info **image_info, size_t alloc_size);
void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
			 struct image_info *image_info);

/* Power management APIs */
enum mhi_pm_state __must_check mhi_tryset_pm_state(
					struct mhi_controller *mhi_cntrl,
					enum mhi_pm_state state);
const char *to_mhi_pm_state_str(u32 state);
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
			       enum dev_st_transition state);
void mhi_pm_st_worker(struct work_struct *work);
void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl);
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl);
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl);
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl);
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl);
int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
		 enum mhi_cmd_type cmd);
int mhi_download_amss_image(struct mhi_controller *mhi_cntrl);
static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl)
{
	return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
		mhi_cntrl->dev_state <= MHI_STATE_M3_FAST);
}

static inline void mhi_trigger_resume(struct mhi_controller *mhi_cntrl)
{
	pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
	mhi_cntrl->runtime_get(mhi_cntrl);
	mhi_cntrl->runtime_put(mhi_cntrl);
}

/* Register access methods */
void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg,
		     void __iomem *db_addr, dma_addr_t db_val);
void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
			     struct db_cfg *db_mode, void __iomem *db_addr,
			     dma_addr_t db_val);
int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
			      void __iomem *base, u32 offset, u32 *out);
int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
				    void __iomem *base, u32 offset, u32 mask,
				    u32 *out);
int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
				    void __iomem *base, u32 offset, u32 mask,
				    u32 val, u32 delayus, u32 timeout_ms);
void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
		   u32 offset, u32 val);
int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
				     void __iomem *base, u32 offset, u32 mask,
				     u32 val);
void mhi_ring_er_db(struct mhi_event *mhi_event);
void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
		  dma_addr_t db_val);
void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd);
void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
		      struct mhi_chan *mhi_chan);

/* Initialization methods */
int mhi_init_mmio(struct mhi_controller *mhi_cntrl);
int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
		     struct image_info *img_info);
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);

/* Automatically allocate and queue inbound buffers */
#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0)
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
		       struct mhi_chan *mhi_chan);
void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
			  struct mhi_chan *mhi_chan);
void mhi_reset_chan(struct mhi_controller *mhi_cntrl,
		    struct mhi_chan *mhi_chan);

/* Event processing methods */
void mhi_ctrl_ev_task(unsigned long data);
void mhi_ev_task(unsigned long data);
int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
				struct mhi_event *mhi_event, u32 event_quota);
int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
			     struct mhi_event *mhi_event, u32 event_quota);
void mhi_uevent_notify(struct mhi_controller *mhi_cntrl, enum mhi_ee_type ee);

/* ISR handlers */
irqreturn_t mhi_irq_handler(int irq_number, void *dev);
irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev);
irqreturn_t mhi_intvec_handler(int irq_number, void *dev);

int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
		struct mhi_buf_info *info, enum mhi_flags flags);
int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
			 struct mhi_buf_info *buf_info);
int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
			  struct mhi_buf_info *buf_info);
void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
			    struct mhi_buf_info *buf_info);
void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
			     struct mhi_buf_info *buf_info);

#endif /* _MHI_INT_H */