1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Universal Flash Storage Host controller driver Core
4 * Copyright (C) 2011-2013 Samsung India Software Operations
5 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
6 *
7 * Authors:
8 * Santosh Yaraganavi <santosh.sy@samsung.com>
9 * Vinayak Holikatti <h.vinayak@samsung.com>
10 */
11
12 #include <linux/async.h>
13 #include <linux/devfreq.h>
14 #include <linux/nls.h>
15 #include <linux/of.h>
16 #include <linux/bitfield.h>
17 #include <linux/blk-pm.h>
18 #include <linux/blkdev.h>
19 #include <linux/clk.h>
20 #include <linux/delay.h>
21 #include <linux/interrupt.h>
22 #include <linux/module.h>
23 #include <linux/pm_opp.h>
24 #include <linux/regulator/consumer.h>
25 #include <linux/sched/clock.h>
26 #include <linux/iopoll.h>
27 #include <scsi/scsi_cmnd.h>
28 #include <scsi/scsi_dbg.h>
29 #include <scsi/scsi_driver.h>
30 #include <scsi/scsi_eh.h>
31 #include "ufshcd-priv.h"
32 #include <ufs/ufs_quirks.h>
33 #include <ufs/unipro.h>
34 #include "ufs-sysfs.h"
35 #include "ufs-debugfs.h"
36 #include "ufs-fault-injection.h"
37 #include "ufs_bsg.h"
38 #include "ufshcd-crypto.h"
39 #include <linux/unaligned.h>
40
41 #define CREATE_TRACE_POINTS
42 #include "ufs_trace.h"
43
44 #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
45 UTP_TASK_REQ_COMPL |\
46 UFSHCD_ERROR_MASK)
47
48 #define UFSHCD_ENABLE_MCQ_INTRS (UTP_TASK_REQ_COMPL |\
49 UFSHCD_ERROR_MASK |\
50 MCQ_CQ_EVENT_STATUS)
51
52
53 /* UIC command timeout, unit: ms */
54 enum {
55 UIC_CMD_TIMEOUT_DEFAULT = 500,
56 UIC_CMD_TIMEOUT_MAX = 2000,
57 };
58 /* NOP OUT retries waiting for NOP IN response */
59 #define NOP_OUT_RETRIES 10
60 /* Timeout after 50 msecs if NOP OUT hangs without response */
61 #define NOP_OUT_TIMEOUT 50 /* msecs */
62
63 /* Query request retries */
64 #define QUERY_REQ_RETRIES 3
65 /* Query request timeout */
66 #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
67
68 /* Advanced RPMB request timeout */
69 #define ADVANCED_RPMB_REQ_TIMEOUT 3000 /* 3 seconds */
70
71 /* Task management command timeout */
72 #define TM_CMD_TIMEOUT 100 /* msecs */
73
74 /* maximum number of retries for a general UIC command */
75 #define UFS_UIC_COMMAND_RETRIES 3
76
77 /* maximum number of link-startup retries */
78 #define DME_LINKSTARTUP_RETRIES 3
79
80 /* maximum number of reset retries before giving up */
81 #define MAX_HOST_RESET_RETRIES 5
82
83 /* Maximum number of error handler retries before giving up */
84 #define MAX_ERR_HANDLER_RETRIES 5
85
86 /* Expose the flag value from utp_upiu_query.value */
87 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
88
89 /* Interrupt aggregation default timeout, unit: 40us */
90 #define INT_AGGR_DEF_TO 0x02
91
92 /* default delay of autosuspend: 2000 ms */
93 #define RPM_AUTOSUSPEND_DELAY_MS 2000
94
95 /* Default delay of RPM device flush delayed work */
96 #define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
97
98 /* Default value of wait time before gating device ref clock */
99 #define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
100
101 /* Polling time to wait for fDeviceInit */
102 #define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
103
104 /* Default RTC update every 10 seconds */
105 #define UFS_RTC_UPDATE_INTERVAL_MS (10 * MSEC_PER_SEC)
106
107 /* bMaxNumOfRTT is equal to two after device manufacturing */
108 #define DEFAULT_MAX_NUM_RTT 2
109
110 /* UFSHCI 4.0 compliant host controllers support this mode. */
111 static bool use_mcq_mode = true;
112
113 static bool is_mcq_supported(struct ufs_hba *hba)
114 {
115 return hba->mcq_sup && use_mcq_mode;
116 }
117
118 module_param(use_mcq_mode, bool, 0644);
119 MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
120
121 static unsigned int uic_cmd_timeout = UIC_CMD_TIMEOUT_DEFAULT;
122
123 static int uic_cmd_timeout_set(const char *val, const struct kernel_param *kp)
124 {
125 return param_set_uint_minmax(val, kp, UIC_CMD_TIMEOUT_DEFAULT,
126 UIC_CMD_TIMEOUT_MAX);
127 }
128
129 static const struct kernel_param_ops uic_cmd_timeout_ops = {
130 .set = uic_cmd_timeout_set,
131 .get = param_get_uint,
132 };
133
134 module_param_cb(uic_cmd_timeout, &uic_cmd_timeout_ops, &uic_cmd_timeout, 0644);
135 MODULE_PARM_DESC(uic_cmd_timeout,
136 "UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 2 seconds inclusively");
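/*
 * Example (illustrative, assumes the usual ufshcd-core module name): the
 * timeout can be changed at runtime through
 *   /sys/module/ufshcd_core/parameters/uic_cmd_timeout
 * Values outside the 500..2000 ms range are rejected by
 * uic_cmd_timeout_set() above.
 */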
137
138 #define ufshcd_toggle_vreg(_dev, _vreg, _on) \
139 ({ \
140 int _ret; \
141 if (_on) \
142 _ret = ufshcd_enable_vreg(_dev, _vreg); \
143 else \
144 _ret = ufshcd_disable_vreg(_dev, _vreg); \
145 _ret; \
146 })
147
148 #define ufshcd_hex_dump(prefix_str, buf, len) do { \
149 size_t __len = (len); \
150 print_hex_dump(KERN_ERR, prefix_str, \
151 __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
152 16, 4, buf, __len, false); \
153 } while (0)
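/*
 * Example (illustrative): ufshcd_print_tr() below dumps a transfer request
 * descriptor with
 *   ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
 *                   sizeof(struct utp_transfer_req_desc));
 * Buffers larger than four bytes are printed with offset prefixes.
 */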
154
155 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
156 const char *prefix)
157 {
158 u32 *regs;
159 size_t pos;
160
161 if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
162 return -EINVAL;
163
164 regs = kzalloc(len, GFP_ATOMIC);
165 if (!regs)
166 return -ENOMEM;
167
168 for (pos = 0; pos < len; pos += 4) {
169 if (offset == 0 &&
170 pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER &&
171 pos <= REG_UIC_ERROR_CODE_DME)
172 continue;
173 regs[pos / 4] = ufshcd_readl(hba, offset + pos);
174 }
175
176 ufshcd_hex_dump(prefix, regs, len);
177 kfree(regs);
178
179 return 0;
180 }
181 EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
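/*
 * Example (illustrative): dump the whole standard register space with a
 * "host_regs: " prefix, as ufshcd_print_evt_hist() below does:
 *   ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
 * Both offset and len must be 4-byte aligned, otherwise -EINVAL is returned.
 */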
182
183 enum {
184 UFSHCD_MAX_CHANNEL = 0,
185 UFSHCD_MAX_ID = 1,
186 };
187
188 static const char *const ufshcd_state_name[] = {
189 [UFSHCD_STATE_RESET] = "reset",
190 [UFSHCD_STATE_OPERATIONAL] = "operational",
191 [UFSHCD_STATE_ERROR] = "error",
192 [UFSHCD_STATE_EH_SCHEDULED_FATAL] = "eh_fatal",
193 [UFSHCD_STATE_EH_SCHEDULED_NON_FATAL] = "eh_non_fatal",
194 };
195
196 /* UFSHCD error handling flags */
197 enum {
198 UFSHCD_EH_IN_PROGRESS = (1 << 0),
199 };
200
201 /* UFSHCD UIC layer error flags */
202 enum {
203 UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
204 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
205 UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
206 UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
207 UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
208 UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
209 UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
210 };
211
212 #define ufshcd_set_eh_in_progress(h) \
213 ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
214 #define ufshcd_eh_in_progress(h) \
215 ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
216 #define ufshcd_clear_eh_in_progress(h) \
217 ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
218
219 const struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
220 [UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
221 [UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
222 [UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
223 [UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
224 [UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
225 [UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
226 /*
227 * For DeepSleep, the link is first put in hibern8 and then off.
228 * Leaving the link in hibern8 is not supported.
229 */
230 [UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
231 };
232
233 static inline enum ufs_dev_pwr_mode
234 ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
235 {
236 return ufs_pm_lvl_states[lvl].dev_state;
237 }
238
239 static inline enum uic_link_state
240 ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
241 {
242 return ufs_pm_lvl_states[lvl].link_state;
243 }
244
245 static inline enum ufs_pm_level
246 ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
247 enum uic_link_state link_state)
248 {
249 enum ufs_pm_level lvl;
250
251 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
252 if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
253 (ufs_pm_lvl_states[lvl].link_state == link_state))
254 return lvl;
255 }
256
257 /* if no match found, return the level 0 */
258 return UFS_PM_LVL_0;
259 }
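/*
 * Example (illustrative): per the ufs_pm_lvl_states[] table above,
 *   ufs_get_desired_pm_lvl_for_dev_link_state(UFS_SLEEP_PWR_MODE,
 *                                             UIC_LINK_HIBERN8_STATE)
 * returns UFS_PM_LVL_3; an unknown combination falls back to UFS_PM_LVL_0.
 */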
260
261 static bool ufshcd_has_pending_tasks(struct ufs_hba *hba)
262 {
263 return hba->outstanding_tasks || hba->active_uic_cmd ||
264 hba->uic_async_done;
265 }
266
267 static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
268 {
269 return scsi_host_busy(hba->host) || ufshcd_has_pending_tasks(hba);
270 }
271
272 static const struct ufs_dev_quirk ufs_fixups[] = {
273 /* UFS cards deviations table */
274 { .wmanufacturerid = UFS_VENDOR_MICRON,
275 .model = UFS_ANY_MODEL,
276 .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
277 { .wmanufacturerid = UFS_VENDOR_SAMSUNG,
278 .model = UFS_ANY_MODEL,
279 .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
280 UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
281 UFS_DEVICE_QUIRK_PA_HIBER8TIME |
282 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
283 { .wmanufacturerid = UFS_VENDOR_SKHYNIX,
284 .model = UFS_ANY_MODEL,
285 .quirk = UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME },
286 { .wmanufacturerid = UFS_VENDOR_SKHYNIX,
287 .model = "hB8aL1" /*H28U62301AMR*/,
288 .quirk = UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME },
289 { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
290 .model = UFS_ANY_MODEL,
291 .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
292 { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
293 .model = "THGLF2G9C8KBADG",
294 .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
295 { .wmanufacturerid = UFS_VENDOR_TOSHIBA,
296 .model = "THGLF2G9D8KBADG",
297 .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
298 {}
299 };
300
301 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
302 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
303 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
304 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
305 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
306 static void ufshcd_hba_exit(struct ufs_hba *hba);
307 static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params);
308 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
309 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
310 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
311 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
312 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
313 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
314 static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
315 bool scale_up);
316 static irqreturn_t ufshcd_intr(int irq, void *__hba);
317 static int ufshcd_change_power_mode(struct ufs_hba *hba,
318 struct ufs_pa_layer_attr *pwr_mode);
319 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
320 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
321 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
322 struct ufs_vreg *vreg);
323 static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
324 bool enable);
325 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
326 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
327
328 void ufshcd_enable_irq(struct ufs_hba *hba)
329 {
330 if (!hba->is_irq_enabled) {
331 enable_irq(hba->irq);
332 hba->is_irq_enabled = true;
333 }
334 }
335 EXPORT_SYMBOL_GPL(ufshcd_enable_irq);
336
337 void ufshcd_disable_irq(struct ufs_hba *hba)
338 {
339 if (hba->is_irq_enabled) {
340 disable_irq(hba->irq);
341 hba->is_irq_enabled = false;
342 }
343 }
344 EXPORT_SYMBOL_GPL(ufshcd_disable_irq);
345
346 static void ufshcd_configure_wb(struct ufs_hba *hba)
347 {
348 if (!ufshcd_is_wb_allowed(hba))
349 return;
350
351 ufshcd_wb_toggle(hba, true);
352
353 ufshcd_wb_toggle_buf_flush_during_h8(hba, true);
354
355 if (ufshcd_is_wb_buf_flush_allowed(hba))
356 ufshcd_wb_toggle_buf_flush(hba, true);
357 }
358
359 static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
360 enum ufs_trace_str_t str_t)
361 {
362 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
363 struct utp_upiu_header *header;
364
365 if (!trace_ufshcd_upiu_enabled())
366 return;
367
368 if (str_t == UFS_CMD_SEND)
369 header = &rq->header;
370 else
371 header = &hba->lrb[tag].ucd_rsp_ptr->header;
372
373 trace_ufshcd_upiu(hba, str_t, header, &rq->sc.cdb,
374 UFS_TSF_CDB);
375 }
376
377 static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
378 enum ufs_trace_str_t str_t,
379 struct utp_upiu_req *rq_rsp)
380 {
381 if (!trace_ufshcd_upiu_enabled())
382 return;
383
384 trace_ufshcd_upiu(hba, str_t, &rq_rsp->header,
385 &rq_rsp->qr, UFS_TSF_OSF);
386 }
387
388 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
389 enum ufs_trace_str_t str_t)
390 {
391 struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];
392
393 if (!trace_ufshcd_upiu_enabled())
394 return;
395
396 if (str_t == UFS_TM_SEND)
397 trace_ufshcd_upiu(hba, str_t,
398 &descp->upiu_req.req_header,
399 &descp->upiu_req.input_param1,
400 UFS_TSF_TM_INPUT);
401 else
402 trace_ufshcd_upiu(hba, str_t,
403 &descp->upiu_rsp.rsp_header,
404 &descp->upiu_rsp.output_param1,
405 UFS_TSF_TM_OUTPUT);
406 }
407
408 static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
409 const struct uic_command *ucmd,
410 enum ufs_trace_str_t str_t)
411 {
412 u32 cmd;
413
414 if (!trace_ufshcd_uic_command_enabled())
415 return;
416
417 if (str_t == UFS_CMD_SEND)
418 cmd = ucmd->command;
419 else
420 cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
421
422 trace_ufshcd_uic_command(hba, str_t, cmd,
423 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
424 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
425 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
426 }
427
428 static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
429 enum ufs_trace_str_t str_t)
430 {
431 u64 lba = 0;
432 u8 opcode = 0, group_id = 0;
433 u32 doorbell = 0;
434 u32 intr;
435 int hwq_id = -1;
436 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
437 struct scsi_cmnd *cmd = lrbp->cmd;
438 struct request *rq = scsi_cmd_to_rq(cmd);
439 int transfer_len = -1;
440
441 if (!cmd)
442 return;
443
444 /* trace UPIU also */
445 ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
446 if (!trace_ufshcd_command_enabled())
447 return;
448
449 opcode = cmd->cmnd[0];
450
451 if (opcode == READ_10 || opcode == WRITE_10) {
452 /*
453 * Currently we only fully trace read(10) and write(10) commands
454 */
455 transfer_len =
456 be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
457 lba = scsi_get_lba(cmd);
458 if (opcode == WRITE_10)
459 group_id = lrbp->cmd->cmnd[6];
460 } else if (opcode == UNMAP) {
461 /*
462 * The number of Bytes to be unmapped beginning with the lba.
463 */
464 transfer_len = blk_rq_bytes(rq);
465 lba = scsi_get_lba(cmd);
466 }
467
468 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
469
470 if (hba->mcq_enabled) {
471 struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);
472
473 hwq_id = hwq->id;
474 } else {
475 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
476 }
477 trace_ufshcd_command(cmd->device, hba, str_t, tag, doorbell, hwq_id,
478 transfer_len, intr, lba, opcode, group_id);
479 }
480
481 static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
482 {
483 struct ufs_clk_info *clki;
484 struct list_head *head = &hba->clk_list_head;
485
486 if (list_empty(head))
487 return;
488
489 list_for_each_entry(clki, head, list) {
490 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
491 clki->max_freq)
492 dev_err(hba->dev, "clk: %s, rate: %u\n",
493 clki->name, clki->curr_freq);
494 }
495 }
496
497 static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
498 const char *err_name)
499 {
500 int i;
501 bool found = false;
502 const struct ufs_event_hist *e;
503
504 if (id >= UFS_EVT_CNT)
505 return;
506
507 e = &hba->ufs_stats.event[id];
508
509 for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
510 int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;
511
512 if (e->tstamp[p] == 0)
513 continue;
514 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
515 e->val[p], div_u64(e->tstamp[p], 1000));
516 found = true;
517 }
518
519 if (!found)
520 dev_err(hba->dev, "No record of %s\n", err_name);
521 else
522 dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
523 }
524
525 static void ufshcd_print_evt_hist(struct ufs_hba *hba)
526 {
527 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
528
529 ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
530 ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
531 ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
532 ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
533 ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
534 ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
535 "auto_hibern8_err");
536 ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
537 ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
538 "link_startup_fail");
539 ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
540 ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
541 "suspend_fail");
542 ufshcd_print_evt(hba, UFS_EVT_WL_RES_ERR, "wlun resume_fail");
543 ufshcd_print_evt(hba, UFS_EVT_WL_SUSP_ERR,
544 "wlun suspend_fail");
545 ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
546 ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
547 ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");
548
549 ufshcd_vops_dbg_register_dump(hba);
550 }
551
552 static
553 void ufshcd_print_tr(struct ufs_hba *hba, int tag, bool pr_prdt)
554 {
555 const struct ufshcd_lrb *lrbp;
556 int prdt_length;
557
558 lrbp = &hba->lrb[tag];
559
560 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
561 tag, div_u64(lrbp->issue_time_stamp_local_clock, 1000));
562 dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
563 tag, div_u64(lrbp->compl_time_stamp_local_clock, 1000));
564 dev_err(hba->dev,
565 "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
566 tag, (u64)lrbp->utrd_dma_addr);
567
568 ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
569 sizeof(struct utp_transfer_req_desc));
570 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
571 (u64)lrbp->ucd_req_dma_addr);
572 ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
573 sizeof(struct utp_upiu_req));
574 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
575 (u64)lrbp->ucd_rsp_dma_addr);
576 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
577 sizeof(struct utp_upiu_rsp));
578
579 prdt_length = le16_to_cpu(
580 lrbp->utr_descriptor_ptr->prd_table_length);
581 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
582 prdt_length /= ufshcd_sg_entry_size(hba);
583
584 dev_err(hba->dev,
585 "UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
586 tag, prdt_length,
587 (u64)lrbp->ucd_prdt_dma_addr);
588
589 if (pr_prdt)
590 ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
591 ufshcd_sg_entry_size(hba) * prdt_length);
592 }
593
594 static bool ufshcd_print_tr_iter(struct request *req, void *priv)
595 {
596 struct scsi_device *sdev = req->q->queuedata;
597 struct Scsi_Host *shost = sdev->host;
598 struct ufs_hba *hba = shost_priv(shost);
599
600 ufshcd_print_tr(hba, req->tag, *(bool *)priv);
601
602 return true;
603 }
604
605 /**
606 * ufshcd_print_trs_all - print trs for all started requests.
607 * @hba: per-adapter instance.
608 * @pr_prdt: need to print prdt or not.
609 */
610 static void ufshcd_print_trs_all(struct ufs_hba *hba, bool pr_prdt)
611 {
612 blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_print_tr_iter, &pr_prdt);
613 }
614
615 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
616 {
617 int tag;
618
619 for_each_set_bit(tag, &bitmap, hba->nutmrs) {
620 struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
621
622 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
623 ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
624 }
625 }
626
627 static void ufshcd_print_host_state(struct ufs_hba *hba)
628 {
629 const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
630
631 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
632 dev_err(hba->dev, "%d outstanding reqs, tasks=0x%lx\n",
633 scsi_host_busy(hba->host), hba->outstanding_tasks);
634 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
635 hba->saved_err, hba->saved_uic_err);
636 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
637 hba->curr_dev_pwr_mode, hba->uic_link_state);
638 dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
639 hba->pm_op_in_progress, hba->is_sys_suspended);
640 dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
641 hba->auto_bkops_enabled, hba->host->host_self_blocked);
642 dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
643 dev_err(hba->dev,
644 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
645 div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000),
646 hba->ufs_stats.hibern8_exit_cnt);
647 dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
648 div_u64(hba->ufs_stats.last_intr_ts, 1000),
649 hba->ufs_stats.last_intr_status);
650 dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
651 hba->eh_flags, hba->req_abort_count);
652 dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
653 hba->ufs_version, hba->capabilities, hba->caps);
654 dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
655 hba->dev_quirks);
656 if (sdev_ufs)
657 dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
658 sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);
659
660 ufshcd_print_clk_freqs(hba);
661 }
662
663 /**
664 * ufshcd_print_pwr_info - print power params as saved in hba
665 * power info
666 * @hba: per-adapter instance
667 */
668 static void ufshcd_print_pwr_info(struct ufs_hba *hba)
669 {
670 static const char * const names[] = {
671 "INVALID MODE",
672 "FAST MODE",
673 "SLOW_MODE",
674 "INVALID MODE",
675 "FASTAUTO_MODE",
676 "SLOWAUTO_MODE",
677 "INVALID MODE",
678 };
679
680 /*
681 * Use dev_dbg to avoid printing messages during runtime PM; otherwise
682 * user space writing the messages back to storage would trigger runtime
683 * resume, which would generate more messages, and so on.
684 */
685 dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
686 __func__,
687 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
688 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
689 names[hba->pwr_info.pwr_rx],
690 names[hba->pwr_info.pwr_tx],
691 hba->pwr_info.hs_rate);
692 }
693
694 static void ufshcd_device_reset(struct ufs_hba *hba)
695 {
696 int err;
697
698 err = ufshcd_vops_device_reset(hba);
699
700 if (!err) {
701 ufshcd_set_ufs_dev_active(hba);
702 if (ufshcd_is_wb_allowed(hba)) {
703 hba->dev_info.wb_enabled = false;
704 hba->dev_info.wb_buf_flush_enabled = false;
705 }
706 if (hba->dev_info.rtc_type == UFS_RTC_RELATIVE)
707 hba->dev_info.rtc_time_baseline = 0;
708 }
709 if (err != -EOPNOTSUPP)
710 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
711 }
712
713 void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
714 {
715 if (!us)
716 return;
717
718 if (us < 10)
719 udelay(us);
720 else
721 usleep_range(us, us + tolerance);
722 }
723 EXPORT_SYMBOL_GPL(ufshcd_delay_us);
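/*
 * Example (illustrative): ufshcd_delay_us(100, 10) sleeps via
 * usleep_range(100, 110); a delay shorter than 10 us would use udelay()
 * instead.
 */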
724
725 /**
726 * ufshcd_wait_for_register - wait for register value to change
727 * @hba: per-adapter interface
728 * @reg: mmio register offset
729 * @mask: mask to apply to the read register value
730 * @val: value to wait for
731 * @interval_us: polling interval in microseconds
732 * @timeout_ms: timeout in milliseconds
733 *
734 * Return: -ETIMEDOUT on error, zero on success.
735 */
736 static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
737 u32 val, unsigned long interval_us,
738 unsigned long timeout_ms)
739 {
740 u32 v;
741
742 val &= mask; /* ignore bits that we don't intend to wait on */
743
744 return read_poll_timeout(ufshcd_readl, v, (v & mask) == val,
745 interval_us, timeout_ms * 1000, false, hba, reg);
746 }
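/*
 * Example (illustrative values; 'tag' is a hypothetical request tag): poll
 * until the doorbell bit for a tag clears, checking every 20 us for at most
 * 1 ms:
 *   err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *                                  1U << tag, 0, 20, 1);
 * If the masked value still differs after timeout_ms, -ETIMEDOUT is returned.
 */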
747
748 /**
749 * ufshcd_get_intr_mask - Get the interrupt bit mask
750 * @hba: Pointer to adapter instance
751 *
752 * Return: interrupt bit mask per version
753 */
754 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
755 {
756 if (hba->ufs_version <= ufshci_version(2, 0))
757 return INTERRUPT_MASK_ALL_VER_11;
758
759 return INTERRUPT_MASK_ALL_VER_21;
760 }
761
762 /**
763 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
764 * @hba: Pointer to adapter instance
765 *
766 * Return: UFSHCI version supported by the controller
767 */
768 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
769 {
770 u32 ufshci_ver;
771
772 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
773 ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
774 else
775 ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);
776
777 /*
778 * UFSHCI v1.x uses a different version scheme. To allow
779 * comparisons with the ufshci_version() function, we convert
780 * it to the same scheme as UFS 2.0+.
781 */
782 if (ufshci_ver & 0x00010000)
783 return ufshci_version(1, ufshci_ver & 0x00000100);
784
785 return ufshci_ver;
786 }
787
788 /**
789 * ufshcd_is_device_present - Check if any device is connected to
790 * the host controller
791 * @hba: pointer to adapter instance
792 *
793 * Return: true if device present, false if no device detected
794 */
795 static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
796 {
797 return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT;
798 }
799
800 /**
801 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
802 * @lrbp: pointer to local command reference block
803 * @cqe: pointer to the completion queue entry
804 *
805 * This function is used to get the OCS field from UTRD
806 *
807 * Return: the OCS field in the UTRD.
808 */
809 static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp,
810 struct cq_entry *cqe)
811 {
812 if (cqe)
813 return le32_to_cpu(cqe->status) & MASK_OCS;
814
815 return lrbp->utr_descriptor_ptr->header.ocs & MASK_OCS;
816 }
817
818 /**
819 * ufshcd_utrl_clear() - Clear requests from the controller request list.
820 * @hba: per adapter instance
821 * @mask: mask with one bit set for each request to be cleared
822 */
823 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask)
824 {
825 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
826 mask = ~mask;
827 /*
828 * From the UFSHCI specification: "UTP Transfer Request List CLear
829 * Register (UTRLCLR): This field is bit significant. Each bit
830 * corresponds to a slot in the UTP Transfer Request List, where bit 0
831 * corresponds to request slot 0. A bit in this field is set to ‘0’
832 * by host software to indicate to the host controller that a transfer
833 * request slot is cleared. The host controller
834 * shall free up any resources associated to the request slot
835 * immediately, and shall set the associated bit in UTRLDBR to ‘0’. The
836 * host software indicates no change to request slots by setting the
837 * associated bits in this field to ‘1’. Bits in this field shall only
838 * be set ‘1’ or ‘0’ by host software when UTRLRSR is set to ‘1’."
839 */
840 ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR);
841 }
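/*
 * Example (illustrative): to clear only transfer request slot 3, a caller
 * passes mask = BIT(3); the write above then becomes ~BIT(3), i.e. every
 * slot bit stays '1' ("no change") except bit 3, which is written as '0'
 * ("clear this slot"), matching the UTRLCLR semantics quoted above.
 */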
842
843 /**
844 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
845 * @hba: per adapter instance
846 * @pos: position of the bit to be cleared
847 */
848 static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
849 {
850 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
851 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
852 else
853 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
854 }
855
856 /**
857 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
858 * @reg: Register value of host controller status
859 *
860 * Return: 0 on success; a positive value if failed.
861 */
862 static inline int ufshcd_get_lists_status(u32 reg)
863 {
864 return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
865 }
866
867 /**
868 * ufshcd_get_uic_cmd_result - Get the UIC command result
869 * @hba: Pointer to adapter instance
870 *
871 * This function gets the result of UIC command completion
872 *
873 * Return: 0 on success; non-zero value on error.
874 */
875 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
876 {
877 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
878 MASK_UIC_COMMAND_RESULT;
879 }
880
881 /**
882 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
883 * @hba: Pointer to adapter instance
884 *
885 * This function reads UIC command argument-3.
886 *
887 * Return: the attribute value held in UIC command argument-3.
888 */
889 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
890 {
891 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
892 }
893
894 /**
895 * ufshcd_get_req_rsp - returns the TR response transaction type
896 * @ucd_rsp_ptr: pointer to response UPIU
897 *
898 * Return: UPIU type.
899 */
900 static inline enum upiu_response_transaction
901 ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
902 {
903 return ucd_rsp_ptr->header.transaction_code;
904 }
905
906 /**
907 * ufshcd_is_exception_event - Check if the device raised an exception event
908 * @ucd_rsp_ptr: pointer to response UPIU
909 *
910 * The function checks if the device raised an exception event indicated in
911 * the Device Information field of response UPIU.
912 *
913 * Return: true if exception is raised, false otherwise.
914 */
915 static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
916 {
917 return ucd_rsp_ptr->header.device_information & 1;
918 }
919
920 /**
921 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
922 * @hba: per adapter instance
923 */
924 static inline void
925 ufshcd_reset_intr_aggr(struct ufs_hba *hba)
926 {
927 ufshcd_writel(hba, INT_AGGR_ENABLE |
928 INT_AGGR_COUNTER_AND_TIMER_RESET,
929 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
930 }
931
932 /**
933 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
934 * @hba: per adapter instance
935 * @cnt: Interrupt aggregation counter threshold
936 * @tmout: Interrupt aggregation timeout value
937 */
938 static inline void
939 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
940 {
941 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
942 INT_AGGR_COUNTER_THLD_VAL(cnt) |
943 INT_AGGR_TIMEOUT_VAL(tmout),
944 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
945 }
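/*
 * Example (illustrative): with INT_AGGR_DEF_TO == 0x02 (2 * 40 us = 80 us),
 * a call such as
 *   ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
 * raises the aggregated interrupt after nutrs - 1 completions or after
 * 80 us, whichever comes first.
 */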
946
947 /**
948 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
949 * @hba: per adapter instance
950 */
951 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
952 {
953 ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
954 }
955
956 /**
957 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
958 * When the run-stop registers are set to 1, they indicate to the
959 * host controller that it can process requests.
960 * @hba: per adapter instance
961 */
962 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
963 {
964 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
965 REG_UTP_TASK_REQ_LIST_RUN_STOP);
966 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
967 REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
968 }
969
970 /**
971 * ufshcd_hba_start - Start controller initialization sequence
972 * @hba: per adapter instance
973 */
974 static inline void ufshcd_hba_start(struct ufs_hba *hba)
975 {
976 u32 val = CONTROLLER_ENABLE;
977
978 if (ufshcd_crypto_enable(hba))
979 val |= CRYPTO_GENERAL_ENABLE;
980
981 ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
982 }
983
984 /**
985 * ufshcd_is_hba_active - Get controller state
986 * @hba: per adapter instance
987 *
988 * Return: true if and only if the controller is active.
989 */
990 bool ufshcd_is_hba_active(struct ufs_hba *hba)
991 {
992 return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE;
993 }
994 EXPORT_SYMBOL_GPL(ufshcd_is_hba_active);
995
996 /**
997 * ufshcd_pm_qos_init - initialize PM QoS request
998 * @hba: per adapter instance
999 */
1000 void ufshcd_pm_qos_init(struct ufs_hba *hba)
1001 {
1002
1003 if (hba->pm_qos_enabled)
1004 return;
1005
1006 cpu_latency_qos_add_request(&hba->pm_qos_req, PM_QOS_DEFAULT_VALUE);
1007
1008 if (cpu_latency_qos_request_active(&hba->pm_qos_req))
1009 hba->pm_qos_enabled = true;
1010 }
1011
1012 /**
1013 * ufshcd_pm_qos_exit - remove request from PM QoS
1014 * @hba: per adapter instance
1015 */
1016 void ufshcd_pm_qos_exit(struct ufs_hba *hba)
1017 {
1018 if (!hba->pm_qos_enabled)
1019 return;
1020
1021 cpu_latency_qos_remove_request(&hba->pm_qos_req);
1022 hba->pm_qos_enabled = false;
1023 }
1024
1025 /**
1026 * ufshcd_pm_qos_update - update PM QoS request
1027 * @hba: per adapter instance
1028 * @on: If True, vote for perf PM QoS mode otherwise power save mode
1029 */
1030 static void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on)
1031 {
1032 if (!hba->pm_qos_enabled)
1033 return;
1034
1035 cpu_latency_qos_update_request(&hba->pm_qos_req, on ? 0 : PM_QOS_DEFAULT_VALUE);
1036 }
1037
1038 /**
1039 * ufshcd_set_clk_freq - set UFS controller clock frequencies
1040 * @hba: per adapter instance
1041 * @scale_up: If True, set max possible frequency otherwise set low frequency
1042 *
1043 * Return: 0 if successful; < 0 upon failure.
1044 */
1045 static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
1046 {
1047 int ret = 0;
1048 struct ufs_clk_info *clki;
1049 struct list_head *head = &hba->clk_list_head;
1050
1051 if (list_empty(head))
1052 goto out;
1053
1054 list_for_each_entry(clki, head, list) {
1055 if (!IS_ERR_OR_NULL(clki->clk)) {
1056 if (scale_up && clki->max_freq) {
1057 if (clki->curr_freq == clki->max_freq)
1058 continue;
1059
1060 ret = clk_set_rate(clki->clk, clki->max_freq);
1061 if (ret) {
1062 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
1063 __func__, clki->name,
1064 clki->max_freq, ret);
1065 break;
1066 }
1067 trace_ufshcd_clk_scaling(hba,
1068 "scaled up", clki->name,
1069 clki->curr_freq,
1070 clki->max_freq);
1071
1072 clki->curr_freq = clki->max_freq;
1073
1074 } else if (!scale_up && clki->min_freq) {
1075 if (clki->curr_freq == clki->min_freq)
1076 continue;
1077
1078 ret = clk_set_rate(clki->clk, clki->min_freq);
1079 if (ret) {
1080 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
1081 __func__, clki->name,
1082 clki->min_freq, ret);
1083 break;
1084 }
1085 trace_ufshcd_clk_scaling(hba,
1086 "scaled down", clki->name,
1087 clki->curr_freq,
1088 clki->min_freq);
1089 clki->curr_freq = clki->min_freq;
1090 }
1091 }
1092 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
1093 clki->name, clk_get_rate(clki->clk));
1094 }
1095
1096 out:
1097 return ret;
1098 }
1099
1100 int ufshcd_opp_config_clks(struct device *dev, struct opp_table *opp_table,
1101 struct dev_pm_opp *opp, void *data,
1102 bool scaling_down)
1103 {
1104 struct ufs_hba *hba = dev_get_drvdata(dev);
1105 struct list_head *head = &hba->clk_list_head;
1106 struct ufs_clk_info *clki;
1107 unsigned long freq;
1108 u8 idx = 0;
1109 int ret;
1110
1111 list_for_each_entry(clki, head, list) {
1112 if (!IS_ERR_OR_NULL(clki->clk)) {
1113 freq = dev_pm_opp_get_freq_indexed(opp, idx++);
1114
1115 /* Do not set rate for clocks having frequency as 0 */
1116 if (!freq)
1117 continue;
1118
1119 ret = clk_set_rate(clki->clk, freq);
1120 if (ret) {
1121 dev_err(dev, "%s: %s clk set rate(%ldHz) failed, %d\n",
1122 __func__, clki->name, freq, ret);
1123 return ret;
1124 }
1125
1126 trace_ufshcd_clk_scaling(hba,
1127 (scaling_down ? "scaled down" : "scaled up"),
1128 clki->name, hba->clk_scaling.target_freq, freq);
1129 }
1130 }
1131
1132 return 0;
1133 }
1134 EXPORT_SYMBOL_GPL(ufshcd_opp_config_clks);
1135
1136 static int ufshcd_opp_set_rate(struct ufs_hba *hba, unsigned long freq)
1137 {
1138 struct dev_pm_opp *opp;
1139 int ret;
1140
1141 opp = dev_pm_opp_find_freq_floor_indexed(hba->dev,
1142 &freq, 0);
1143 if (IS_ERR(opp))
1144 return PTR_ERR(opp);
1145
1146 ret = dev_pm_opp_set_opp(hba->dev, opp);
1147 dev_pm_opp_put(opp);
1148
1149 return ret;
1150 }
1151
1152 /**
1153 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
1154 * @hba: per adapter instance
1155 * @freq: frequency to scale
1156 * @scale_up: True if scaling up and false if scaling down
1157 *
1158 * Return: 0 if successful; < 0 upon failure.
1159 */
1160 static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
1161 bool scale_up)
1162 {
1163 int ret = 0;
1164 ktime_t start = ktime_get();
1165
1166 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, freq, PRE_CHANGE);
1167 if (ret)
1168 goto out;
1169
1170 if (hba->use_pm_opp)
1171 ret = ufshcd_opp_set_rate(hba, freq);
1172 else
1173 ret = ufshcd_set_clk_freq(hba, scale_up);
1174 if (ret)
1175 goto out;
1176
1177 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, freq, POST_CHANGE);
1178 if (ret) {
1179 if (hba->use_pm_opp)
1180 ufshcd_opp_set_rate(hba,
1181 hba->devfreq->previous_freq);
1182 else
1183 ufshcd_set_clk_freq(hba, !scale_up);
1184 goto out;
1185 }
1186
1187 ufshcd_pm_qos_update(hba, scale_up);
1188
1189 out:
1190 trace_ufshcd_profile_clk_scaling(hba,
1191 (scale_up ? "up" : "down"),
1192 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1193 return ret;
1194 }
1195
1196 /**
1197 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
1198 * @hba: per adapter instance
1199 * @freq: frequency to scale
1200 * @scale_up: True if scaling up and false if scaling down
1201 *
1202 * Return: true if scaling is required, false otherwise.
1203 */
1204 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
1205 unsigned long freq, bool scale_up)
1206 {
1207 struct ufs_clk_info *clki;
1208 struct list_head *head = &hba->clk_list_head;
1209
1210 if (list_empty(head))
1211 return false;
1212
1213 if (hba->use_pm_opp)
1214 return freq != hba->clk_scaling.target_freq;
1215
1216 list_for_each_entry(clki, head, list) {
1217 if (!IS_ERR_OR_NULL(clki->clk)) {
1218 if (scale_up && clki->max_freq) {
1219 if (clki->curr_freq == clki->max_freq)
1220 continue;
1221 return true;
1222 } else if (!scale_up && clki->min_freq) {
1223 if (clki->curr_freq == clki->min_freq)
1224 continue;
1225 return true;
1226 }
1227 }
1228 }
1229
1230 return false;
1231 }
1232
1233 /*
1234 * Determine the number of pending commands by counting the bits in the SCSI
1235 * device budget maps. This approach has been selected because a bit is set in
1236 * the budget map before scsi_host_queue_ready() checks the host_self_blocked
1237 * flag. The host_self_blocked flag can be modified by calling
1238 * scsi_block_requests() or scsi_unblock_requests().
1239 */
1240 static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
1241 {
1242 const struct scsi_device *sdev;
1243 unsigned long flags;
1244 u32 pending = 0;
1245
1246 spin_lock_irqsave(hba->host->host_lock, flags);
1247 __shost_for_each_device(sdev, hba->host)
1248 pending += sbitmap_weight(&sdev->budget_map);
1249 spin_unlock_irqrestore(hba->host->host_lock, flags);
1250
1251 return pending;
1252 }
1253
1254 /*
1255 * Wait until all pending SCSI commands and TMFs have finished or the timeout
1256 * has expired.
1257 *
1258 * Return: 0 upon success; -EBUSY upon timeout.
1259 */
1260 static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
1261 u64 wait_timeout_us)
1262 {
1263 int ret = 0;
1264 u32 tm_doorbell;
1265 u32 tr_pending;
1266 bool timeout = false, do_last_check = false;
1267 ktime_t start;
1268
1269 ufshcd_hold(hba);
1270 /*
1271 * Wait for all the outstanding tasks/transfer requests.
1272 * Verify by checking the doorbell registers are clear.
1273 */
1274 start = ktime_get();
1275 do {
1276 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
1277 ret = -EBUSY;
1278 goto out;
1279 }
1280
1281 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
1282 tr_pending = ufshcd_pending_cmds(hba);
1283 if (!tm_doorbell && !tr_pending) {
1284 timeout = false;
1285 break;
1286 } else if (do_last_check) {
1287 break;
1288 }
1289
1290 io_schedule_timeout(msecs_to_jiffies(20));
1291 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
1292 wait_timeout_us) {
1293 timeout = true;
1294 /*
1295 * We might have scheduled out for long time so make
1296 * sure to check if doorbells are cleared by this time
1297 * or not.
1298 */
1299 do_last_check = true;
1300 }
1301 } while (tm_doorbell || tr_pending);
1302
1303 if (timeout) {
1304 dev_err(hba->dev,
1305 "%s: timed out waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1306 __func__, tm_doorbell, tr_pending);
1307 ret = -EBUSY;
1308 }
1309 out:
1310 ufshcd_release(hba);
1311 return ret;
1312 }
1313
1314 /**
1315 * ufshcd_scale_gear - scale up/down UFS gear
1316 * @hba: per adapter instance
1317 * @target_gear: target gear to scale to
1318 * @scale_up: True for scaling up gear and false for scaling down
1319 *
1320 * Return: 0 for success; -EBUSY if scaling can't happen at this time;
1321 * non-zero for any other errors.
1322 */
1323 static int ufshcd_scale_gear(struct ufs_hba *hba, u32 target_gear, bool scale_up)
1324 {
1325 int ret = 0;
1326 struct ufs_pa_layer_attr new_pwr_info;
1327
1328 if (target_gear) {
1329 new_pwr_info = hba->pwr_info;
1330 new_pwr_info.gear_tx = target_gear;
1331 new_pwr_info.gear_rx = target_gear;
1332
1333 goto config_pwr_mode;
1334 }
1335
1336 /* Legacy gear scaling, in case vops_freq_to_gear_speed() is not implemented */
1337 if (scale_up) {
1338 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info,
1339 sizeof(struct ufs_pa_layer_attr));
1340 } else {
1341 memcpy(&new_pwr_info, &hba->pwr_info,
1342 sizeof(struct ufs_pa_layer_attr));
1343
1344 if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
1345 hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
1346 /* save the current power mode */
1347 memcpy(&hba->clk_scaling.saved_pwr_info,
1348 &hba->pwr_info,
1349 sizeof(struct ufs_pa_layer_attr));
1350
1351 /* scale down gear */
1352 new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
1353 new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
1354 }
1355 }
1356
1357 config_pwr_mode:
1358 /* check if the power mode needs to be changed or not? */
1359 ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
1360 if (ret)
1361 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1362 __func__, ret,
1363 hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1364 new_pwr_info.gear_tx, new_pwr_info.gear_rx);
1365
1366 return ret;
1367 }
1368
1369 /*
1370 * Wait until all pending SCSI commands and TMFs have finished or the timeout
1371 * has expired.
1372 *
1373 * Return: 0 upon success; -EBUSY upon timeout.
1374 */
1375 static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
1376 {
1377 int ret = 0;
1378 /*
1379 * make sure that there are no outstanding requests when
1380 * clock scaling is in progress
1381 */
1382 blk_mq_quiesce_tagset(&hba->host->tag_set);
1383 mutex_lock(&hba->wb_mutex);
1384 down_write(&hba->clk_scaling_lock);
1385
1386 if (!hba->clk_scaling.is_allowed ||
1387 ufshcd_wait_for_doorbell_clr(hba, timeout_us)) {
1388 ret = -EBUSY;
1389 up_write(&hba->clk_scaling_lock);
1390 mutex_unlock(&hba->wb_mutex);
1391 blk_mq_unquiesce_tagset(&hba->host->tag_set);
1392 goto out;
1393 }
1394
1395 /* let's not get into low power until clock scaling is completed */
1396 ufshcd_hold(hba);
1397
1398 out:
1399 return ret;
1400 }
1401
1402 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err)
1403 {
1404 up_write(&hba->clk_scaling_lock);
1405
1406 /* Enable Write Booster if current gear requires it else disable it */
1407 if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
1408 ufshcd_wb_toggle(hba, hba->pwr_info.gear_rx >= hba->clk_scaling.wb_gear);
1409
1410 mutex_unlock(&hba->wb_mutex);
1411
1412 blk_mq_unquiesce_tagset(&hba->host->tag_set);
1413 ufshcd_release(hba);
1414 }
1415
1416 /**
1417 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
1418 * @hba: per adapter instance
1419 * @freq: frequency to scale
1420 * @scale_up: True for scaling up and false for scaling down
1421 *
1422 * Return: 0 for success; -EBUSY if scaling can't happen at this time; non-zero
1423 * for any other errors.
1424 */
1425 static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
1426 bool scale_up)
1427 {
1428 u32 old_gear = hba->pwr_info.gear_rx;
1429 u32 new_gear = 0;
1430 int ret = 0;
1431
1432 new_gear = ufshcd_vops_freq_to_gear_speed(hba, freq);
1433
1434 ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
1435 if (ret)
1436 return ret;
1437
1438 /* scale down the gear before scaling down clocks */
1439 if (!scale_up) {
1440 ret = ufshcd_scale_gear(hba, new_gear, false);
1441 if (ret)
1442 goto out_unprepare;
1443 }
1444
1445 ret = ufshcd_scale_clks(hba, freq, scale_up);
1446 if (ret) {
1447 if (!scale_up)
1448 ufshcd_scale_gear(hba, old_gear, true);
1449 goto out_unprepare;
1450 }
1451
1452 /* scale up the gear after scaling up clocks */
1453 if (scale_up) {
1454 ret = ufshcd_scale_gear(hba, new_gear, true);
1455 if (ret) {
1456 ufshcd_scale_clks(hba, hba->devfreq->previous_freq,
1457 false);
1458 goto out_unprepare;
1459 }
1460 }
1461
1462 out_unprepare:
1463 ufshcd_clock_scaling_unprepare(hba, ret);
1464 return ret;
1465 }
1466
1467 static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
1468 {
1469 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1470 clk_scaling.suspend_work);
1471
1472 scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
1473 {
1474 if (hba->clk_scaling.active_reqs ||
1475 hba->clk_scaling.is_suspended)
1476 return;
1477
1478 hba->clk_scaling.is_suspended = true;
1479 hba->clk_scaling.window_start_t = 0;
1480 }
1481
1482 devfreq_suspend_device(hba->devfreq);
1483 }
1484
1485 static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
1486 {
1487 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1488 clk_scaling.resume_work);
1489
1490 scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
1491 {
1492 if (!hba->clk_scaling.is_suspended)
1493 return;
1494 hba->clk_scaling.is_suspended = false;
1495 }
1496
1497 devfreq_resume_device(hba->devfreq);
1498 }
1499
1500 static int ufshcd_devfreq_target(struct device *dev,
1501 unsigned long *freq, u32 flags)
1502 {
1503 int ret = 0;
1504 struct ufs_hba *hba = dev_get_drvdata(dev);
1505 ktime_t start;
1506 bool scale_up = false, sched_clk_scaling_suspend_work = false;
1507 struct list_head *clk_list = &hba->clk_list_head;
1508 struct ufs_clk_info *clki;
1509
1510 if (!ufshcd_is_clkscaling_supported(hba))
1511 return -EINVAL;
1512
1513 if (hba->use_pm_opp) {
1514 struct dev_pm_opp *opp;
1515
1516 /* Get the recommended frequency from OPP framework */
1517 opp = devfreq_recommended_opp(dev, freq, flags);
1518 if (IS_ERR(opp))
1519 return PTR_ERR(opp);
1520
1521 dev_pm_opp_put(opp);
1522 } else {
1523 /* Override with the closest supported frequency */
1524 clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info,
1525 list);
1526 *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
1527 }
1528
1529 scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
1530 {
1531 if (ufshcd_eh_in_progress(hba))
1532 return 0;
1533
1534 /* Skip scaling clock when clock scaling is suspended */
1535 if (hba->clk_scaling.is_suspended) {
1536 dev_warn(hba->dev, "clock scaling is suspended, skip");
1537 return 0;
1538 }
1539
1540 if (!hba->clk_scaling.active_reqs)
1541 sched_clk_scaling_suspend_work = true;
1542
1543 if (list_empty(clk_list))
1544 goto out;
1545
1546 /* Decide based on the target or rounded-off frequency and update */
1547 if (hba->use_pm_opp)
1548 scale_up = *freq > hba->clk_scaling.target_freq;
1549 else
1550 scale_up = *freq == clki->max_freq;
1551
1552 if (!hba->use_pm_opp && !scale_up)
1553 *freq = clki->min_freq;
1554
1555 /* Update the frequency */
1556 if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
1557 ret = 0;
1558 goto out; /* no state change required */
1559 }
1560 }
1561
1562 start = ktime_get();
1563 ret = ufshcd_devfreq_scale(hba, *freq, scale_up);
1564 if (!ret)
1565 hba->clk_scaling.target_freq = *freq;
1566
1567 trace_ufshcd_profile_clk_scaling(hba,
1568 (scale_up ? "up" : "down"),
1569 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1570
1571 out:
1572 if (sched_clk_scaling_suspend_work &&
1573 (!scale_up || hba->clk_scaling.suspend_on_no_request))
1574 queue_work(hba->clk_scaling.workq,
1575 &hba->clk_scaling.suspend_work);
1576
1577 return ret;
1578 }
1579
1580 static int ufshcd_devfreq_get_dev_status(struct device *dev,
1581 struct devfreq_dev_status *stat)
1582 {
1583 struct ufs_hba *hba = dev_get_drvdata(dev);
1584 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1585 ktime_t curr_t;
1586
1587 if (!ufshcd_is_clkscaling_supported(hba))
1588 return -EINVAL;
1589
1590 memset(stat, 0, sizeof(*stat));
1591
1592 guard(spinlock_irqsave)(&hba->clk_scaling.lock);
1593
1594 curr_t = ktime_get();
1595 if (!scaling->window_start_t)
1596 goto start_window;
1597
1598 /*
1599 * If current frequency is 0, then the ondemand governor considers
1600 * there's no initial frequency set. And it always requests to set
1601 * to max. frequency.
1602 */
1603 if (hba->use_pm_opp) {
1604 stat->current_frequency = hba->clk_scaling.target_freq;
1605 } else {
1606 struct list_head *clk_list = &hba->clk_list_head;
1607 struct ufs_clk_info *clki;
1608
1609 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1610 stat->current_frequency = clki->curr_freq;
1611 }
1612
1613 if (scaling->is_busy_started)
1614 scaling->tot_busy_t += ktime_us_delta(curr_t,
1615 scaling->busy_start_t);
1616 stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
1617 stat->busy_time = scaling->tot_busy_t;
1618 start_window:
1619 scaling->window_start_t = curr_t;
1620 scaling->tot_busy_t = 0;
1621
1622 if (scaling->active_reqs) {
1623 scaling->busy_start_t = curr_t;
1624 scaling->is_busy_started = true;
1625 } else {
1626 scaling->busy_start_t = 0;
1627 scaling->is_busy_started = false;
1628 }
1629
1630 return 0;
1631 }
1632
1633 static int ufshcd_devfreq_init(struct ufs_hba *hba)
1634 {
1635 struct list_head *clk_list = &hba->clk_list_head;
1636 struct ufs_clk_info *clki;
1637 struct devfreq *devfreq;
1638 int ret;
1639
1640 /* Skip devfreq if we don't have any clocks in the list */
1641 if (list_empty(clk_list))
1642 return 0;
1643
1644 if (!hba->use_pm_opp) {
1645 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1646 dev_pm_opp_add(hba->dev, clki->min_freq, 0);
1647 dev_pm_opp_add(hba->dev, clki->max_freq, 0);
1648 }
1649
1650 ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
1651 &hba->vps->ondemand_data);
1652 devfreq = devfreq_add_device(hba->dev,
1653 &hba->vps->devfreq_profile,
1654 DEVFREQ_GOV_SIMPLE_ONDEMAND,
1655 &hba->vps->ondemand_data);
1656 if (IS_ERR(devfreq)) {
1657 ret = PTR_ERR(devfreq);
1658 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
1659
1660 if (!hba->use_pm_opp) {
1661 dev_pm_opp_remove(hba->dev, clki->min_freq);
1662 dev_pm_opp_remove(hba->dev, clki->max_freq);
1663 }
1664 return ret;
1665 }
1666
1667 hba->devfreq = devfreq;
1668
1669 return 0;
1670 }
1671
1672 static void ufshcd_devfreq_remove(struct ufs_hba *hba)
1673 {
1674 struct list_head *clk_list = &hba->clk_list_head;
1675
1676 if (!hba->devfreq)
1677 return;
1678
1679 devfreq_remove_device(hba->devfreq);
1680 hba->devfreq = NULL;
1681
1682 if (!hba->use_pm_opp) {
1683 struct ufs_clk_info *clki;
1684
1685 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1686 dev_pm_opp_remove(hba->dev, clki->min_freq);
1687 dev_pm_opp_remove(hba->dev, clki->max_freq);
1688 }
1689 }
1690
1691 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1692 {
1693 bool suspend = false;
1694
1695 cancel_work_sync(&hba->clk_scaling.suspend_work);
1696 cancel_work_sync(&hba->clk_scaling.resume_work);
1697
1698 scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
1699 {
1700 if (!hba->clk_scaling.is_suspended) {
1701 suspend = true;
1702 hba->clk_scaling.is_suspended = true;
1703 hba->clk_scaling.window_start_t = 0;
1704 }
1705 }
1706
1707 if (suspend)
1708 devfreq_suspend_device(hba->devfreq);
1709 }
1710
1711 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1712 {
1713 bool resume = false;
1714
1715 scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
1716 {
1717 if (hba->clk_scaling.is_suspended) {
1718 resume = true;
1719 hba->clk_scaling.is_suspended = false;
1720 }
1721 }
1722
1723 if (resume)
1724 devfreq_resume_device(hba->devfreq);
1725 }
1726
1727 static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1728 struct device_attribute *attr, char *buf)
1729 {
1730 struct ufs_hba *hba = dev_get_drvdata(dev);
1731
1732 return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled);
1733 }
1734
1735 static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1736 struct device_attribute *attr, const char *buf, size_t count)
1737 {
1738 struct ufs_hba *hba = dev_get_drvdata(dev);
1739 struct ufs_clk_info *clki;
1740 unsigned long freq;
1741 u32 value;
1742 int err = 0;
1743
1744 if (kstrtou32(buf, 0, &value))
1745 return -EINVAL;
1746
1747 down(&hba->host_sem);
1748 if (!ufshcd_is_user_access_allowed(hba)) {
1749 err = -EBUSY;
1750 goto out;
1751 }
1752
1753 value = !!value;
1754 if (value == hba->clk_scaling.is_enabled)
1755 goto out;
1756
1757 ufshcd_rpm_get_sync(hba);
1758 ufshcd_hold(hba);
1759
1760 hba->clk_scaling.is_enabled = value;
1761
1762 if (value) {
1763 ufshcd_resume_clkscaling(hba);
1764 goto out_rel;
1765 }
1766
1767 clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
1768 freq = clki->max_freq;
1769
1770 ufshcd_suspend_clkscaling(hba);
1771
1772 if (!ufshcd_is_devfreq_scaling_required(hba, freq, true))
1773 goto out_rel;
1774
1775 err = ufshcd_devfreq_scale(hba, freq, true);
1776 if (err)
1777 dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1778 __func__, err);
1779 else
1780 hba->clk_scaling.target_freq = freq;
1781
1782 out_rel:
1783 ufshcd_release(hba);
1784 ufshcd_rpm_put_sync(hba);
1785 out:
1786 up(&hba->host_sem);
1787 return err ? err : count;
1788 }
1789
1790 static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
1791 {
1792 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1793 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1794 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1795 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1796 hba->clk_scaling.enable_attr.attr.mode = 0644;
1797 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1798 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1799 }
1800
1801 static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
1802 {
1803 if (hba->clk_scaling.enable_attr.attr.name)
1804 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
1805 }
1806
1807 static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1808 {
1809 if (!ufshcd_is_clkscaling_supported(hba))
1810 return;
1811
1812 if (!hba->clk_scaling.min_gear)
1813 hba->clk_scaling.min_gear = UFS_HS_G1;
1814
1815 if (!hba->clk_scaling.wb_gear)
1816 /* Use intermediate gear speed HS_G3 as the default wb_gear */
1817 hba->clk_scaling.wb_gear = UFS_HS_G3;
1818
1819 INIT_WORK(&hba->clk_scaling.suspend_work,
1820 ufshcd_clk_scaling_suspend_work);
1821 INIT_WORK(&hba->clk_scaling.resume_work,
1822 ufshcd_clk_scaling_resume_work);
1823
1824 spin_lock_init(&hba->clk_scaling.lock);
1825
1826 hba->clk_scaling.workq = alloc_ordered_workqueue(
1827 "ufs_clkscaling_%d", WQ_MEM_RECLAIM, hba->host->host_no);
1828
1829 hba->clk_scaling.is_initialized = true;
1830 }
1831
1832 static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1833 {
1834 if (!hba->clk_scaling.is_initialized)
1835 return;
1836
1837 ufshcd_remove_clk_scaling_sysfs(hba);
1838 destroy_workqueue(hba->clk_scaling.workq);
1839 ufshcd_devfreq_remove(hba);
1840 hba->clk_scaling.is_initialized = false;
1841 }
1842
1843 static void ufshcd_ungate_work(struct work_struct *work)
1844 {
1845 int ret;
1846 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1847 clk_gating.ungate_work);
1848
1849 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1850
1851 scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
1852 if (hba->clk_gating.state == CLKS_ON)
1853 return;
1854 }
1855
1856 ufshcd_hba_vreg_set_hpm(hba);
1857 ufshcd_setup_clocks(hba, true);
1858
1859 ufshcd_enable_irq(hba);
1860
1861 /* Exit from hibern8 */
1862 if (ufshcd_can_hibern8_during_gating(hba)) {
1863 /* Prevent gating in this path */
1864 hba->clk_gating.is_suspended = true;
1865 if (ufshcd_is_link_hibern8(hba)) {
1866 ret = ufshcd_uic_hibern8_exit(hba);
1867 if (ret)
1868 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1869 __func__, ret);
1870 else
1871 ufshcd_set_link_active(hba);
1872 }
1873 hba->clk_gating.is_suspended = false;
1874 }
1875 }
1876
1877 /**
1878 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1879 * Also, exit from hibern8 mode and set the link as active.
1880 * @hba: per adapter instance
1881 */
1882 void ufshcd_hold(struct ufs_hba *hba)
1883 {
1884 bool flush_result;
1885 unsigned long flags;
1886
1887 if (!ufshcd_is_clkgating_allowed(hba) ||
1888 !hba->clk_gating.is_initialized)
1889 return;
1890 spin_lock_irqsave(&hba->clk_gating.lock, flags);
1891 hba->clk_gating.active_reqs++;
1892
1893 start:
1894 switch (hba->clk_gating.state) {
1895 case CLKS_ON:
1896 /*
1897 * Wait for the ungate work to complete if in progress.
1898 * Though the clocks may be in the ON state, the link could
1899 * still be in hibern8 state if hibern8 is allowed
1900 * during clock gating.
1901 * Make sure we also exit the hibern8 state, in addition to
1902 * the clocks being ON.
1903 */
1904 if (ufshcd_can_hibern8_during_gating(hba) &&
1905 ufshcd_is_link_hibern8(hba)) {
1906 spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
1907 flush_result = flush_work(&hba->clk_gating.ungate_work);
1908 if (hba->clk_gating.is_suspended && !flush_result)
1909 return;
1910 spin_lock_irqsave(&hba->clk_gating.lock, flags);
1911 goto start;
1912 }
1913 break;
1914 case REQ_CLKS_OFF:
1915 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1916 hba->clk_gating.state = CLKS_ON;
1917 trace_ufshcd_clk_gating(hba,
1918 hba->clk_gating.state);
1919 break;
1920 }
1921 /*
1922 * If we are here, it means gating work is either done or
1923 * currently running. Hence, fall through to cancel gating
1924 * work and to enable clocks.
1925 */
1926 fallthrough;
1927 case CLKS_OFF:
1928 hba->clk_gating.state = REQ_CLKS_ON;
1929 trace_ufshcd_clk_gating(hba,
1930 hba->clk_gating.state);
1931 queue_work(hba->clk_gating.clk_gating_workq,
1932 &hba->clk_gating.ungate_work);
1933 /*
1934 * fall through to check if we should wait for this
1935 * work to be done or not.
1936 */
1937 fallthrough;
1938 case REQ_CLKS_ON:
1939 spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
1940 flush_work(&hba->clk_gating.ungate_work);
1941 /* Make sure state is CLKS_ON before returning */
1942 spin_lock_irqsave(&hba->clk_gating.lock, flags);
1943 goto start;
1944 default:
1945 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1946 __func__, hba->clk_gating.state);
1947 break;
1948 }
1949 spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
1950 }
1951 EXPORT_SYMBOL_GPL(ufshcd_hold);
1952
1953 static void ufshcd_gate_work(struct work_struct *work)
1954 {
1955 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1956 clk_gating.gate_work.work);
1957 int ret;
1958
1959 scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
1960 /*
1961 * If this work is being canceled, the gating state will have
1962 * been marked as REQ_CLKS_ON. In that case save time by
1963 * skipping the gating work and exiting after changing the
1964 * clock state to CLKS_ON.
1965 */
1966 if (hba->clk_gating.is_suspended ||
1967 hba->clk_gating.state != REQ_CLKS_OFF) {
1968 hba->clk_gating.state = CLKS_ON;
1969 trace_ufshcd_clk_gating(hba,
1970 hba->clk_gating.state);
1971 return;
1972 }
1973
1974 if (hba->clk_gating.active_reqs)
1975 return;
1976 }
1977
1978 scoped_guard(spinlock_irqsave, hba->host->host_lock) {
1979 if (ufshcd_is_ufs_dev_busy(hba) ||
1980 hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
1981 return;
1982 }
1983
1984 /* put the link into hibern8 mode before turning off clocks */
1985 if (ufshcd_can_hibern8_during_gating(hba)) {
1986 ret = ufshcd_uic_hibern8_enter(hba);
1987 if (ret) {
1988 hba->clk_gating.state = CLKS_ON;
1989 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
1990 __func__, ret);
1991 trace_ufshcd_clk_gating(hba,
1992 hba->clk_gating.state);
1993 return;
1994 }
1995 ufshcd_set_link_hibern8(hba);
1996 }
1997
1998 ufshcd_disable_irq(hba);
1999
2000 ufshcd_setup_clocks(hba, false);
2001
2002 /* Put the host controller in low power mode if possible */
2003 ufshcd_hba_vreg_set_lpm(hba);
2004 /*
2005 * If this work is being canceled, the gating state will have
2006 * been marked as REQ_CLKS_ON. In that case keep the state as
2007 * REQ_CLKS_ON, which still implies that the clocks are off
2008 * and a request to turn them on is pending. This keeps the
2009 * state machine intact and ultimately prevents the cancel
2010 * work from being done multiple times when new requests
2011 * arrive before the current cancel work is done.
2012 */
2013 guard(spinlock_irqsave)(&hba->clk_gating.lock);
2014 if (hba->clk_gating.state == REQ_CLKS_OFF) {
2015 hba->clk_gating.state = CLKS_OFF;
2016 trace_ufshcd_clk_gating(hba,
2017 hba->clk_gating.state);
2018 }
2019 }
2020
2021 static void __ufshcd_release(struct ufs_hba *hba)
2022 {
2023 lockdep_assert_held(&hba->clk_gating.lock);
2024
2025 if (!ufshcd_is_clkgating_allowed(hba))
2026 return;
2027
2028 hba->clk_gating.active_reqs--;
2029
2030 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
2031 !hba->clk_gating.is_initialized ||
2032 hba->clk_gating.state == CLKS_OFF)
2033 return;
2034
2035 scoped_guard(spinlock_irqsave, hba->host->host_lock) {
2036 if (ufshcd_has_pending_tasks(hba) ||
2037 hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
2038 return;
2039 }
2040
2041 hba->clk_gating.state = REQ_CLKS_OFF;
2042 trace_ufshcd_clk_gating(hba, hba->clk_gating.state);
2043 queue_delayed_work(hba->clk_gating.clk_gating_workq,
2044 &hba->clk_gating.gate_work,
2045 msecs_to_jiffies(hba->clk_gating.delay_ms));
2046 }
2047
2048 void ufshcd_release(struct ufs_hba *hba)
2049 {
2050 guard(spinlock_irqsave)(&hba->clk_gating.lock);
2051 __ufshcd_release(hba);
2052 }
2053 EXPORT_SYMBOL_GPL(ufshcd_release);
2054
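/*
 * Illustrative sketch only (not used elsewhere in this driver): callers are
 * expected to bracket controller accesses with ufshcd_hold()/ufshcd_release()
 * so that the gating state machine keeps the clocks on (and the link out of
 * hibern8) for the duration of the access. The register read below is just an
 * example of such an access.
 */
static inline u32 ufshcd_example_read_hcs(struct ufs_hba *hba)
{
	u32 val;

	ufshcd_hold(hba);
	val = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
	ufshcd_release(hba);

	return val;
}
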
2055 static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
2056 struct device_attribute *attr, char *buf)
2057 {
2058 struct ufs_hba *hba = dev_get_drvdata(dev);
2059
2060 return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
2061 }
2062
2063 void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
2064 {
2065 struct ufs_hba *hba = dev_get_drvdata(dev);
2066
2067 guard(spinlock_irqsave)(&hba->clk_gating.lock);
2068 hba->clk_gating.delay_ms = value;
2069 }
2070 EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
2071
2072 static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
2073 struct device_attribute *attr, const char *buf, size_t count)
2074 {
2075 unsigned long value;
2076
2077 if (kstrtoul(buf, 0, &value))
2078 return -EINVAL;
2079
2080 ufshcd_clkgate_delay_set(dev, value);
2081 return count;
2082 }
2083
2084 static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
2085 struct device_attribute *attr, char *buf)
2086 {
2087 struct ufs_hba *hba = dev_get_drvdata(dev);
2088
2089 return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled);
2090 }
2091
2092 static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
2093 struct device_attribute *attr, const char *buf, size_t count)
2094 {
2095 struct ufs_hba *hba = dev_get_drvdata(dev);
2096 u32 value;
2097
2098 if (kstrtou32(buf, 0, &value))
2099 return -EINVAL;
2100
2101 value = !!value;
2102
2103 guard(spinlock_irqsave)(&hba->clk_gating.lock);
2104
2105 if (value == hba->clk_gating.is_enabled)
2106 return count;
2107
2108 if (value)
2109 __ufshcd_release(hba);
2110 else
2111 hba->clk_gating.active_reqs++;
2112
2113 hba->clk_gating.is_enabled = value;
2114
2115 return count;
2116 }
2117
2118 static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
2119 {
2120 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
2121 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
2122 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
2123 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
2124 hba->clk_gating.delay_attr.attr.mode = 0644;
2125 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
2126 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
2127
2128 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
2129 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
2130 sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
2131 hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
2132 hba->clk_gating.enable_attr.attr.mode = 0644;
2133 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
2134 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
2135 }
2136
2137 static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
2138 {
2139 if (hba->clk_gating.delay_attr.attr.name)
2140 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
2141 if (hba->clk_gating.enable_attr.attr.name)
2142 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
2143 }
2144
2145 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
2146 {
2147 if (!ufshcd_is_clkgating_allowed(hba))
2148 return;
2149
2150 hba->clk_gating.state = CLKS_ON;
2151
2152 hba->clk_gating.delay_ms = 150;
2153 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
2154 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
2155
2156 hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(
2157 "ufs_clk_gating_%d", WQ_MEM_RECLAIM | WQ_HIGHPRI,
2158 hba->host->host_no);
2159
2160 ufshcd_init_clk_gating_sysfs(hba);
2161
2162 hba->clk_gating.is_enabled = true;
2163 hba->clk_gating.is_initialized = true;
2164 }
2165
2166 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
2167 {
2168 if (!hba->clk_gating.is_initialized)
2169 return;
2170
2171 ufshcd_remove_clk_gating_sysfs(hba);
2172
2173 /* Ungate the clock if necessary. */
2174 ufshcd_hold(hba);
2175 hba->clk_gating.is_initialized = false;
2176 ufshcd_release(hba);
2177
2178 destroy_workqueue(hba->clk_gating.clk_gating_workq);
2179 }
2180
2181 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
2182 {
2183 bool queue_resume_work = false;
2184 ktime_t curr_t = ktime_get();
2185
2186 if (!ufshcd_is_clkscaling_supported(hba))
2187 return;
2188
2189 guard(spinlock_irqsave)(&hba->clk_scaling.lock);
2190
2191 if (!hba->clk_scaling.active_reqs++)
2192 queue_resume_work = true;
2193
2194 if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
2195 return;
2196
2197 if (queue_resume_work)
2198 queue_work(hba->clk_scaling.workq,
2199 &hba->clk_scaling.resume_work);
2200
2201 if (!hba->clk_scaling.window_start_t) {
2202 hba->clk_scaling.window_start_t = curr_t;
2203 hba->clk_scaling.tot_busy_t = 0;
2204 hba->clk_scaling.is_busy_started = false;
2205 }
2206
2207 if (!hba->clk_scaling.is_busy_started) {
2208 hba->clk_scaling.busy_start_t = curr_t;
2209 hba->clk_scaling.is_busy_started = true;
2210 }
2211 }
2212
2213 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
2214 {
2215 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
2216
2217 if (!ufshcd_is_clkscaling_supported(hba))
2218 return;
2219
2220 guard(spinlock_irqsave)(&hba->clk_scaling.lock);
2221
2222 hba->clk_scaling.active_reqs--;
2223 if (!scaling->active_reqs && scaling->is_busy_started) {
2224 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
2225 scaling->busy_start_t));
2226 scaling->busy_start_t = 0;
2227 scaling->is_busy_started = false;
2228 }
2229 }
2230
2231 static inline int ufshcd_monitor_opcode2dir(u8 opcode)
2232 {
2233 if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
2234 return READ;
2235 else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
2236 return WRITE;
2237 else
2238 return -EINVAL;
2239 }
2240
2241 static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
2242 struct ufshcd_lrb *lrbp)
2243 {
2244 const struct ufs_hba_monitor *m = &hba->monitor;
2245
2246 return (m->enabled && lrbp && lrbp->cmd &&
2247 (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
2248 ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
2249 }
2250
2251 static void ufshcd_start_monitor(struct ufs_hba *hba,
2252 const struct ufshcd_lrb *lrbp)
2253 {
2254 int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
2255 unsigned long flags;
2256
2257 spin_lock_irqsave(hba->host->host_lock, flags);
2258 if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
2259 hba->monitor.busy_start_ts[dir] = ktime_get();
2260 spin_unlock_irqrestore(hba->host->host_lock, flags);
2261 }
2262
2263 static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp)
2264 {
2265 int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
2266 unsigned long flags;
2267
2268 spin_lock_irqsave(hba->host->host_lock, flags);
2269 if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
2270 const struct request *req = scsi_cmd_to_rq(lrbp->cmd);
2271 struct ufs_hba_monitor *m = &hba->monitor;
2272 ktime_t now, inc, lat;
2273
2274 now = lrbp->compl_time_stamp;
2275 inc = ktime_sub(now, m->busy_start_ts[dir]);
2276 m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
2277 m->nr_sec_rw[dir] += blk_rq_sectors(req);
2278
2279 /* Update latencies */
2280 m->nr_req[dir]++;
2281 lat = ktime_sub(now, lrbp->issue_time_stamp);
2282 m->lat_sum[dir] += lat;
2283 if (m->lat_max[dir] < lat || !m->lat_max[dir])
2284 m->lat_max[dir] = lat;
2285 if (m->lat_min[dir] > lat || !m->lat_min[dir])
2286 m->lat_min[dir] = lat;
2287
2288 m->nr_queued[dir]--;
2289 /* Push forward the busy start of monitor */
2290 m->busy_start_ts[dir] = now;
2291 }
2292 spin_unlock_irqrestore(hba->host->host_lock, flags);
2293 }
2294
2295 /**
2296 * ufshcd_send_command - Send SCSI or device management commands
2297 * @hba: per adapter instance
2298 * @task_tag: Task tag of the command
2299 * @hwq: pointer to hardware queue instance
2300 */
2301 static inline
2302 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
2303 struct ufs_hw_queue *hwq)
2304 {
2305 struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
2306 unsigned long flags;
2307
2308 lrbp->issue_time_stamp = ktime_get();
2309 lrbp->issue_time_stamp_local_clock = local_clock();
2310 lrbp->compl_time_stamp = ktime_set(0, 0);
2311 lrbp->compl_time_stamp_local_clock = 0;
2312 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
2313 if (lrbp->cmd)
2314 ufshcd_clk_scaling_start_busy(hba);
2315 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
2316 ufshcd_start_monitor(hba, lrbp);
2317
2318 if (hba->mcq_enabled) {
2319 int utrd_size = sizeof(struct utp_transfer_req_desc);
2320 struct utp_transfer_req_desc *src = lrbp->utr_descriptor_ptr;
2321 struct utp_transfer_req_desc *dest;
2322
2323 spin_lock(&hwq->sq_lock);
2324 dest = hwq->sqe_base_addr + hwq->sq_tail_slot;
2325 memcpy(dest, src, utrd_size);
2326 ufshcd_inc_sq_tail(hwq);
2327 spin_unlock(&hwq->sq_lock);
2328 } else {
2329 spin_lock_irqsave(&hba->outstanding_lock, flags);
2330 if (hba->vops && hba->vops->setup_xfer_req)
2331 hba->vops->setup_xfer_req(hba, lrbp->task_tag,
2332 !!lrbp->cmd);
2333 __set_bit(lrbp->task_tag, &hba->outstanding_reqs);
2334 ufshcd_writel(hba, 1 << lrbp->task_tag,
2335 REG_UTP_TRANSFER_REQ_DOOR_BELL);
2336 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
2337 }
2338 }
2339
2340 /**
2341 * ufshcd_copy_sense_data - Copy sense data in case of check condition
2342 * @lrbp: pointer to local reference block
2343 */
2344 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
2345 {
2346 u8 *const sense_buffer = lrbp->cmd->sense_buffer;
2347 u16 resp_len;
2348 int len;
2349
2350 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header.data_segment_length);
2351 if (sense_buffer && resp_len) {
2352 int len_to_copy;
2353
2354 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
2355 len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
2356
2357 memcpy(sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
2358 len_to_copy);
2359 }
2360 }
2361
2362 /**
2363 * ufshcd_copy_query_response() - Copy the Query Response and the data
2364 * descriptor
2365 * @hba: per adapter instance
2366 * @lrbp: pointer to local reference block
2367 *
2368 * Return: 0 upon success; < 0 upon failure.
2369 */
2370 static
2371 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2372 {
2373 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2374
2375 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
2376
2377 /* Get the descriptor */
2378 if (hba->dev_cmd.query.descriptor &&
2379 lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
2380 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
2381 GENERAL_UPIU_REQUEST_SIZE;
2382 u16 resp_len;
2383 u16 buf_len;
2384
2385 /* data segment length */
2386 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header
2387 .data_segment_length);
2388 buf_len = be16_to_cpu(
2389 hba->dev_cmd.query.request.upiu_req.length);
2390 if (likely(buf_len >= resp_len)) {
2391 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
2392 } else {
2393 dev_warn(hba->dev,
2394 "%s: rsp size %d is bigger than buffer size %d",
2395 __func__, resp_len, buf_len);
2396 return -EINVAL;
2397 }
2398 }
2399
2400 return 0;
2401 }
2402
2403 /**
2404 * ufshcd_hba_capabilities - Read controller capabilities
2405 * @hba: per adapter instance
2406 *
2407 * Return: 0 on success, negative on error.
2408 */
2409 static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
2410 {
2411 int err;
2412
2413 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
2414
2415 /* nutrs and nutmrs are 0 based values */
2416 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_SDB) + 1;
2417 hba->nutmrs =
2418 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2419 hba->reserved_slot = hba->nutrs - 1;
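/*
 * Example: a raw NUTRS field of 31 therefore means 32 transfer request
 * slots; the last slot (hba->nutrs - 1) is kept as the reserved tag used
 * for device management commands (see ufshcd_exec_dev_cmd()).
 */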
2420
2421 hba->nortt = FIELD_GET(MASK_NUMBER_OUTSTANDING_RTT, hba->capabilities) + 1;
2422
2423 /* Read crypto capabilities */
2424 err = ufshcd_hba_init_crypto_capabilities(hba);
2425 if (err) {
2426 dev_err(hba->dev, "crypto setup failed\n");
2427 return err;
2428 }
2429
2430 /*
2431 * The UFSHCI 3.0 specification does not define MCQ_SUPPORT and
2432 * LSDB_SUPPORT, but defines bits [31:29] as reserved with a reset value
2433 * of 0, which means we can simply read these fields regardless of version.
2434 */
2435 hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities);
2436 /*
2437 * 0h: legacy single doorbell support is available
2438 * 1h: indicates that legacy single doorbell support has been removed
2439 */
2440 if (!(hba->quirks & UFSHCD_QUIRK_BROKEN_LSDBS_CAP))
2441 hba->lsdb_sup = !FIELD_GET(MASK_LSDB_SUPPORT, hba->capabilities);
2442 else
2443 hba->lsdb_sup = true;
2444
2445 hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP);
2446
2447 return 0;
2448 }
2449
2450 /**
2451 * ufshcd_ready_for_uic_cmd - Check if controller is ready
2452 * to accept UIC commands
2453 * @hba: per adapter instance
2454 *
2455 * Return: true on success, else false.
2456 */
2457 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2458 {
2459 u32 val;
2460 int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
2461 500, uic_cmd_timeout * 1000, false, hba,
2462 REG_CONTROLLER_STATUS);
2463 return ret == 0;
2464 }
2465
2466 /**
2467 * ufshcd_get_upmcrs - Get the power mode change request status
2468 * @hba: Pointer to adapter instance
2469 *
2470 * This function gets the UPMCRS field of the HCS register
2471 *
2472 * Return: value of UPMCRS field.
2473 */
2474 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2475 {
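/* UPMCRS occupies bits 10:8 of the HCS register, hence the shift by 8 and the 0x7 mask. */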
2476 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
2477 }
2478
2479 /**
2480 * ufshcd_dispatch_uic_cmd - Dispatch an UIC command to the Unipro layer
2481 * @hba: per adapter instance
2482 * @uic_cmd: UIC command
2483 */
2484 static inline void
2485 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2486 {
2487 lockdep_assert_held(&hba->uic_cmd_mutex);
2488
2489 WARN_ON(hba->active_uic_cmd);
2490
2491 hba->active_uic_cmd = uic_cmd;
2492
2493 /* Write Args */
2494 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2495 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2496 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
2497
2498 ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);
2499
2500 /* Write UIC Cmd */
2501 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
2502 REG_UIC_COMMAND);
2503 }
2504
2505 /**
2506 * ufshcd_wait_for_uic_cmd - Wait for completion of an UIC command
2507 * @hba: per adapter instance
2508 * @uic_cmd: UIC command
2509 *
2510 * Return: 0 only if successful.
2511 */
2512 static int
2513 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2514 {
2515 int ret;
2516 unsigned long flags;
2517
2518 lockdep_assert_held(&hba->uic_cmd_mutex);
2519
2520 if (wait_for_completion_timeout(&uic_cmd->done,
2521 msecs_to_jiffies(uic_cmd_timeout))) {
2522 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2523 } else {
2524 ret = -ETIMEDOUT;
2525 dev_err(hba->dev,
2526 "uic cmd 0x%x with arg3 0x%x completion timeout\n",
2527 uic_cmd->command, uic_cmd->argument3);
2528
2529 if (!uic_cmd->cmd_active) {
2530 dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
2531 __func__);
2532 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2533 }
2534 }
2535
2536 spin_lock_irqsave(hba->host->host_lock, flags);
2537 hba->active_uic_cmd = NULL;
2538 spin_unlock_irqrestore(hba->host->host_lock, flags);
2539
2540 return ret;
2541 }
2542
2543 /**
2544 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2545 * @hba: per adapter instance
2546 * @uic_cmd: UIC command
2547 *
2548 * Return: 0 only if successful.
2549 */
2550 static int
2551 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2552 {
2553 lockdep_assert_held(&hba->uic_cmd_mutex);
2554
2555 if (!ufshcd_ready_for_uic_cmd(hba)) {
2556 dev_err(hba->dev,
2557 "Controller not ready to accept UIC commands\n");
2558 return -EIO;
2559 }
2560
2561 init_completion(&uic_cmd->done);
2562
2563 uic_cmd->cmd_active = 1;
2564 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2565
2566 return 0;
2567 }
2568
2569 /**
2570 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2571 * @hba: per adapter instance
2572 * @uic_cmd: UIC command
2573 *
2574 * Return: 0 only if successful.
2575 */
2576 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2577 {
2578 int ret;
2579
2580 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
2581 return 0;
2582
2583 ufshcd_hold(hba);
2584 mutex_lock(&hba->uic_cmd_mutex);
2585 ufshcd_add_delay_before_dme_cmd(hba);
2586
2587 ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
2588 if (!ret)
2589 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2590
2591 mutex_unlock(&hba->uic_cmd_mutex);
2592
2593 ufshcd_release(hba);
2594 return ret;
2595 }
2596
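/*
 * Illustrative sketch only (not used elsewhere in this driver): issuing a
 * DME_GET of a UniPro MIB attribute through ufshcd_send_uic_cmd(). The
 * UIC_CMD_DME_GET opcode is assumed to come from ufshci.h; on success the
 * controller returns the attribute value in argument3.
 */
static int __maybe_unused ufshcd_example_dme_get(struct ufs_hba *hba,
						 u32 attr_sel, u32 *mib_val)
{
	struct uic_command uic_cmd = {
		.command = UIC_CMD_DME_GET,
		.argument1 = attr_sel,
	};
	int ret;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (!ret && mib_val)
		*mib_val = uic_cmd.argument3;

	return ret;
}
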
2597 /**
2598 * ufshcd_sgl_to_prdt - SG list to PRDT (Physical Region Description Table, 4DW format)
2599 * @hba: per-adapter instance
2600 * @lrbp: pointer to local reference block
2601 * @sg_entries: The number of SG entries actually used
2602 * @sg_list: Pointer to SG list
2603 */
2604 static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int sg_entries,
2605 struct scatterlist *sg_list)
2606 {
2607 struct ufshcd_sg_entry *prd;
2608 struct scatterlist *sg;
2609 int i;
2610
2611 if (sg_entries) {
2612
2613 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2614 lrbp->utr_descriptor_ptr->prd_table_length =
2615 cpu_to_le16(sg_entries * ufshcd_sg_entry_size(hba));
2616 else
2617 lrbp->utr_descriptor_ptr->prd_table_length = cpu_to_le16(sg_entries);
2618
2619 prd = lrbp->ucd_prdt_ptr;
2620
2621 for_each_sg(sg_list, sg, sg_entries, i) {
2622 const unsigned int len = sg_dma_len(sg);
2623
2624 /*
2625 * From the UFSHCI spec: "Data Byte Count (DBC): A '0'
2626 * based value that indicates the length, in bytes, of
2627 * the data block. A maximum of length of 256KB may
2628 * exist for any entry. Bits 1:0 of this field shall be
2629 * 11b to indicate Dword granularity. A value of '3'
2630 * indicates 4 bytes, '7' indicates 8 bytes, etc."
2631 */
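/*
 * Worked example: a 4 KiB SG element is encoded as 4096 - 1 = 4095
 * (0xFFF), whose two least significant bits are 11b, matching the
 * Dword-granularity rule quoted above.
 */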
2632 WARN_ONCE(len > SZ_256K, "len = %#x\n", len);
2633 prd->size = cpu_to_le32(len - 1);
2634 prd->addr = cpu_to_le64(sg->dma_address);
2635 prd->reserved = 0;
2636 prd = (void *)prd + ufshcd_sg_entry_size(hba);
2637 }
2638 } else {
2639 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2640 }
2641 }
2642
2643 /**
2644 * ufshcd_map_sg - Map scatter-gather list to prdt
2645 * @hba: per adapter instance
2646 * @lrbp: pointer to local reference block
2647 *
2648 * Return: 0 in case of success, non-zero value in case of failure.
2649 */
2650 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2651 {
2652 struct scsi_cmnd *cmd = lrbp->cmd;
2653 int sg_segments = scsi_dma_map(cmd);
2654
2655 if (sg_segments < 0)
2656 return sg_segments;
2657
2658 ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));
2659
2660 return ufshcd_crypto_fill_prdt(hba, lrbp);
2661 }
2662
2663 /**
2664 * ufshcd_enable_intr - enable interrupts
2665 * @hba: per adapter instance
2666 * @intrs: interrupt bits
2667 */
2668 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2669 {
2670 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2671
2672 set |= intrs;
2673 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2674 }
2675
2676 /**
2677 * ufshcd_disable_intr - disable interrupts
2678 * @hba: per adapter instance
2679 * @intrs: interrupt bits
2680 */
2681 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2682 {
2683 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2684
2685 set &= ~intrs;
2686 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2687 }
2688
2689 /**
2690 * ufshcd_prepare_req_desc_hdr - Fill the UTP Transfer request descriptor
2691 * header according to the request
2692 * @hba: per adapter instance
2693 * @lrbp: pointer to local reference block
2694 * @upiu_flags: flags required in the header
2695 * @cmd_dir: the request's data direction
2696 * @ehs_length: Total EHS Length (in 32-byte units) of all Extra Header Segments
2697 */
2698 static void
2699 ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
2700 u8 *upiu_flags, enum dma_data_direction cmd_dir,
2701 int ehs_length)
2702 {
2703 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2704 struct request_desc_header *h = &req_desc->header;
2705 enum utp_data_direction data_direction;
2706
2707 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2708
2709 *h = (typeof(*h)){ };
2710
2711 if (cmd_dir == DMA_FROM_DEVICE) {
2712 data_direction = UTP_DEVICE_TO_HOST;
2713 *upiu_flags = UPIU_CMD_FLAGS_READ;
2714 } else if (cmd_dir == DMA_TO_DEVICE) {
2715 data_direction = UTP_HOST_TO_DEVICE;
2716 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2717 } else {
2718 data_direction = UTP_NO_DATA_TRANSFER;
2719 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2720 }
2721
2722 h->command_type = lrbp->command_type;
2723 h->data_direction = data_direction;
2724 h->ehs_length = ehs_length;
2725
2726 if (lrbp->intr_cmd)
2727 h->interrupt = 1;
2728
2729 /* Prepare crypto related dwords */
2730 ufshcd_prepare_req_desc_hdr_crypto(lrbp, h);
2731
2732 /*
2733 * Assign an invalid value to the command status. The
2734 * controller updates the OCS field with the actual command
2735 * status upon completion.
2736 */
2737 h->ocs = OCS_INVALID_COMMAND_STATUS;
2738
2739 req_desc->prd_table_length = 0;
2740 }
2741
2742 /**
2743 * ufshcd_prepare_utp_scsi_cmd_upiu() - fill the utp_transfer_req_desc
2744 * for SCSI commands
2745 * @lrbp: local reference block pointer
2746 * @upiu_flags: flags
2747 */
2748 static
2749 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
2750 {
2751 struct scsi_cmnd *cmd = lrbp->cmd;
2752 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2753 unsigned short cdb_len;
2754
2755 ucd_req_ptr->header = (struct utp_upiu_header){
2756 .transaction_code = UPIU_TRANSACTION_COMMAND,
2757 .flags = upiu_flags,
2758 .lun = lrbp->lun,
2759 .task_tag = lrbp->task_tag,
2760 .command_set_type = UPIU_COMMAND_SET_TYPE_SCSI,
2761 };
2762
2763 WARN_ON_ONCE(ucd_req_ptr->header.task_tag != lrbp->task_tag);
2764
2765 ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
2766
2767 cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
2768 memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
2769
2770 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2771 }
2772
2773 /**
2774 * ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc for query request
2775 * @hba: UFS hba
2776 * @lrbp: local reference block pointer
2777 * @upiu_flags: flags
2778 */
2779 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2780 struct ufshcd_lrb *lrbp, u8 upiu_flags)
2781 {
2782 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2783 struct ufs_query *query = &hba->dev_cmd.query;
2784 u16 len = be16_to_cpu(query->request.upiu_req.length);
2785
2786 /* Query request header */
2787 ucd_req_ptr->header = (struct utp_upiu_header){
2788 .transaction_code = UPIU_TRANSACTION_QUERY_REQ,
2789 .flags = upiu_flags,
2790 .lun = lrbp->lun,
2791 .task_tag = lrbp->task_tag,
2792 .query_function = query->request.query_func,
2793 /* Data segment length is only needed for WRITE_DESC */
2794 .data_segment_length =
2795 query->request.upiu_req.opcode ==
2796 UPIU_QUERY_OPCODE_WRITE_DESC ?
2797 cpu_to_be16(len) :
2798 0,
2799 };
2800
2801 /* Copy the Query Request buffer as is */
2802 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2803 QUERY_OSF_SIZE);
2804
2805 /* Copy the Descriptor */
2806 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2807 memcpy(ucd_req_ptr + 1, query->descriptor, len);
2808
2809 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2810 }
2811
2812 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2813 {
2814 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2815
2816 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2817
2818 ucd_req_ptr->header = (struct utp_upiu_header){
2819 .transaction_code = UPIU_TRANSACTION_NOP_OUT,
2820 .task_tag = lrbp->task_tag,
2821 };
2822
2823 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2824 }
2825
2826 /**
2827 * ufshcd_compose_devman_upiu - UFS Protocol Information Unit (UPIU)
2828 * for Device Management Purposes
2829 * @hba: per adapter instance
2830 * @lrbp: pointer to local reference block
2831 *
2832 * Return: 0 upon success; < 0 upon failure.
2833 */
2834 static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
2835 struct ufshcd_lrb *lrbp)
2836 {
2837 u8 upiu_flags;
2838 int ret = 0;
2839
2840 ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, 0);
2841
2842 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2843 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2844 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2845 ufshcd_prepare_utp_nop_upiu(lrbp);
2846 else
2847 ret = -EINVAL;
2848
2849 return ret;
2850 }
2851
2852 /**
2853 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit (UPIU)
2854 * for SCSI Purposes
2855 * @hba: per adapter instance
2856 * @lrbp: pointer to local reference block
2857 */
2858 static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2859 {
2860 struct request *rq = scsi_cmd_to_rq(lrbp->cmd);
2861 unsigned int ioprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
2862 u8 upiu_flags;
2863
2864 ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0);
2865 if (ioprio_class == IOPRIO_CLASS_RT)
2866 upiu_flags |= UPIU_CMD_FLAGS_CP;
2867 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2868 }
2869
2870 static void __ufshcd_setup_cmd(struct ufshcd_lrb *lrbp, struct scsi_cmnd *cmd, u8 lun, int tag)
2871 {
2872 memset(lrbp->ucd_req_ptr, 0, sizeof(*lrbp->ucd_req_ptr));
2873
2874 lrbp->cmd = cmd;
2875 lrbp->task_tag = tag;
2876 lrbp->lun = lun;
2877 ufshcd_prepare_lrbp_crypto(cmd ? scsi_cmd_to_rq(cmd) : NULL, lrbp);
2878 }
2879
2880 static void ufshcd_setup_scsi_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
2881 struct scsi_cmnd *cmd, u8 lun, int tag)
2882 {
2883 __ufshcd_setup_cmd(lrbp, cmd, lun, tag);
2884 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
2885 lrbp->req_abort_skip = false;
2886
2887 ufshcd_comp_scsi_upiu(hba, lrbp);
2888 }
2889
2890 /**
2891 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2892 * @upiu_wlun_id: UPIU W-LUN id
2893 *
2894 * Return: SCSI W-LUN id.
2895 */
2896 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2897 {
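	/*
	 * Worked example (assuming UFS_UPIU_WLUN_ID is bit 7 and
	 * SCSI_W_LUN_BASE is 0xc100): a UPIU W-LUN of 0x81 (the REPORT LUNS
	 * well-known LUN) maps to SCSI W-LUN 0xc101.
	 */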
2898 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2899 }
2900
2901 static inline bool is_device_wlun(struct scsi_device *sdev)
2902 {
2903 return sdev->lun ==
2904 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
2905 }
2906
2907 /*
2908 * Associate the UFS controller queue with the default and poll HCTX types.
2909 * Initialize the mq_map[] arrays.
2910 */
2911 static void ufshcd_map_queues(struct Scsi_Host *shost)
2912 {
2913 struct ufs_hba *hba = shost_priv(shost);
2914 int i, queue_offset = 0;
2915
2916 if (!is_mcq_supported(hba)) {
2917 hba->nr_queues[HCTX_TYPE_DEFAULT] = 1;
2918 hba->nr_queues[HCTX_TYPE_READ] = 0;
2919 hba->nr_queues[HCTX_TYPE_POLL] = 1;
2920 hba->nr_hw_queues = 1;
2921 }
2922
2923 for (i = 0; i < shost->nr_maps; i++) {
2924 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
2925
2926 map->nr_queues = hba->nr_queues[i];
2927 if (!map->nr_queues)
2928 continue;
2929 map->queue_offset = queue_offset;
2930 if (i == HCTX_TYPE_POLL && !is_mcq_supported(hba))
2931 map->queue_offset = 0;
2932
2933 blk_mq_map_queues(map);
2934 queue_offset += map->nr_queues;
2935 }
2936 }
2937
2938 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
2939 {
2940 struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
2941 i * ufshcd_get_ucd_size(hba);
2942 struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
2943 dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
2944 i * ufshcd_get_ucd_size(hba);
2945 u16 response_offset = le16_to_cpu(utrdlp[i].response_upiu_offset);
2946 u16 prdt_offset = le16_to_cpu(utrdlp[i].prd_table_offset);
2947
2948 lrb->utr_descriptor_ptr = utrdlp + i;
2949 lrb->utrd_dma_addr = hba->utrdl_dma_addr +
2950 i * sizeof(struct utp_transfer_req_desc);
2951 lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
2952 lrb->ucd_req_dma_addr = cmd_desc_element_addr;
2953 lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
2954 lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
2955 lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
2956 lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
2957 }
2958
2959 /**
2960 * ufshcd_queuecommand - main entry point for SCSI requests
2961 * @host: SCSI host pointer
2962 * @cmd: command from SCSI Midlayer
2963 *
2964 * Return: 0 for success, non-zero in case of failure.
2965 */
2966 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2967 {
2968 struct ufs_hba *hba = shost_priv(host);
2969 int tag = scsi_cmd_to_rq(cmd)->tag;
2970 struct ufshcd_lrb *lrbp;
2971 int err = 0;
2972 struct ufs_hw_queue *hwq = NULL;
2973
2974 switch (hba->ufshcd_state) {
2975 case UFSHCD_STATE_OPERATIONAL:
2976 break;
2977 case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
2978 /*
2979 * SCSI error handler can call ->queuecommand() while UFS error
2980 * handler is in progress. Error interrupts could change the
2981 * state from UFSHCD_STATE_RESET to
2982 * UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests from
2983 * being issued in that case.
2984 */
2985 if (ufshcd_eh_in_progress(hba)) {
2986 err = SCSI_MLQUEUE_HOST_BUSY;
2987 goto out;
2988 }
2989 break;
2990 case UFSHCD_STATE_EH_SCHEDULED_FATAL:
2991 /*
2992 * pm_runtime_get_sync() is used at the error handling preparation
2993 * stage. If a SCSI cmd, e.g. an SSU cmd, is sent from the hba's
2994 * PM ops, it can never finish if we let the SCSI layer keep
2995 * retrying it, which leaves the error handler stuck forever.
2996 * Neither can we let the SCSI cmd pass through, because UFS is
2997 * in a bad state and the cmd may eventually time out, blocking
2998 * the error handler for too long. So just fail SCSI cmds sent
2999 * from PM ops; the error handler can recover the PM error anyway.
3000 */
3001 if (hba->pm_op_in_progress) {
3002 hba->force_reset = true;
3003 set_host_byte(cmd, DID_BAD_TARGET);
3004 scsi_done(cmd);
3005 goto out;
3006 }
3007 fallthrough;
3008 case UFSHCD_STATE_RESET:
3009 err = SCSI_MLQUEUE_HOST_BUSY;
3010 goto out;
3011 case UFSHCD_STATE_ERROR:
3012 set_host_byte(cmd, DID_ERROR);
3013 scsi_done(cmd);
3014 goto out;
3015 }
3016
3017 hba->req_abort_count = 0;
3018
3019 ufshcd_hold(hba);
3020
3021 lrbp = &hba->lrb[tag];
3022
3023 ufshcd_setup_scsi_cmd(hba, lrbp, cmd, ufshcd_scsi_to_upiu_lun(cmd->device->lun), tag);
3024
3025 err = ufshcd_map_sg(hba, lrbp);
3026 if (err) {
3027 ufshcd_release(hba);
3028 goto out;
3029 }
3030
3031 if (hba->mcq_enabled)
3032 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
3033
3034 ufshcd_send_command(hba, tag, hwq);
3035
3036 out:
3037 if (ufs_trigger_eh(hba)) {
3038 unsigned long flags;
3039
3040 spin_lock_irqsave(hba->host->host_lock, flags);
3041 ufshcd_schedule_eh_work(hba);
3042 spin_unlock_irqrestore(hba->host->host_lock, flags);
3043 }
3044
3045 return err;
3046 }
3047
3048 static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
3049 enum dev_cmd_type cmd_type, u8 lun, int tag)
3050 {
3051 __ufshcd_setup_cmd(lrbp, NULL, lun, tag);
3052 lrbp->intr_cmd = true; /* No interrupt aggregation */
3053 hba->dev_cmd.type = cmd_type;
3054 }
3055
3056 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
3057 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
3058 {
3059 ufshcd_setup_dev_cmd(hba, lrbp, cmd_type, 0, tag);
3060
3061 return ufshcd_compose_devman_upiu(hba, lrbp);
3062 }
3063
3064 /*
3065 * Check with the block layer if the command is inflight
3066 * @cmd: command to check.
3067 *
3068 * Return: true if command is inflight; false if not.
3069 */
3070 bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
3071 {
3072 return cmd && blk_mq_rq_state(scsi_cmd_to_rq(cmd)) == MQ_RQ_IN_FLIGHT;
3073 }
3074
3075 /*
3076 * Clear the pending command in the controller and wait until
3077 * the controller confirms that the command has been cleared.
3078 * @hba: per adapter instance
3079 * @task_tag: The tag number of the command to be cleared.
3080 */
3081 static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
3082 {
3083 u32 mask;
3084 int err;
3085
3086 if (hba->mcq_enabled) {
3087 /*
3088 * MCQ mode. Clean up the MCQ resources similar to
3089 * what the ufshcd_utrl_clear() does for SDB mode.
3090 */
3091 err = ufshcd_mcq_sq_cleanup(hba, task_tag);
3092 if (err) {
3093 dev_err(hba->dev, "%s: failed tag=%d. err=%d\n",
3094 __func__, task_tag, err);
3095 return err;
3096 }
3097 return 0;
3098 }
3099
3100 mask = 1U << task_tag;
3101
3102 /* clear outstanding transaction before retry */
3103 ufshcd_utrl_clear(hba, mask);
3104
3105 /*
3106 * wait for h/w to clear corresponding bit in door-bell.
3107 * max. wait is 1 sec.
3108 */
3109 return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
3110 mask, ~mask, 1000, 1000);
3111 }
3112
3113 /**
3114 * ufshcd_dev_cmd_completion() - handles device management command responses
3115 * @hba: per adapter instance
3116 * @lrbp: pointer to local reference block
3117 *
3118 * Return: 0 upon success; < 0 upon failure.
3119 */
3120 static int
3121 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
3122 {
3123 enum upiu_response_transaction resp;
3124 int err = 0;
3125
3126 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
3127 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
3128
3129 switch (resp) {
3130 case UPIU_TRANSACTION_NOP_IN:
3131 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
3132 err = -EINVAL;
3133 dev_err(hba->dev, "%s: unexpected response %x\n",
3134 __func__, resp);
3135 }
3136 break;
3137 case UPIU_TRANSACTION_QUERY_RSP: {
3138 u8 response = lrbp->ucd_rsp_ptr->header.response;
3139
3140 if (response == 0) {
3141 err = ufshcd_copy_query_response(hba, lrbp);
3142 } else {
3143 err = -EINVAL;
3144 dev_err(hba->dev, "%s: unexpected response in Query RSP: %x\n",
3145 __func__, response);
3146 }
3147 break;
3148 }
3149 case UPIU_TRANSACTION_REJECT_UPIU:
3150 /* TODO: handle Reject UPIU Response */
3151 err = -EPERM;
3152 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
3153 __func__);
3154 break;
3155 case UPIU_TRANSACTION_RESPONSE:
3156 if (hba->dev_cmd.type != DEV_CMD_TYPE_RPMB) {
3157 err = -EINVAL;
3158 dev_err(hba->dev, "%s: unexpected response %x\n", __func__, resp);
3159 }
3160 break;
3161 default:
3162 err = -EINVAL;
3163 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
3164 __func__, resp);
3165 break;
3166 }
3167
3168 return err;
3169 }
3170
3171 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
3172 struct ufshcd_lrb *lrbp, int max_timeout)
3173 {
3174 unsigned long time_left = msecs_to_jiffies(max_timeout);
3175 unsigned long flags;
3176 bool pending;
3177 int err;
3178
3179 retry:
3180 time_left = wait_for_completion_timeout(&hba->dev_cmd.complete,
3181 time_left);
3182
3183 if (likely(time_left)) {
3184 err = ufshcd_get_tr_ocs(lrbp, NULL);
3185 if (!err)
3186 err = ufshcd_dev_cmd_completion(hba, lrbp);
3187 } else {
3188 err = -ETIMEDOUT;
3189 dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
3190 __func__, lrbp->task_tag);
3191
3192 /* MCQ mode */
3193 if (hba->mcq_enabled) {
3194 /* successfully cleared the command, retry if needed */
3195 if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0)
3196 err = -EAGAIN;
3197 return err;
3198 }
3199
3200 /* SDB mode */
3201 if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) {
3202 /* successfully cleared the command, retry if needed */
3203 err = -EAGAIN;
3204 /*
3205 * Since clearing the command succeeded we also need to
3206 * clear the task tag bit from the outstanding_reqs
3207 * variable.
3208 */
3209 spin_lock_irqsave(&hba->outstanding_lock, flags);
3210 pending = test_bit(lrbp->task_tag,
3211 &hba->outstanding_reqs);
3212 if (pending)
3213 __clear_bit(lrbp->task_tag,
3214 &hba->outstanding_reqs);
3215 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
3216
3217 if (!pending) {
3218 /*
3219 * The completion handler ran while we tried to
3220 * clear the command.
3221 */
3222 time_left = 1;
3223 goto retry;
3224 }
3225 } else {
3226 dev_err(hba->dev, "%s: failed to clear tag %d\n",
3227 __func__, lrbp->task_tag);
3228
3229 spin_lock_irqsave(&hba->outstanding_lock, flags);
3230 pending = test_bit(lrbp->task_tag,
3231 &hba->outstanding_reqs);
3232 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
3233
3234 if (!pending) {
3235 /*
3236 * The completion handler ran while we tried to
3237 * clear the command.
3238 */
3239 time_left = 1;
3240 goto retry;
3241 }
3242 }
3243 }
3244
3245 return err;
3246 }
3247
3248 static void ufshcd_dev_man_lock(struct ufs_hba *hba)
3249 {
3250 ufshcd_hold(hba);
3251 mutex_lock(&hba->dev_cmd.lock);
3252 down_read(&hba->clk_scaling_lock);
3253 }
3254
3255 static void ufshcd_dev_man_unlock(struct ufs_hba *hba)
3256 {
3257 up_read(&hba->clk_scaling_lock);
3258 mutex_unlock(&hba->dev_cmd.lock);
3259 ufshcd_release(hba);
3260 }
3261
3262 static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
3263 const u32 tag, int timeout)
3264 {
3265 int err;
3266
3267 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
3268 ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
3269 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
3270
3271 ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
3272 (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
3273
3274 return err;
3275 }
3276
3277 /**
3278 * ufshcd_exec_dev_cmd - API for sending device management requests
3279 * @hba: UFS hba
3280 * @cmd_type: specifies the type (NOP, Query...)
3281 * @timeout: timeout in milliseconds
3282 *
3283 * Return: 0 upon success; < 0 upon failure.
3284 *
3285 * NOTE: Since there is only one available tag for device management commands,
3286 * the caller is expected to hold the hba->dev_cmd.lock mutex.
3287 */
3288 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
3289 enum dev_cmd_type cmd_type, int timeout)
3290 {
3291 const u32 tag = hba->reserved_slot;
3292 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
3293 int err;
3294
3295 /* Protects use of hba->reserved_slot. */
3296 lockdep_assert_held(&hba->dev_cmd.lock);
3297
3298 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
3299 if (unlikely(err))
3300 return err;
3301
3302 return ufshcd_issue_dev_cmd(hba, lrbp, tag, timeout);
3303 }
3304
3305 /**
3306 * ufshcd_init_query() - init the query response and request parameters
3307 * @hba: per-adapter instance
3308 * @request: address of the request pointer to be initialized
3309 * @response: address of the response pointer to be initialized
3310 * @opcode: operation to perform
3311 * @idn: flag idn to access
3312 * @index: LU number to access
3313 * @selector: query/flag/descriptor further identification
3314 */
3315 static inline void ufshcd_init_query(struct ufs_hba *hba,
3316 struct ufs_query_req **request, struct ufs_query_res **response,
3317 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
3318 {
3319 *request = &hba->dev_cmd.query.request;
3320 *response = &hba->dev_cmd.query.response;
3321 memset(*request, 0, sizeof(struct ufs_query_req));
3322 memset(*response, 0, sizeof(struct ufs_query_res));
3323 (*request)->upiu_req.opcode = opcode;
3324 (*request)->upiu_req.idn = idn;
3325 (*request)->upiu_req.index = index;
3326 (*request)->upiu_req.selector = selector;
3327 }
3328
3329 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
3330 enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
3331 {
3332 int ret;
3333 int retries;
3334
3335 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
3336 ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
3337 if (ret)
3338 dev_dbg(hba->dev,
3339 "%s: failed with error %d, retries %d\n",
3340 __func__, ret, retries);
3341 else
3342 break;
3343 }
3344
3345 if (ret)
3346 dev_err(hba->dev,
3347 "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
3348 __func__, opcode, idn, ret, retries);
3349 return ret;
3350 }
3351
3352 /**
3353 * ufshcd_query_flag() - API function for sending flag query requests
3354 * @hba: per-adapter instance
3355 * @opcode: flag query to perform
3356 * @idn: flag idn to access
3357 * @index: flag index to access
3358 * @flag_res: the flag value after the query request completes
3359 *
3360 * Return: 0 for success, non-zero in case of failure.
3361 */
3362 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
3363 enum flag_idn idn, u8 index, bool *flag_res)
3364 {
3365 struct ufs_query_req *request = NULL;
3366 struct ufs_query_res *response = NULL;
3367 int err, selector = 0;
3368 int timeout = QUERY_REQ_TIMEOUT;
3369
3370 BUG_ON(!hba);
3371
3372 ufshcd_dev_man_lock(hba);
3373
3374 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3375 selector);
3376
3377 switch (opcode) {
3378 case UPIU_QUERY_OPCODE_SET_FLAG:
3379 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
3380 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
3381 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3382 break;
3383 case UPIU_QUERY_OPCODE_READ_FLAG:
3384 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3385 if (!flag_res) {
3386 /* No dummy reads */
3387 dev_err(hba->dev, "%s: Invalid argument for read request\n",
3388 __func__);
3389 err = -EINVAL;
3390 goto out_unlock;
3391 }
3392 break;
3393 default:
3394 dev_err(hba->dev,
3395 "%s: Expected query flag opcode but got = %d\n",
3396 __func__, opcode);
3397 err = -EINVAL;
3398 goto out_unlock;
3399 }
3400
3401 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
3402
3403 if (err) {
3404 dev_err(hba->dev,
3405 "%s: Sending flag query for idn %d failed, err = %d\n",
3406 __func__, idn, err);
3407 goto out_unlock;
3408 }
3409
3410 if (flag_res)
3411 *flag_res = (be32_to_cpu(response->upiu_res.value) &
3412 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
3413
3414 out_unlock:
3415 ufshcd_dev_man_unlock(hba);
3416 return err;
3417 }
3418
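/*
 * Illustrative sketch only (not used elsewhere in this driver): reading a
 * device-level flag such as fDeviceInit through the retrying wrapper above.
 * QUERY_FLAG_IDN_FDEVICEINIT is assumed to come from ufs.h; index 0 is the
 * usual value for device-level flags.
 */
static int __maybe_unused ufshcd_example_read_fdeviceinit(struct ufs_hba *hba,
							  bool *flag_res)
{
	return ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				       QUERY_FLAG_IDN_FDEVICEINIT, 0, flag_res);
}
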
3419 /**
3420 * ufshcd_query_attr - API function for sending attribute requests
3421 * @hba: per-adapter instance
3422 * @opcode: attribute opcode
3423 * @idn: attribute idn to access
3424 * @index: index field
3425 * @selector: selector field
3426 * @attr_val: the attribute value after the query request completes
3427 *
3428 * Return: 0 for success, non-zero in case of failure.
3429 */
3430 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
3431 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
3432 {
3433 struct ufs_query_req *request = NULL;
3434 struct ufs_query_res *response = NULL;
3435 int err;
3436
3437 BUG_ON(!hba);
3438
3439 if (!attr_val) {
3440 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
3441 __func__, opcode);
3442 return -EINVAL;
3443 }
3444
3445 ufshcd_dev_man_lock(hba);
3446
3447 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3448 selector);
3449
3450 switch (opcode) {
3451 case UPIU_QUERY_OPCODE_WRITE_ATTR:
3452 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3453 request->upiu_req.value = cpu_to_be32(*attr_val);
3454 break;
3455 case UPIU_QUERY_OPCODE_READ_ATTR:
3456 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3457 break;
3458 default:
3459 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
3460 __func__, opcode);
3461 err = -EINVAL;
3462 goto out_unlock;
3463 }
3464
3465 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3466
3467 if (err) {
3468 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3469 __func__, opcode, idn, index, err);
3470 goto out_unlock;
3471 }
3472
3473 *attr_val = be32_to_cpu(response->upiu_res.value);
3474
3475 out_unlock:
3476 ufshcd_dev_man_unlock(hba);
3477 return err;
3478 }
3479
3480 /**
3481 * ufshcd_query_attr_retry() - API function for sending query
3482 * attribute with retries
3483 * @hba: per-adapter instance
3484 * @opcode: attribute opcode
3485 * @idn: attribute idn to access
3486 * @index: index field
3487 * @selector: selector field
3488 * @attr_val: the attribute value after the query request
3489 * completes
3490 *
3491 * Return: 0 for success, non-zero in case of failure.
3492 */
3493 int ufshcd_query_attr_retry(struct ufs_hba *hba,
3494 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
3495 u32 *attr_val)
3496 {
3497 int ret = 0;
3498 u32 retries;
3499
3500 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3501 ret = ufshcd_query_attr(hba, opcode, idn, index,
3502 selector, attr_val);
3503 if (ret)
3504 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
3505 __func__, ret, retries);
3506 else
3507 break;
3508 }
3509
3510 if (ret)
3511 dev_err(hba->dev,
3512 "%s: query attribute, idn %d, failed with error %d after %d retries\n",
3513 __func__, idn, ret, QUERY_REQ_RETRIES);
3514 return ret;
3515 }
3516
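/*
 * Usage sketch (illustrative only, not part of the driver): reading the
 * bRefClkGatingWait attribute with the retrying helper above; the same
 * pattern is used by ufshcd_get_ref_clk_gating_wait() further down in
 * this file.
 */
static int __maybe_unused example_read_ref_clk_gating_wait(struct ufs_hba *hba,
							    u32 *val)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				       QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME,
				       0, 0, val);
}
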
3517 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
3518 enum query_opcode opcode, enum desc_idn idn, u8 index,
3519 u8 selector, u8 *desc_buf, int *buf_len)
3520 {
3521 struct ufs_query_req *request = NULL;
3522 struct ufs_query_res *response = NULL;
3523 int err;
3524
3525 BUG_ON(!hba);
3526
3527 if (!desc_buf) {
3528 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
3529 __func__, opcode);
3530 return -EINVAL;
3531 }
3532
3533 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
3534 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
3535 __func__, *buf_len);
3536 return -EINVAL;
3537 }
3538
3539 ufshcd_dev_man_lock(hba);
3540
3541 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3542 selector);
3543 hba->dev_cmd.query.descriptor = desc_buf;
3544 request->upiu_req.length = cpu_to_be16(*buf_len);
3545
3546 switch (opcode) {
3547 case UPIU_QUERY_OPCODE_WRITE_DESC:
3548 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3549 break;
3550 case UPIU_QUERY_OPCODE_READ_DESC:
3551 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3552 break;
3553 default:
3554 dev_err(hba->dev,
3555 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3556 __func__, opcode);
3557 err = -EINVAL;
3558 goto out_unlock;
3559 }
3560
3561 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3562
3563 if (err) {
3564 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3565 __func__, opcode, idn, index, err);
3566 goto out_unlock;
3567 }
3568
3569 *buf_len = be16_to_cpu(response->upiu_res.length);
3570
3571 out_unlock:
3572 hba->dev_cmd.query.descriptor = NULL;
3573 ufshcd_dev_man_unlock(hba);
3574 return err;
3575 }
3576
3577 /**
3578 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
3579 * @hba: per-adapter instance
3580 * @opcode: attribute opcode
3581 * @idn: attribute idn to access
3582 * @index: index field
3583 * @selector: selector field
3584 * @desc_buf: the buffer that contains the descriptor
3585 * @buf_len: length parameter passed to the device
3586 *
3587 * The buf_len parameter will contain, on return, the length parameter
3588 * received on the response.
3589 *
3590 * Return: 0 for success, non-zero in case of failure.
3591 */
3592 int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3593 enum query_opcode opcode,
3594 enum desc_idn idn, u8 index,
3595 u8 selector,
3596 u8 *desc_buf, int *buf_len)
3597 {
3598 int err;
3599 int retries;
3600
3601 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3602 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3603 selector, desc_buf, buf_len);
3604 if (!err || err == -EINVAL)
3605 break;
3606 }
3607
3608 return err;
3609 }
3610
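/*
 * Usage sketch (illustrative only, not part of the driver): reading the
 * device descriptor into a caller-provided buffer. QUERY_DESC_IDN_DEVICE is
 * assumed to be the device descriptor IDN from <ufs/ufs.h>; buf should be
 * at least QUERY_DESC_MAX_SIZE bytes and *buf_len is updated with the
 * length returned by the device.
 */
static int __maybe_unused example_read_device_desc(struct ufs_hba *hba,
						   u8 *buf, int *buf_len)
{
	return ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					     QUERY_DESC_IDN_DEVICE, 0, 0,
					     buf, buf_len);
}
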
3611 /**
3612 * ufshcd_read_desc_param - read the specified descriptor parameter
3613 * @hba: Pointer to adapter instance
3614 * @desc_id: descriptor idn value
3615 * @desc_index: descriptor index
3616 * @param_offset: offset of the parameter to read
3617 * @param_read_buf: pointer to buffer where parameter would be read
3618 * @param_size: sizeof(param_read_buf)
3619 *
3620 * Return: 0 in case of success, non-zero otherwise.
3621 */
3622 int ufshcd_read_desc_param(struct ufs_hba *hba,
3623 enum desc_idn desc_id,
3624 int desc_index,
3625 u8 param_offset,
3626 u8 *param_read_buf,
3627 u8 param_size)
3628 {
3629 int ret;
3630 u8 *desc_buf;
3631 int buff_len = QUERY_DESC_MAX_SIZE;
3632 bool is_kmalloc = true;
3633
3634 /* Safety check */
3635 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3636 return -EINVAL;
3637
3638 /* Check whether we need temp memory */
3639 if (param_offset != 0 || param_size < buff_len) {
3640 desc_buf = kzalloc(buff_len, GFP_KERNEL);
3641 if (!desc_buf)
3642 return -ENOMEM;
3643 } else {
3644 desc_buf = param_read_buf;
3645 is_kmalloc = false;
3646 }
3647
3648 /* Request for full descriptor */
3649 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3650 desc_id, desc_index, 0,
3651 desc_buf, &buff_len);
3652 if (ret) {
3653 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
3654 __func__, desc_id, desc_index, param_offset, ret);
3655 goto out;
3656 }
3657
3658 /* Update descriptor length */
3659 buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
3660
3661 if (param_offset >= buff_len) {
3662 dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
3663 __func__, param_offset, desc_id, buff_len);
3664 ret = -EINVAL;
3665 goto out;
3666 }
3667
3668 /* Sanity check */
3669 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3670 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
3671 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3672 ret = -EINVAL;
3673 goto out;
3674 }
3675
3676 if (is_kmalloc) {
3677 /* Make sure we don't copy more data than available */
3678 if (param_offset >= buff_len)
3679 ret = -EINVAL;
3680 else
3681 memcpy(param_read_buf, &desc_buf[param_offset],
3682 min_t(u32, param_size, buff_len - param_offset));
3683 }
3684 out:
3685 if (is_kmalloc)
3686 kfree(desc_buf);
3687 return ret;
3688 }
3689
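/*
 * Usage sketch (illustrative only, not part of the driver): reading a single
 * 16-bit field of the device descriptor with ufshcd_read_desc_param(). The
 * offset macro DEVICE_DESC_PARAM_SPEC_VER and the IDN QUERY_DESC_IDN_DEVICE
 * are assumed to come from <ufs/ufs.h>; the field is big-endian on the wire.
 */
static int __maybe_unused example_read_spec_version(struct ufs_hba *hba,
						    u16 *spec_version)
{
	u8 raw[2];
	int err;

	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0,
				     DEVICE_DESC_PARAM_SPEC_VER, raw,
				     sizeof(raw));
	if (!err)
		*spec_version = get_unaligned_be16(raw);
	return err;
}
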
3690 /**
3691 * struct uc_string_id - unicode string
3692 *
3693 * @len: size of this descriptor inclusive
3694 * @type: descriptor type
3695 * @uc: unicode string character
3696 */
3697 struct uc_string_id {
3698 u8 len;
3699 u8 type;
3700 wchar_t uc[];
3701 } __packed;
3702
3703 /* replace non-printable or non-ASCII characters with spaces */
3704 static inline char ufshcd_remove_non_printable(u8 ch)
3705 {
3706 return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
3707 }
3708
3709 /**
3710 * ufshcd_read_string_desc - read string descriptor
3711 * @hba: pointer to adapter instance
3712 * @desc_index: descriptor index
3713 * @buf: pointer to buffer where descriptor would be read,
3714 * the caller should free the memory.
3715 * @ascii: if true convert from unicode to ascii characters
3716 * null terminated string.
3717 *
3718 * Return:
3719 * * string size on success.
3720 * * -ENOMEM: on allocation failure
3721 * * -EINVAL: on a wrong parameter
3722 */
3723 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
3724 u8 **buf, bool ascii)
3725 {
3726 struct uc_string_id *uc_str;
3727 u8 *str;
3728 int ret;
3729
3730 if (!buf)
3731 return -EINVAL;
3732
3733 uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
3734 if (!uc_str)
3735 return -ENOMEM;
3736
3737 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
3738 (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
3739 if (ret < 0) {
3740 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
3741 QUERY_REQ_RETRIES, ret);
3742 str = NULL;
3743 goto out;
3744 }
3745
3746 if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
3747 dev_dbg(hba->dev, "String Desc is of zero length\n");
3748 str = NULL;
3749 ret = 0;
3750 goto out;
3751 }
3752
3753 if (ascii) {
3754 ssize_t ascii_len;
3755 int i;
3756 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3757 ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3758 str = kzalloc(ascii_len, GFP_KERNEL);
3759 if (!str) {
3760 ret = -ENOMEM;
3761 goto out;
3762 }
3763
3764 /*
3765 * The descriptor contains the string in UTF-16 format;
3766 * convert it to UTF-8 so that it can be displayed.
3767 */
3768 ret = utf16s_to_utf8s(uc_str->uc,
3769 uc_str->len - QUERY_DESC_HDR_SIZE,
3770 UTF16_BIG_ENDIAN, str, ascii_len - 1);
3771
3772 /* replace non-printable or non-ASCII characters with spaces */
3773 for (i = 0; i < ret; i++)
3774 str[i] = ufshcd_remove_non_printable(str[i]);
3775
3776 str[ret++] = '\0';
3777
3778 } else {
3779 str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
3780 if (!str) {
3781 ret = -ENOMEM;
3782 goto out;
3783 }
3784 ret = uc_str->len;
3785 }
3786 out:
3787 *buf = str;
3788 kfree(uc_str);
3789 return ret;
3790 }
3791
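/*
 * Usage sketch (illustrative only, not part of the driver): reading a string
 * descriptor as a NUL-terminated ASCII string. @index would typically come
 * from a device descriptor field such as the product name index. On success
 * the caller owns *ascii_str and must kfree() it.
 */
static int __maybe_unused example_read_string(struct ufs_hba *hba,
					      u8 index, u8 **ascii_str)
{
	int ret = ufshcd_read_string_desc(hba, index, ascii_str, true);

	/* ret > 0 is the string length; ret == 0 means an empty descriptor */
	return ret < 0 ? ret : 0;
}
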
3792 /**
3793 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3794 * @hba: Pointer to adapter instance
3795 * @lun: lun id
3796 * @param_offset: offset of the parameter to read
3797 * @param_read_buf: pointer to buffer where parameter would be read
3798 * @param_size: sizeof(param_read_buf)
3799 *
3800 * Return: 0 in case of success, non-zero otherwise.
3801 */
3802 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3803 int lun,
3804 enum unit_desc_param param_offset,
3805 u8 *param_read_buf,
3806 u32 param_size)
3807 {
3808 /*
3809 * Unit descriptors are only available for general purpose LUs (LUN id
3810 * from 0 to 7) and RPMB Well known LU.
3811 */
3812 if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
3813 return -EOPNOTSUPP;
3814
3815 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3816 param_offset, param_read_buf, param_size);
3817 }
3818
3819 static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
3820 {
3821 int err = 0;
3822 u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3823
3824 if (hba->dev_info.wspecversion >= 0x300) {
3825 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
3826 QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
3827 &gating_wait);
3828 if (err)
3829 dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
3830 err, gating_wait);
3831
3832 if (gating_wait == 0) {
3833 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
3834 dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
3835 gating_wait);
3836 }
3837
3838 hba->dev_info.clk_gating_wait_us = gating_wait;
3839 }
3840
3841 return err;
3842 }
3843
3844 /**
3845 * ufshcd_memory_alloc - allocate memory for host memory space data structures
3846 * @hba: per adapter instance
3847 *
3848 * 1. Allocate DMA memory for Command Descriptor array
3849 * Each command descriptor consists of Command UPIU, Response UPIU and PRDT
3850 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3851 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3852 * (UTMRDL)
3853 * 4. Allocate memory for local reference block(lrb).
3854 *
3855 * Return: 0 for success, non-zero in case of failure.
3856 */
3857 static int ufshcd_memory_alloc(struct ufs_hba *hba)
3858 {
3859 size_t utmrdl_size, utrdl_size, ucdl_size;
3860
3861 /* Allocate memory for UTP command descriptors */
3862 ucdl_size = ufshcd_get_ucd_size(hba) * hba->nutrs;
3863 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3864 ucdl_size,
3865 &hba->ucdl_dma_addr,
3866 GFP_KERNEL);
3867
3868 /*
3869 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
3870 */
3871 if (!hba->ucdl_base_addr ||
3872 WARN_ON(hba->ucdl_dma_addr & (128 - 1))) {
3873 dev_err(hba->dev,
3874 "Command Descriptor Memory allocation failed\n");
3875 goto out;
3876 }
3877
3878 /*
3879 * Allocate memory for UTP Transfer descriptors
3880 * UFSHCI requires 1KB alignment of UTRD
3881 */
3882 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3883 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3884 utrdl_size,
3885 &hba->utrdl_dma_addr,
3886 GFP_KERNEL);
3887 if (!hba->utrdl_base_addr ||
3888 WARN_ON(hba->utrdl_dma_addr & (SZ_1K - 1))) {
3889 dev_err(hba->dev,
3890 "Transfer Descriptor Memory allocation failed\n");
3891 goto out;
3892 }
3893
3894 /*
3895 * Skip utmrdl allocation; it may have been
3896 * allocated during first pass and not released during
3897 * MCQ memory allocation.
3898 * See ufshcd_release_sdb_queue() and ufshcd_config_mcq()
3899 */
3900 if (hba->utmrdl_base_addr)
3901 goto skip_utmrdl;
3902 /*
3903 * Allocate memory for UTP Task Management descriptors
3904 * UFSHCI requires 1KB alignment of UTMRD
3905 */
3906 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3907 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3908 utmrdl_size,
3909 &hba->utmrdl_dma_addr,
3910 GFP_KERNEL);
3911 if (!hba->utmrdl_base_addr ||
3912 WARN_ON(hba->utmrdl_dma_addr & (SZ_1K - 1))) {
3913 dev_err(hba->dev,
3914 "Task Management Descriptor Memory allocation failed\n");
3915 goto out;
3916 }
3917
3918 skip_utmrdl:
3919 /* Allocate memory for local reference block */
3920 hba->lrb = devm_kcalloc(hba->dev,
3921 hba->nutrs, sizeof(struct ufshcd_lrb),
3922 GFP_KERNEL);
3923 if (!hba->lrb) {
3924 dev_err(hba->dev, "LRB Memory allocation failed\n");
3925 goto out;
3926 }
3927 return 0;
3928 out:
3929 return -ENOMEM;
3930 }
3931
3932 /**
3933 * ufshcd_host_memory_configure - configure local reference block with
3934 * memory offsets
3935 * @hba: per adapter instance
3936 *
3937 * Configure Host memory space
3938 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3939 * address.
3940 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3941 * and PRDT offset.
3942 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3943 * into local reference block.
3944 */
3945 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3946 {
3947 struct utp_transfer_req_desc *utrdlp;
3948 dma_addr_t cmd_desc_dma_addr;
3949 dma_addr_t cmd_desc_element_addr;
3950 u16 response_offset;
3951 u16 prdt_offset;
3952 int cmd_desc_size;
3953 int i;
3954
3955 utrdlp = hba->utrdl_base_addr;
3956
3957 response_offset =
3958 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3959 prdt_offset =
3960 offsetof(struct utp_transfer_cmd_desc, prd_table);
3961
3962 cmd_desc_size = ufshcd_get_ucd_size(hba);
3963 cmd_desc_dma_addr = hba->ucdl_dma_addr;
3964
3965 for (i = 0; i < hba->nutrs; i++) {
3966 /* Configure UTRD with command descriptor base address */
3967 cmd_desc_element_addr =
3968 (cmd_desc_dma_addr + (cmd_desc_size * i));
3969 utrdlp[i].command_desc_base_addr =
3970 cpu_to_le64(cmd_desc_element_addr);
3971
3972 /* Response upiu and prdt offset should be in double words */
3973 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3974 utrdlp[i].response_upiu_offset =
3975 cpu_to_le16(response_offset);
3976 utrdlp[i].prd_table_offset =
3977 cpu_to_le16(prdt_offset);
3978 utrdlp[i].response_upiu_length =
3979 cpu_to_le16(ALIGNED_UPIU_SIZE);
3980 } else {
3981 utrdlp[i].response_upiu_offset =
3982 cpu_to_le16(response_offset >> 2);
3983 utrdlp[i].prd_table_offset =
3984 cpu_to_le16(prdt_offset >> 2);
3985 utrdlp[i].response_upiu_length =
3986 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3987 }
3988
3989 ufshcd_init_lrb(hba, &hba->lrb[i], i);
3990 }
3991 }
3992
3993 /**
3994 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3995 * @hba: per adapter instance
3996 *
3997 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
3998 * in order to initialize the Unipro link startup procedure.
3999 * Once the Unipro links are up, the device connected to the controller
4000 * is detected.
4001 *
4002 * Return: 0 on success, non-zero value on failure.
4003 */
4004 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
4005 {
4006 struct uic_command uic_cmd = {
4007 .command = UIC_CMD_DME_LINK_STARTUP,
4008 };
4009 int ret;
4010
4011 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4012 if (ret)
4013 dev_dbg(hba->dev,
4014 "dme-link-startup: error code %d\n", ret);
4015 return ret;
4016 }
4017 /**
4018 * ufshcd_dme_reset - UIC command for DME_RESET
4019 * @hba: per adapter instance
4020 *
4021 * DME_RESET command is issued in order to reset UniPro stack.
4022 * This function now deals with cold reset.
4023 *
4024 * Return: 0 on success, non-zero value on failure.
4025 */
4026 int ufshcd_dme_reset(struct ufs_hba *hba)
4027 {
4028 struct uic_command uic_cmd = {
4029 .command = UIC_CMD_DME_RESET,
4030 };
4031 int ret;
4032
4033 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4034 if (ret)
4035 dev_err(hba->dev,
4036 "dme-reset: error code %d\n", ret);
4037
4038 return ret;
4039 }
4040 EXPORT_SYMBOL_GPL(ufshcd_dme_reset);
4041
4042 int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
4043 int agreed_gear,
4044 int adapt_val)
4045 {
4046 int ret;
4047
4048 if (agreed_gear < UFS_HS_G4)
4049 adapt_val = PA_NO_ADAPT;
4050
4051 ret = ufshcd_dme_set(hba,
4052 UIC_ARG_MIB(PA_TXHSADAPTTYPE),
4053 adapt_val);
4054 return ret;
4055 }
4056 EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
4057
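/*
 * Usage sketch (illustrative only, not part of the driver): a host controller
 * driver would typically call ufshcd_dme_configure_adapt() from its power
 * mode change notify path once the target gear is known. PA_INITIAL_ADAPT is
 * assumed to be one of the adapt values defined in <ufs/unipro.h>.
 */
static int __maybe_unused example_configure_adapt(struct ufs_hba *hba,
						  int target_gear)
{
	return ufshcd_dme_configure_adapt(hba, target_gear, PA_INITIAL_ADAPT);
}
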
4058 /**
4059 * ufshcd_dme_enable - UIC command for DME_ENABLE
4060 * @hba: per adapter instance
4061 *
4062 * DME_ENABLE command is issued in order to enable UniPro stack.
4063 *
4064 * Return: 0 on success, non-zero value on failure.
4065 */
4066 int ufshcd_dme_enable(struct ufs_hba *hba)
4067 {
4068 struct uic_command uic_cmd = {
4069 .command = UIC_CMD_DME_ENABLE,
4070 };
4071 int ret;
4072
4073 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4074 if (ret)
4075 dev_err(hba->dev,
4076 "dme-enable: error code %d\n", ret);
4077
4078 return ret;
4079 }
4080 EXPORT_SYMBOL_GPL(ufshcd_dme_enable);
4081
4082 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
4083 {
4084 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
4085 unsigned long min_sleep_time_us;
4086
4087 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
4088 return;
4089
4090 /*
4091 * last_dme_cmd_tstamp will be 0 only for 1st call to
4092 * this function
4093 */
4094 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
4095 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
4096 } else {
4097 unsigned long delta =
4098 (unsigned long) ktime_to_us(
4099 ktime_sub(ktime_get(),
4100 hba->last_dme_cmd_tstamp));
4101
4102 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
4103 min_sleep_time_us =
4104 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
4105 else
4106 min_sleep_time_us = 0; /* no more delay required */
4107 }
4108
4109 if (min_sleep_time_us > 0) {
4110 /* allow sleep for extra 50us if needed */
4111 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
4112 }
4113
4114 /* update the last_dme_cmd_tstamp */
4115 hba->last_dme_cmd_tstamp = ktime_get();
4116 }
4117
4118 /**
4119 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
4120 * @hba: per adapter instance
4121 * @attr_sel: uic command argument1
4122 * @attr_set: attribute set type as uic command argument2
4123 * @mib_val: setting value as uic command argument3
4124 * @peer: indicate whether peer or local
4125 *
4126 * Return: 0 on success, non-zero value on failure.
4127 */
4128 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
4129 u8 attr_set, u32 mib_val, u8 peer)
4130 {
4131 struct uic_command uic_cmd = {
4132 .command = peer ? UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET,
4133 .argument1 = attr_sel,
4134 .argument2 = UIC_ARG_ATTR_TYPE(attr_set),
4135 .argument3 = mib_val,
4136 };
4137 static const char *const action[] = {
4138 "dme-set",
4139 "dme-peer-set"
4140 };
4141 const char *set = action[!!peer];
4142 int ret;
4143 int retries = UFS_UIC_COMMAND_RETRIES;
4144
4145 do {
4146 /* for peer attributes we retry upon failure */
4147 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4148 if (ret)
4149 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
4150 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
4151 } while (ret && peer && --retries);
4152
4153 if (ret)
4154 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
4155 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
4156 UFS_UIC_COMMAND_RETRIES - retries);
4157
4158 return ret;
4159 }
4160 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
4161
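/*
 * Usage sketch (illustrative only, not part of the driver): setting a local
 * M-PHY attribute through the ufshcd_dme_set() wrapper around
 * ufshcd_dme_set_attr(). PA_TXHSADAPTTYPE and PA_NO_ADAPT are the same
 * identifiers used by ufshcd_dme_configure_adapt() above.
 */
static int __maybe_unused example_dme_set(struct ufs_hba *hba)
{
	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE), PA_NO_ADAPT);
}
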
4162 /**
4163 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
4164 * @hba: per adapter instance
4165 * @attr_sel: uic command argument1
4166 * @mib_val: the value of the attribute as returned by the UIC command
4167 * @peer: indicate whether peer or local
4168 *
4169 * Return: 0 on success, non-zero value on failure.
4170 */
4171 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
4172 u32 *mib_val, u8 peer)
4173 {
4174 struct uic_command uic_cmd = {
4175 .command = peer ? UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET,
4176 .argument1 = attr_sel,
4177 };
4178 static const char *const action[] = {
4179 "dme-get",
4180 "dme-peer-get"
4181 };
4182 const char *get = action[!!peer];
4183 int ret;
4184 int retries = UFS_UIC_COMMAND_RETRIES;
4185 struct ufs_pa_layer_attr orig_pwr_info;
4186 struct ufs_pa_layer_attr temp_pwr_info;
4187 bool pwr_mode_change = false;
4188
4189 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
4190 orig_pwr_info = hba->pwr_info;
4191 temp_pwr_info = orig_pwr_info;
4192
4193 if (orig_pwr_info.pwr_tx == FAST_MODE ||
4194 orig_pwr_info.pwr_rx == FAST_MODE) {
4195 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
4196 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
4197 pwr_mode_change = true;
4198 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
4199 orig_pwr_info.pwr_rx == SLOW_MODE) {
4200 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
4201 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
4202 pwr_mode_change = true;
4203 }
4204 if (pwr_mode_change) {
4205 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
4206 if (ret)
4207 goto out;
4208 }
4209 }
4210
4211 do {
4212 /* for peer attributes we retry upon failure */
4213 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
4214 if (ret)
4215 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
4216 get, UIC_GET_ATTR_ID(attr_sel), ret);
4217 } while (ret && peer && --retries);
4218
4219 if (ret)
4220 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
4221 get, UIC_GET_ATTR_ID(attr_sel),
4222 UFS_UIC_COMMAND_RETRIES - retries);
4223
4224 if (mib_val && !ret)
4225 *mib_val = uic_cmd.argument3;
4226
4227 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
4228 && pwr_mode_change)
4229 ufshcd_change_power_mode(hba, &orig_pwr_info);
4230 out:
4231 return ret;
4232 }
4233 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
4234
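/*
 * Usage sketch (illustrative only, not part of the driver): reading a local
 * and a peer attribute through the ufshcd_dme_get()/ufshcd_dme_peer_get()
 * wrappers around ufshcd_dme_get_attr(), mirroring what
 * ufshcd_get_max_pwr_mode() does further down in this file.
 */
static int __maybe_unused example_dme_get_lanes(struct ufs_hba *hba,
						u32 *rx_lanes, u32 *tx_lanes)
{
	int ret;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
			     rx_lanes);
	if (ret)
		return ret;
	return ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
				   tx_lanes);
}
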
4235 /**
4236 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
4237 * state) and waits for it to take effect.
4238 *
4239 * @hba: per adapter instance
4240 * @cmd: UIC command to execute
4241 *
4242 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
4243 * DME_HIBERNATE_EXIT take some time to take effect on both the host and
4244 * device UniPro links, and hence their final completion is indicated by
4245 * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
4246 * addition to normal UIC command completion Status (UCCS). This function only
4247 * returns after the relevant status bits indicate the completion.
4248 *
4249 * Return: 0 on success, non-zero value on failure.
4250 */
4251 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
4252 {
4253 DECLARE_COMPLETION_ONSTACK(uic_async_done);
4254 unsigned long flags;
4255 u8 status;
4256 int ret;
4257 bool reenable_intr = false;
4258
4259 mutex_lock(&hba->uic_cmd_mutex);
4260 ufshcd_add_delay_before_dme_cmd(hba);
4261
4262 spin_lock_irqsave(hba->host->host_lock, flags);
4263 if (ufshcd_is_link_broken(hba)) {
4264 ret = -ENOLINK;
4265 goto out_unlock;
4266 }
4267 hba->uic_async_done = &uic_async_done;
4268 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
4269 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
4270 /*
4271 * Make sure UIC command completion interrupt is disabled before
4272 * issuing UIC command.
4273 */
4274 ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
4275 reenable_intr = true;
4276 }
4277 spin_unlock_irqrestore(hba->host->host_lock, flags);
4278 ret = __ufshcd_send_uic_cmd(hba, cmd);
4279 if (ret) {
4280 dev_err(hba->dev,
4281 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
4282 cmd->command, cmd->argument3, ret);
4283 goto out;
4284 }
4285
4286 if (!wait_for_completion_timeout(hba->uic_async_done,
4287 msecs_to_jiffies(uic_cmd_timeout))) {
4288 dev_err(hba->dev,
4289 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
4290 cmd->command, cmd->argument3);
4291
4292 if (!cmd->cmd_active) {
4293 dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
4294 __func__);
4295 goto check_upmcrs;
4296 }
4297
4298 ret = -ETIMEDOUT;
4299 goto out;
4300 }
4301
4302 check_upmcrs:
4303 status = ufshcd_get_upmcrs(hba);
4304 if (status != PWR_LOCAL) {
4305 dev_err(hba->dev,
4306 "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
4307 cmd->command, status);
4308 ret = (status != PWR_OK) ? status : -1;
4309 }
4310 out:
4311 if (ret) {
4312 ufshcd_print_host_state(hba);
4313 ufshcd_print_pwr_info(hba);
4314 ufshcd_print_evt_hist(hba);
4315 }
4316
4317 spin_lock_irqsave(hba->host->host_lock, flags);
4318 hba->active_uic_cmd = NULL;
4319 hba->uic_async_done = NULL;
4320 if (reenable_intr)
4321 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
4322 if (ret) {
4323 ufshcd_set_link_broken(hba);
4324 ufshcd_schedule_eh_work(hba);
4325 }
4326 out_unlock:
4327 spin_unlock_irqrestore(hba->host->host_lock, flags);
4328 mutex_unlock(&hba->uic_cmd_mutex);
4329
4330 return ret;
4331 }
4332
4333 /**
4334 * ufshcd_send_bsg_uic_cmd - Send UIC commands requested via BSG layer and retrieve the result
4335 * @hba: per adapter instance
4336 * @uic_cmd: UIC command
4337 *
4338 * Return: 0 only if success.
4339 */
4340 int ufshcd_send_bsg_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
4341 {
4342 int ret;
4343
4344 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
4345 return 0;
4346
4347 ufshcd_hold(hba);
4348
4349 if (uic_cmd->argument1 == UIC_ARG_MIB(PA_PWRMODE) &&
4350 uic_cmd->command == UIC_CMD_DME_SET) {
4351 ret = ufshcd_uic_pwr_ctrl(hba, uic_cmd);
4352 goto out;
4353 }
4354
4355 mutex_lock(&hba->uic_cmd_mutex);
4356 ufshcd_add_delay_before_dme_cmd(hba);
4357
4358 ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
4359 if (!ret)
4360 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
4361
4362 mutex_unlock(&hba->uic_cmd_mutex);
4363
4364 out:
4365 ufshcd_release(hba);
4366 return ret;
4367 }
4368
4369 /**
4370 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
4371 * using DME_SET primitives.
4372 * @hba: per adapter instance
4373 * @mode: power mode value
4374 *
4375 * Return: 0 on success, non-zero value on failure.
4376 */
4377 int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
4378 {
4379 struct uic_command uic_cmd = {
4380 .command = UIC_CMD_DME_SET,
4381 .argument1 = UIC_ARG_MIB(PA_PWRMODE),
4382 .argument3 = mode,
4383 };
4384 int ret;
4385
4386 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
4387 ret = ufshcd_dme_set(hba,
4388 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
4389 if (ret) {
4390 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
4391 __func__, ret);
4392 goto out;
4393 }
4394 }
4395
4396 ufshcd_hold(hba);
4397 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4398 ufshcd_release(hba);
4399
4400 out:
4401 return ret;
4402 }
4403 EXPORT_SYMBOL_GPL(ufshcd_uic_change_pwr_mode);
4404
4405 int ufshcd_link_recovery(struct ufs_hba *hba)
4406 {
4407 int ret;
4408 unsigned long flags;
4409
4410 spin_lock_irqsave(hba->host->host_lock, flags);
4411 hba->ufshcd_state = UFSHCD_STATE_RESET;
4412 ufshcd_set_eh_in_progress(hba);
4413 spin_unlock_irqrestore(hba->host->host_lock, flags);
4414
4415 /* Reset the attached device */
4416 ufshcd_device_reset(hba);
4417
4418 ret = ufshcd_host_reset_and_restore(hba);
4419
4420 spin_lock_irqsave(hba->host->host_lock, flags);
4421 if (ret)
4422 hba->ufshcd_state = UFSHCD_STATE_ERROR;
4423 ufshcd_clear_eh_in_progress(hba);
4424 spin_unlock_irqrestore(hba->host->host_lock, flags);
4425
4426 if (ret)
4427 dev_err(hba->dev, "%s: link recovery failed, err %d",
4428 __func__, ret);
4429
4430 return ret;
4431 }
4432 EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
4433
4434 int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
4435 {
4436 struct uic_command uic_cmd = {
4437 .command = UIC_CMD_DME_HIBER_ENTER,
4438 };
4439 ktime_t start = ktime_get();
4440 int ret;
4441
4442 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
4443
4444 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4445 trace_ufshcd_profile_hibern8(hba, "enter",
4446 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4447
4448 if (ret)
4449 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
4450 __func__, ret);
4451 else
4452 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
4453 POST_CHANGE);
4454
4455 return ret;
4456 }
4457 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
4458
4459 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
4460 {
4461 struct uic_command uic_cmd = {
4462 .command = UIC_CMD_DME_HIBER_EXIT,
4463 };
4464 int ret;
4465 ktime_t start = ktime_get();
4466
4467 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
4468
4469 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
4470 trace_ufshcd_profile_hibern8(hba, "exit",
4471 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
4472
4473 if (ret) {
4474 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
4475 __func__, ret);
4476 } else {
4477 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
4478 POST_CHANGE);
4479 hba->ufs_stats.last_hibern8_exit_tstamp = local_clock();
4480 hba->ufs_stats.hibern8_exit_cnt++;
4481 }
4482
4483 return ret;
4484 }
4485 EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
4486
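/*
 * Usage sketch (illustrative only, not part of the driver): manually cycling
 * the link through hibernate and back, the way the suspend/resume paths pair
 * ufshcd_uic_hibern8_enter() and ufshcd_uic_hibern8_exit().
 */
static int __maybe_unused example_hibern8_cycle(struct ufs_hba *hba)
{
	int ret = ufshcd_uic_hibern8_enter(hba);

	if (ret)
		return ret;
	return ufshcd_uic_hibern8_exit(hba);
}
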
4487 static void ufshcd_configure_auto_hibern8(struct ufs_hba *hba)
4488 {
4489 if (!ufshcd_is_auto_hibern8_supported(hba))
4490 return;
4491
4492 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
4493 }
4494
4495 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
4496 {
4497 const u32 cur_ahit = READ_ONCE(hba->ahit);
4498
4499 if (!ufshcd_is_auto_hibern8_supported(hba) || cur_ahit == ahit)
4500 return;
4501
4502 WRITE_ONCE(hba->ahit, ahit);
4503 if (!pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
4504 ufshcd_rpm_get_sync(hba);
4505 ufshcd_hold(hba);
4506 ufshcd_configure_auto_hibern8(hba);
4507 ufshcd_release(hba);
4508 ufshcd_rpm_put_sync(hba);
4509 }
4510 }
4511 EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
4512
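/*
 * Usage sketch (illustrative only, not part of the driver): programming a new
 * auto-hibernate idle timer value. The UFSHCI_AHIBERN8_TIMER_MASK and
 * UFSHCI_AHIBERN8_SCALE_MASK field macros are assumed to come from
 * <ufs/ufshci.h>, and the scale value 3 is assumed to select a 1 ms
 * granularity, giving a 150 ms idle timer here.
 */
static void __maybe_unused example_set_ahit(struct ufs_hba *hba)
{
	u32 ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
		   FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);

	ufshcd_auto_hibern8_update(hba, ahit);
}
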
4513 /**
4514 * ufshcd_init_pwr_info - setting the POR (power on reset)
4515 * values in hba power info
4516 * @hba: per-adapter instance
4517 */
4518 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4519 {
4520 hba->pwr_info.gear_rx = UFS_PWM_G1;
4521 hba->pwr_info.gear_tx = UFS_PWM_G1;
4522 hba->pwr_info.lane_rx = UFS_LANE_1;
4523 hba->pwr_info.lane_tx = UFS_LANE_1;
4524 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4525 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4526 hba->pwr_info.hs_rate = 0;
4527 }
4528
4529 /**
4530 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
4531 * @hba: per-adapter instance
4532 *
4533 * Return: 0 upon success; < 0 upon failure.
4534 */
4535 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
4536 {
4537 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4538
4539 if (hba->max_pwr_info.is_valid)
4540 return 0;
4541
4542 if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) {
4543 pwr_info->pwr_tx = FASTAUTO_MODE;
4544 pwr_info->pwr_rx = FASTAUTO_MODE;
4545 } else {
4546 pwr_info->pwr_tx = FAST_MODE;
4547 pwr_info->pwr_rx = FAST_MODE;
4548 }
4549 pwr_info->hs_rate = PA_HS_MODE_B;
4550
4551 /* Get the connected lane count */
4552 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4553 &pwr_info->lane_rx);
4554 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4555 &pwr_info->lane_tx);
4556
4557 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4558 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4559 __func__,
4560 pwr_info->lane_rx,
4561 pwr_info->lane_tx);
4562 return -EINVAL;
4563 }
4564
4565 if (pwr_info->lane_rx != pwr_info->lane_tx) {
4566 dev_err(hba->dev, "%s: asymmetric connected lanes. rx=%d, tx=%d\n",
4567 __func__,
4568 pwr_info->lane_rx,
4569 pwr_info->lane_tx);
4570 return -EINVAL;
4571 }
4572
4573 /*
4574 * First, get the maximum gears of HS speed.
4575 * If a zero value, it means there is no HSGEAR capability.
4576 * Then, get the maximum gears of PWM speed.
4577 */
4578 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4579 if (!pwr_info->gear_rx) {
4580 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4581 &pwr_info->gear_rx);
4582 if (!pwr_info->gear_rx) {
4583 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4584 __func__, pwr_info->gear_rx);
4585 return -EINVAL;
4586 }
4587 pwr_info->pwr_rx = SLOW_MODE;
4588 }
4589
4590 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4591 &pwr_info->gear_tx);
4592 if (!pwr_info->gear_tx) {
4593 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4594 &pwr_info->gear_tx);
4595 if (!pwr_info->gear_tx) {
4596 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4597 __func__, pwr_info->gear_tx);
4598 return -EINVAL;
4599 }
4600 pwr_info->pwr_tx = SLOW_MODE;
4601 }
4602
4603 hba->max_pwr_info.is_valid = true;
4604 return 0;
4605 }
4606
4607 static int ufshcd_change_power_mode(struct ufs_hba *hba,
4608 struct ufs_pa_layer_attr *pwr_mode)
4609 {
4610 int ret;
4611
4612 /* if already configured to the requested pwr_mode */
4613 if (!hba->force_pmc &&
4614 pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4615 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4616 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4617 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4618 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4619 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4620 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4621 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4622 return 0;
4623 }
4624
4625 /*
4626 * Configure attributes for power mode change with below.
4627 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4628 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4629 * - PA_HSSERIES
4630 */
4631 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4632 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4633 pwr_mode->lane_rx);
4634 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4635 pwr_mode->pwr_rx == FAST_MODE)
4636 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
4637 else
4638 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), false);
4639
4640 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4641 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4642 pwr_mode->lane_tx);
4643 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4644 pwr_mode->pwr_tx == FAST_MODE)
4645 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
4646 else
4647 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), false);
4648
4649 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4650 pwr_mode->pwr_tx == FASTAUTO_MODE ||
4651 pwr_mode->pwr_rx == FAST_MODE ||
4652 pwr_mode->pwr_tx == FAST_MODE)
4653 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4654 pwr_mode->hs_rate);
4655
4656 if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
4657 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
4658 DL_FC0ProtectionTimeOutVal_Default);
4659 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
4660 DL_TC0ReplayTimeOutVal_Default);
4661 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
4662 DL_AFC0ReqTimeOutVal_Default);
4663 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
4664 DL_FC1ProtectionTimeOutVal_Default);
4665 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
4666 DL_TC1ReplayTimeOutVal_Default);
4667 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
4668 DL_AFC1ReqTimeOutVal_Default);
4669
4670 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
4671 DL_FC0ProtectionTimeOutVal_Default);
4672 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
4673 DL_TC0ReplayTimeOutVal_Default);
4674 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
4675 DL_AFC0ReqTimeOutVal_Default);
4676 }
4677
4678 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4679 | pwr_mode->pwr_tx);
4680
4681 if (ret) {
4682 dev_err(hba->dev,
4683 "%s: power mode change failed %d\n", __func__, ret);
4684 } else {
4685 memcpy(&hba->pwr_info, pwr_mode,
4686 sizeof(struct ufs_pa_layer_attr));
4687 }
4688
4689 return ret;
4690 }
4691
4692 /**
4693 * ufshcd_config_pwr_mode - configure a new power mode
4694 * @hba: per-adapter instance
4695 * @desired_pwr_mode: desired power configuration
4696 *
4697 * Return: 0 upon success; < 0 upon failure.
4698 */
4699 int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4700 struct ufs_pa_layer_attr *desired_pwr_mode)
4701 {
4702 struct ufs_pa_layer_attr final_params = { 0 };
4703 int ret;
4704
4705 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4706 desired_pwr_mode, &final_params);
4707
4708 if (ret)
4709 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4710
4711 ret = ufshcd_change_power_mode(hba, &final_params);
4712
4713 if (!ret)
4714 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4715 &final_params);
4716
4717 return ret;
4718 }
4719 EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
4720
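/*
 * Usage sketch (illustrative only, not part of the driver): one way to switch
 * to the fastest mode both sides support is to read back the negotiated
 * capabilities with ufshcd_get_max_pwr_mode() and then apply them via
 * ufshcd_config_pwr_mode(), using the cached hba->max_pwr_info.
 */
static int __maybe_unused example_scale_to_max_gear(struct ufs_hba *hba)
{
	int ret = ufshcd_get_max_pwr_mode(hba);

	if (ret)
		return ret;
	return ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
}
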
4721 /**
4722 * ufshcd_complete_dev_init() - checks device readiness
4723 * @hba: per-adapter instance
4724 *
4725 * Set fDeviceInit flag and poll until device toggles it.
4726 *
4727 * Return: 0 upon success; < 0 upon failure.
4728 */
4729 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4730 {
4731 int err;
4732 bool flag_res = true;
4733 ktime_t timeout;
4734
4735 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4736 QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
4737 if (err) {
4738 dev_err(hba->dev,
4739 "%s: setting fDeviceInit flag failed with error %d\n",
4740 __func__, err);
4741 goto out;
4742 }
4743
4744 /* Poll fDeviceInit flag to be cleared */
4745 timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
4746 do {
4747 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4748 QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
4749 if (!flag_res)
4750 break;
4751 usleep_range(500, 1000);
4752 } while (ktime_before(ktime_get(), timeout));
4753
4754 if (err) {
4755 dev_err(hba->dev,
4756 "%s: reading fDeviceInit flag failed with error %d\n",
4757 __func__, err);
4758 } else if (flag_res) {
4759 dev_err(hba->dev,
4760 "%s: fDeviceInit was not cleared by the device\n",
4761 __func__);
4762 err = -EBUSY;
4763 }
4764 out:
4765 return err;
4766 }
4767
4768 /**
4769 * ufshcd_make_hba_operational - Make UFS controller operational
4770 * @hba: per adapter instance
4771 *
4772 * To bring UFS host controller to operational state,
4773 * 1. Enable required interrupts
4774 * 2. Configure interrupt aggregation
4775 * 3. Program UTRL and UTMRL base address
4776 * 4. Configure run-stop-registers
4777 *
4778 * Return: 0 on success, non-zero value on failure.
4779 */
4780 int ufshcd_make_hba_operational(struct ufs_hba *hba)
4781 {
4782 int err = 0;
4783 u32 reg;
4784
4785 /* Enable required interrupts */
4786 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4787
4788 /* Configure interrupt aggregation */
4789 if (ufshcd_is_intr_aggr_allowed(hba))
4790 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4791 else
4792 ufshcd_disable_intr_aggr(hba);
4793
4794 /* Configure UTRL and UTMRL base address registers */
4795 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4796 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4797 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4798 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4799 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4800 REG_UTP_TASK_REQ_LIST_BASE_L);
4801 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4802 REG_UTP_TASK_REQ_LIST_BASE_H);
4803
4804 /*
4805 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
4806 */
4807 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4808 if (!(ufshcd_get_lists_status(reg))) {
4809 ufshcd_enable_run_stop_reg(hba);
4810 } else {
4811 dev_err(hba->dev,
4812 "Host controller not ready to process requests");
4813 err = -EIO;
4814 }
4815
4816 return err;
4817 }
4818 EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
4819
4820 /**
4821 * ufshcd_hba_stop - Send controller to reset state
4822 * @hba: per adapter instance
4823 */
4824 void ufshcd_hba_stop(struct ufs_hba *hba)
4825 {
4826 int err;
4827
4828 ufshcd_disable_irq(hba);
4829 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
4830 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4831 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4832 10, 1);
4833 ufshcd_enable_irq(hba);
4834 if (err)
4835 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4836 }
4837 EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
4838
4839 /**
4840 * ufshcd_hba_execute_hce - initialize the controller
4841 * @hba: per adapter instance
4842 *
4843 * The controller resets itself and controller firmware initialization
4844 * sequence kicks off. When controller is ready it will set
4845 * the Host Controller Enable bit to 1.
4846 *
4847 * Return: 0 on success, non-zero value on failure.
4848 */
4849 static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
4850 {
4851 int retry;
4852
4853 for (retry = 3; retry > 0; retry--) {
4854 if (ufshcd_is_hba_active(hba))
4855 /* change controller state to "reset state" */
4856 ufshcd_hba_stop(hba);
4857
4858 /* UniPro link is disabled at this point */
4859 ufshcd_set_link_off(hba);
4860
4861 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4862
4863 /* start controller initialization sequence */
4864 ufshcd_hba_start(hba);
4865
4866 /*
4867 * To initialize a UFS host controller, the HCE bit must be set to 1.
4868 * During initialization the HCE bit value changes from 1->0->1.
4869 * When the host controller completes initialization sequence
4870 * it sets the value of HCE bit to 1. The same HCE bit is read back
4871 * to check if the controller has completed initialization sequence.
4872 * So without this delay, the HCE = 1 value set by the previous
4873 * instruction might be read back prematurely.
4874 * This delay can be changed based on the controller.
4875 */
4876 ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
4877
4878 /* wait for the host controller to complete initialization */
4879 if (!ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, CONTROLLER_ENABLE,
4880 CONTROLLER_ENABLE, 1000, 50))
4881 break;
4882
4883 dev_err(hba->dev, "Enabling the controller failed\n");
4884 }
4885
4886 if (!retry)
4887 return -EIO;
4888
4889 /* enable UIC related interrupts */
4890 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4891
4892 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4893
4894 return 0;
4895 }
4896
4897 int ufshcd_hba_enable(struct ufs_hba *hba)
4898 {
4899 int ret;
4900
4901 if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4902 ufshcd_set_link_off(hba);
4903 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4904
4905 /* enable UIC related interrupts */
4906 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4907 ret = ufshcd_dme_reset(hba);
4908 if (ret) {
4909 dev_err(hba->dev, "DME_RESET failed\n");
4910 return ret;
4911 }
4912
4913 ret = ufshcd_dme_enable(hba);
4914 if (ret) {
4915 dev_err(hba->dev, "Enabling DME failed\n");
4916 return ret;
4917 }
4918
4919 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4920 } else {
4921 ret = ufshcd_hba_execute_hce(hba);
4922 }
4923
4924 return ret;
4925 }
4926 EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
4927
4928 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4929 {
4930 int tx_lanes = 0, i, err = 0;
4931
4932 if (!peer)
4933 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4934 &tx_lanes);
4935 else
4936 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4937 &tx_lanes);
4938 for (i = 0; i < tx_lanes; i++) {
4939 if (!peer)
4940 err = ufshcd_dme_set(hba,
4941 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4942 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4943 0);
4944 else
4945 err = ufshcd_dme_peer_set(hba,
4946 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4947 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4948 0);
4949 if (err) {
4950 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4951 __func__, peer, i, err);
4952 break;
4953 }
4954 }
4955
4956 return err;
4957 }
4958
4959 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4960 {
4961 return ufshcd_disable_tx_lcc(hba, true);
4962 }
4963
4964 void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
4965 {
4966 struct ufs_event_hist *e;
4967
4968 if (id >= UFS_EVT_CNT)
4969 return;
4970
4971 e = &hba->ufs_stats.event[id];
4972 e->val[e->pos] = val;
4973 e->tstamp[e->pos] = local_clock();
4974 e->cnt += 1;
4975 e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
4976
4977 ufshcd_vops_event_notify(hba, id, &val);
4978 }
4979 EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
4980
4981 /**
4982 * ufshcd_link_startup - Initialize unipro link startup
4983 * @hba: per adapter instance
4984 *
4985 * Return: 0 for success, non-zero in case of failure.
4986 */
4987 static int ufshcd_link_startup(struct ufs_hba *hba)
4988 {
4989 int ret;
4990 int retries = DME_LINKSTARTUP_RETRIES;
4991 bool link_startup_again = false;
4992
4993 /*
4994 * If UFS device isn't active then we will have to issue link startup
4995 * 2 times to make sure the device state moves to active.
4996 */
4997 if (!ufshcd_is_ufs_dev_active(hba))
4998 link_startup_again = true;
4999
5000 link_startup:
5001 do {
5002 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
5003
5004 ret = ufshcd_dme_link_startup(hba);
5005
5006 /* check if device is detected by inter-connect layer */
5007 if (!ret && !ufshcd_is_device_present(hba)) {
5008 ufshcd_update_evt_hist(hba,
5009 UFS_EVT_LINK_STARTUP_FAIL,
5010 0);
5011 dev_err(hba->dev, "%s: Device not present\n", __func__);
5012 ret = -ENXIO;
5013 goto out;
5014 }
5015
5016 /*
5017 * DME link lost indication is only received when link is up,
5018 * but we can't be sure if the link is up until link startup
5019 * succeeds. So reset the local Uni-Pro and try again.
5020 */
5021 if (ret && retries && ufshcd_hba_enable(hba)) {
5022 ufshcd_update_evt_hist(hba,
5023 UFS_EVT_LINK_STARTUP_FAIL,
5024 (u32)ret);
5025 goto out;
5026 }
5027 } while (ret && retries--);
5028
5029 if (ret) {
5030 /* failed to get the link up... retire */
5031 ufshcd_update_evt_hist(hba,
5032 UFS_EVT_LINK_STARTUP_FAIL,
5033 (u32)ret);
5034 goto out;
5035 }
5036
5037 if (link_startup_again) {
5038 link_startup_again = false;
5039 retries = DME_LINKSTARTUP_RETRIES;
5040 goto link_startup;
5041 }
5042
5043 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
5044 ufshcd_init_pwr_info(hba);
5045 ufshcd_print_pwr_info(hba);
5046
5047 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
5048 ret = ufshcd_disable_device_tx_lcc(hba);
5049 if (ret)
5050 goto out;
5051 }
5052
5053 /* Include any host controller configuration via UIC commands */
5054 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
5055 if (ret)
5056 goto out;
5057
5058 /* Clear UECPA once due to LINERESET has happened during LINK_STARTUP */
5059 ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5060 ret = ufshcd_make_hba_operational(hba);
5061 out:
5062 if (ret) {
5063 dev_err(hba->dev, "link startup failed %d\n", ret);
5064 ufshcd_print_host_state(hba);
5065 ufshcd_print_pwr_info(hba);
5066 ufshcd_print_evt_hist(hba);
5067 }
5068 return ret;
5069 }
5070
5071 /**
5072 * ufshcd_verify_dev_init() - Verify device initialization
5073 * @hba: per-adapter instance
5074 *
5075 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
5076 * device Transport Protocol (UTP) layer is ready after a reset.
5077 * If the UTP layer at the device side is not initialized, it may
5078 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
5079 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
5080 *
5081 * Return: 0 upon success; < 0 upon failure.
5082 */
5083 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
5084 {
5085 int err = 0;
5086 int retries;
5087
5088 ufshcd_dev_man_lock(hba);
5089
5090 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
5091 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
5092 hba->nop_out_timeout);
5093
5094 if (!err || err == -ETIMEDOUT)
5095 break;
5096
5097 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
5098 }
5099
5100 ufshcd_dev_man_unlock(hba);
5101
5102 if (err)
5103 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
5104 return err;
5105 }
5106
5107 /**
5108 * ufshcd_setup_links - associate link between device wlun and other luns
5109 * @sdev: pointer to SCSI device
5110 * @hba: pointer to ufs hba
5111 */
5112 static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
5113 {
5114 struct device_link *link;
5115
5116 /*
5117 * Device wlun is the supplier and the rest of the LUNs are consumers.
5118 * This ensures that device wlun suspends after all other luns.
5119 */
5120 if (hba->ufs_device_wlun) {
5121 link = device_link_add(&sdev->sdev_gendev,
5122 &hba->ufs_device_wlun->sdev_gendev,
5123 DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
5124 if (!link) {
5125 dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
5126 dev_name(&hba->ufs_device_wlun->sdev_gendev));
5127 return;
5128 }
5129 hba->luns_avail--;
5130 /* Ignore REPORT_LUN wlun probing */
5131 if (hba->luns_avail == 1) {
5132 ufshcd_rpm_put(hba);
5133 return;
5134 }
5135 } else {
5136 /*
5137 * Device wlun is probed. The assumption is that WLUNs are
5138 * scanned before other LUNs.
5139 */
5140 hba->luns_avail--;
5141 }
5142 }
5143
5144 /**
5145 * ufshcd_lu_init - Initialize the relevant parameters of the LU
5146 * @hba: per-adapter instance
5147 * @sdev: pointer to SCSI device
5148 */
5149 static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
5150 {
5151 int len = QUERY_DESC_MAX_SIZE;
5152 u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
5153 u8 lun_qdepth = hba->nutrs;
5154 u8 *desc_buf;
5155 int ret;
5156
5157 desc_buf = kzalloc(len, GFP_KERNEL);
5158 if (!desc_buf)
5159 goto set_qdepth;
5160
5161 ret = ufshcd_read_unit_desc_param(hba, lun, 0, desc_buf, len);
5162 if (ret < 0) {
5163 if (ret == -EOPNOTSUPP)
5164 /* If LU doesn't support unit descriptor, its queue depth is set to 1 */
5165 lun_qdepth = 1;
5166 kfree(desc_buf);
5167 goto set_qdepth;
5168 }
5169
5170 if (desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH]) {
5171 /*
5172 * In the per-LU queueing architecture, bLUQueueDepth will not be 0, so we
5173 * use the smaller of UFSHCI CAP.NUTRS and the UFS LU's bLUQueueDepth.
5174 */
5175 lun_qdepth = min_t(int, desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH], hba->nutrs);
5176 }
5177 /*
5178 * According to UFS device specification, the write protection mode is only supported by
5179 * normal LUs, not by WLUNs.
5180 */
5181 if (hba->dev_info.f_power_on_wp_en && lun < hba->dev_info.max_lu_supported &&
5182 !hba->dev_info.is_lu_power_on_wp &&
5183 desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP)
5184 hba->dev_info.is_lu_power_on_wp = true;
5185
5186 /* In case of RPMB LU, check if advanced RPMB mode is enabled */
5187 if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN &&
5188 desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4))
5189 hba->dev_info.b_advanced_rpmb_en = true;
5190
5191
5192 kfree(desc_buf);
5193 set_qdepth:
5194 /*
5195 * For WLUNs that don't support unit descriptor, queue depth is set to 1. For LUs whose
5196 * bLUQueueDepth == 0, the queue depth is set to the maximum the host can queue.
5197 */
5198 dev_dbg(hba->dev, "Set LU %x queue depth %d\n", lun, lun_qdepth);
5199 scsi_change_queue_depth(sdev, lun_qdepth);
5200 }
5201
5202 /**
5203 * ufshcd_sdev_init - handle initial SCSI device configurations
5204 * @sdev: pointer to SCSI device
5205 *
5206 * Return: success.
5207 */
5208 static int ufshcd_sdev_init(struct scsi_device *sdev)
5209 {
5210 struct ufs_hba *hba;
5211
5212 hba = shost_priv(sdev->host);
5213
5214 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
5215 sdev->use_10_for_ms = 1;
5216
5217 /* DBD field should be set to 1 in mode sense(10) */
5218 sdev->set_dbd_for_ms = 1;
5219
5220 /* allow SCSI layer to restart the device in case of errors */
5221 sdev->allow_restart = 1;
5222
5223 /* REPORT SUPPORTED OPERATION CODES is not supported */
5224 sdev->no_report_opcodes = 1;
5225
5226 /* WRITE_SAME command is not supported */
5227 sdev->no_write_same = 1;
5228
5229 ufshcd_lu_init(hba, sdev);
5230
5231 ufshcd_setup_links(hba, sdev);
5232
5233 return 0;
5234 }
5235
5236 /**
5237 * ufshcd_change_queue_depth - change queue depth
5238 * @sdev: pointer to SCSI device
5239 * @depth: required depth to set
5240 *
5241 * Change queue depth and make sure the max. limits are not crossed.
5242 *
5243 * Return: new queue depth.
5244 */
5245 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
5246 {
5247 return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
5248 }
5249
5250 /**
5251 * ufshcd_sdev_configure - adjust SCSI device configurations
5252 * @sdev: pointer to SCSI device
5253 * @lim: queue limits
5254 *
5255 * Return: 0 (success).
5256 */
5257 static int ufshcd_sdev_configure(struct scsi_device *sdev,
5258 struct queue_limits *lim)
5259 {
5260 struct ufs_hba *hba = shost_priv(sdev->host);
5261 struct request_queue *q = sdev->request_queue;
5262
5263 lim->dma_pad_mask = PRDT_DATA_BYTE_COUNT_PAD - 1;
5264
5265 /*
5266 * Block runtime PM until all consumers have been added.
5267 * See ufshcd_setup_links().
5268 */
5269 if (is_device_wlun(sdev))
5270 pm_runtime_get_noresume(&sdev->sdev_gendev);
5271 else if (ufshcd_is_rpm_autosuspend_allowed(hba))
5272 sdev->rpm_autosuspend = 1;
5273 /*
5274 * Do not print messages during runtime PM to avoid never-ending cycles
5275 * of messages written back to storage by user space causing runtime
5276 * resume, causing more messages and so on.
5277 */
5278 sdev->silence_suspend = 1;
5279
5280 if (hba->vops && hba->vops->config_scsi_dev)
5281 hba->vops->config_scsi_dev(sdev);
5282
5283 ufshcd_crypto_register(hba, q);
5284
5285 return 0;
5286 }
5287
5288 /**
5289 * ufshcd_sdev_destroy - remove SCSI device configurations
5290 * @sdev: pointer to SCSI device
5291 */
5292 static void ufshcd_sdev_destroy(struct scsi_device *sdev)
5293 {
5294 struct ufs_hba *hba;
5295 unsigned long flags;
5296
5297 hba = shost_priv(sdev->host);
5298
5299 /* Drop the reference as it won't be needed anymore */
5300 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
5301 spin_lock_irqsave(hba->host->host_lock, flags);
5302 hba->ufs_device_wlun = NULL;
5303 spin_unlock_irqrestore(hba->host->host_lock, flags);
5304 } else if (hba->ufs_device_wlun) {
5305 struct device *supplier = NULL;
5306
5307 /* Ensure UFS Device WLUN exists and does not disappear */
5308 spin_lock_irqsave(hba->host->host_lock, flags);
5309 if (hba->ufs_device_wlun) {
5310 supplier = &hba->ufs_device_wlun->sdev_gendev;
5311 get_device(supplier);
5312 }
5313 spin_unlock_irqrestore(hba->host->host_lock, flags);
5314
5315 if (supplier) {
5316 /*
5317 * If a LUN fails to probe (e.g. absent BOOT WLUN), the
5318 * device will not have been registered but can still
5319 * have a device link holding a reference to the device.
5320 */
5321 device_link_remove(&sdev->sdev_gendev, supplier);
5322 put_device(supplier);
5323 }
5324 }
5325 }
5326
5327 /**
5328 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
5329 * @lrbp: pointer to local reference block of completed command
5330 * @scsi_status: SCSI command status
5331 *
5332 * Return: value based on the SCSI command status.
5333 */
5334 static inline int
5335 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
5336 {
5337 int result = 0;
5338
5339 switch (scsi_status) {
5340 case SAM_STAT_CHECK_CONDITION:
5341 ufshcd_copy_sense_data(lrbp);
5342 fallthrough;
5343 case SAM_STAT_GOOD:
5344 result |= DID_OK << 16 | scsi_status;
5345 break;
5346 case SAM_STAT_TASK_SET_FULL:
5347 case SAM_STAT_BUSY:
5348 case SAM_STAT_TASK_ABORTED:
5349 ufshcd_copy_sense_data(lrbp);
5350 result |= scsi_status;
5351 break;
5352 default:
5353 result |= DID_ERROR << 16;
5354 break;
5355 } /* end of switch */
5356
5357 return result;
5358 }
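
/*
 * Editorial note (worked example): the value returned above packs the host
 * byte into bits 16..23 and the SCSI status into the low byte. With
 * DID_OK == 0 and SAM_STAT_CHECK_CONDITION == 0x02, a check-condition
 * completion yields
 *
 *	result = (DID_OK << 16) | 0x02 = 0x00000002
 *
 * whereas the default branch yields (DID_ERROR << 16) == 0x00070000, so
 * host_byte(result) == DID_ERROR and the midlayer treats the command as
 * having failed at the host level.
 */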
5359
5360 /**
5361 * ufshcd_transfer_rsp_status - Get overall status of the response
5362 * @hba: per adapter instance
5363 * @lrbp: pointer to local reference block of completed command
5364 * @cqe: pointer to the completion queue entry
5365 *
5366 * Return: result of the command to notify SCSI midlayer.
5367 */
5368 static inline int
5369 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
5370 struct cq_entry *cqe)
5371 {
5372 int result = 0;
5373 int scsi_status;
5374 enum utp_ocs ocs;
5375 u8 upiu_flags;
5376 u32 resid;
5377
5378 upiu_flags = lrbp->ucd_rsp_ptr->header.flags;
5379 resid = be32_to_cpu(lrbp->ucd_rsp_ptr->sr.residual_transfer_count);
5380 /*
5381 * Test !overflow instead of underflow to support UFS devices that do
5382 * not set either flag.
5383 */
5384 if (resid && !(upiu_flags & UPIU_RSP_FLAG_OVERFLOW))
5385 scsi_set_resid(lrbp->cmd, resid);
5386
5387 /* overall command status of utrd */
5388 ocs = ufshcd_get_tr_ocs(lrbp, cqe);
5389
5390 if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
5391 if (lrbp->ucd_rsp_ptr->header.response ||
5392 lrbp->ucd_rsp_ptr->header.status)
5393 ocs = OCS_SUCCESS;
5394 }
5395
5396 switch (ocs) {
5397 case OCS_SUCCESS:
5398 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
5399 switch (ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr)) {
5400 case UPIU_TRANSACTION_RESPONSE:
5401 /*
5402 * get the result based on SCSI status response
5403 * to notify the SCSI midlayer of the command status
5404 */
5405 scsi_status = lrbp->ucd_rsp_ptr->header.status;
5406 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
5407
5408 /*
5409 * Currently only the BKOPS exception event is supported, so BKOPS
5410 * exception events can be ignored during the power management
5411 * callbacks. The BKOPS exception event is not expected to be raised
5412 * in the runtime-suspend callback because that path allows urgent
5413 * BKOPS anyway. During system suspend, BKOPS is forcefully disabled,
5414 * and if urgent BKOPS is needed it will be re-enabled on system
5415 * resume. A long-term solution could be to abort the system suspend
5416 * if the UFS device needs urgent BKOPS.
5419 */
5420 if (!hba->pm_op_in_progress &&
5421 !ufshcd_eh_in_progress(hba) &&
5422 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
5423 /* Flushed in suspend */
5424 schedule_work(&hba->eeh_work);
5425 break;
5426 case UPIU_TRANSACTION_REJECT_UPIU:
5427 /* TODO: handle Reject UPIU Response */
5428 result = DID_ERROR << 16;
5429 dev_err(hba->dev,
5430 "Reject UPIU not fully implemented\n");
5431 break;
5432 default:
5433 dev_err(hba->dev,
5434 "Unexpected request response code = %x\n",
5435 result);
5436 result = DID_ERROR << 16;
5437 break;
5438 }
5439 break;
5440 case OCS_ABORTED:
5441 case OCS_INVALID_COMMAND_STATUS:
5442 result |= DID_REQUEUE << 16;
5443 dev_warn(hba->dev,
5444 "OCS %s from controller for tag %d\n",
5445 (ocs == OCS_ABORTED ? "aborted" : "invalid"),
5446 lrbp->task_tag);
5447 break;
5448 case OCS_INVALID_CMD_TABLE_ATTR:
5449 case OCS_INVALID_PRDT_ATTR:
5450 case OCS_MISMATCH_DATA_BUF_SIZE:
5451 case OCS_MISMATCH_RESP_UPIU_SIZE:
5452 case OCS_PEER_COMM_FAILURE:
5453 case OCS_FATAL_ERROR:
5454 case OCS_DEVICE_FATAL_ERROR:
5455 case OCS_INVALID_CRYPTO_CONFIG:
5456 case OCS_GENERAL_CRYPTO_ERROR:
5457 default:
5458 result |= DID_ERROR << 16;
5459 dev_err(hba->dev,
5460 "OCS error from controller = %x for tag %d\n",
5461 ocs, lrbp->task_tag);
5462 ufshcd_print_evt_hist(hba);
5463 ufshcd_print_host_state(hba);
5464 break;
5465 } /* end of switch */
5466
5467 if ((host_byte(result) != DID_OK) &&
5468 (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
5469 ufshcd_print_tr(hba, lrbp->task_tag, true);
5470 return result;
5471 }
5472
5473 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
5474 u32 intr_mask)
5475 {
5476 if (!ufshcd_is_auto_hibern8_supported(hba) ||
5477 !ufshcd_is_auto_hibern8_enabled(hba))
5478 return false;
5479
5480 if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
5481 return false;
5482
5483 if (hba->active_uic_cmd &&
5484 (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
5485 hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
5486 return false;
5487
5488 return true;
5489 }
5490
5491 /**
5492 * ufshcd_uic_cmd_compl - handle completion of uic command
5493 * @hba: per adapter instance
5494 * @intr_status: interrupt status generated by the controller
5495 *
5496 * Return:
5497 * IRQ_HANDLED - If interrupt is valid
5498 * IRQ_NONE - If invalid interrupt
5499 */
5500 static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
5501 {
5502 irqreturn_t retval = IRQ_NONE;
5503 struct uic_command *cmd;
5504
5505 spin_lock(hba->host->host_lock);
5506 cmd = hba->active_uic_cmd;
5507 if (WARN_ON_ONCE(!cmd))
5508 goto unlock;
5509
5510 if (ufshcd_is_auto_hibern8_error(hba, intr_status))
5511 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
5512
5513 if (intr_status & UIC_COMMAND_COMPL) {
5514 cmd->argument2 |= ufshcd_get_uic_cmd_result(hba);
5515 cmd->argument3 = ufshcd_get_dme_attr_val(hba);
5516 if (!hba->uic_async_done)
5517 cmd->cmd_active = 0;
5518 complete(&cmd->done);
5519 retval = IRQ_HANDLED;
5520 }
5521
5522 if (intr_status & UFSHCD_UIC_PWR_MASK && hba->uic_async_done) {
5523 cmd->cmd_active = 0;
5524 complete(hba->uic_async_done);
5525 retval = IRQ_HANDLED;
5526 }
5527
5528 if (retval == IRQ_HANDLED)
5529 ufshcd_add_uic_command_trace(hba, cmd, UFS_CMD_COMP);
5530
5531 unlock:
5532 spin_unlock(hba->host->host_lock);
5533
5534 return retval;
5535 }
5536
5537 /* Release the resources allocated for processing a SCSI command. */
5538 void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
5539 struct ufshcd_lrb *lrbp)
5540 {
5541 struct scsi_cmnd *cmd = lrbp->cmd;
5542
5543 scsi_dma_unmap(cmd);
5544 ufshcd_crypto_clear_prdt(hba, lrbp);
5545 ufshcd_release(hba);
5546 ufshcd_clk_scaling_update_busy(hba);
5547 }
5548
5549 /**
5550 * ufshcd_compl_one_cqe - handle a completion queue entry
5551 * @hba: per adapter instance
5552 * @task_tag: the task tag of the request to be completed
5553 * @cqe: pointer to the completion queue entry
5554 */
5555 void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
5556 struct cq_entry *cqe)
5557 {
5558 struct ufshcd_lrb *lrbp;
5559 struct scsi_cmnd *cmd;
5560 enum utp_ocs ocs;
5561
5562 lrbp = &hba->lrb[task_tag];
5563 lrbp->compl_time_stamp = ktime_get();
5564 lrbp->compl_time_stamp_local_clock = local_clock();
5565 cmd = lrbp->cmd;
5566 if (cmd) {
5567 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
5568 ufshcd_update_monitor(hba, lrbp);
5569 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_COMP);
5570 cmd->result = ufshcd_transfer_rsp_status(hba, lrbp, cqe);
5571 ufshcd_release_scsi_cmd(hba, lrbp);
5572 /* Do not touch lrbp after scsi done */
5573 scsi_done(cmd);
5574 } else {
5575 if (cqe) {
5576 ocs = le32_to_cpu(cqe->status) & MASK_OCS;
5577 lrbp->utr_descriptor_ptr->header.ocs = ocs;
5578 }
5579 complete(&hba->dev_cmd.complete);
5580 }
5581 }
5582
5583 /**
5584 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
5585 * @hba: per adapter instance
5586 * @completed_reqs: bitmask that indicates which requests to complete
5587 */
5588 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
5589 unsigned long completed_reqs)
5590 {
5591 int tag;
5592
5593 for_each_set_bit(tag, &completed_reqs, hba->nutrs)
5594 ufshcd_compl_one_cqe(hba, tag, NULL);
5595 }
5596
5597 /* Any value that is not an existing queue number is fine for this constant. */
5598 enum {
5599 UFSHCD_POLL_FROM_INTERRUPT_CONTEXT = -1
5600 };
5601
5602 static void ufshcd_clear_polled(struct ufs_hba *hba,
5603 unsigned long *completed_reqs)
5604 {
5605 int tag;
5606
5607 for_each_set_bit(tag, completed_reqs, hba->nutrs) {
5608 struct scsi_cmnd *cmd = hba->lrb[tag].cmd;
5609
5610 if (!cmd)
5611 continue;
5612 if (scsi_cmd_to_rq(cmd)->cmd_flags & REQ_POLLED)
5613 __clear_bit(tag, completed_reqs);
5614 }
5615 }
5616
5617 /*
5618 * Return: > 0 if one or more commands have been completed or 0 if no
5619 * requests have been completed.
5620 */
5621 static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
5622 {
5623 struct ufs_hba *hba = shost_priv(shost);
5624 unsigned long completed_reqs, flags;
5625 u32 tr_doorbell;
5626 struct ufs_hw_queue *hwq;
5627
5628 if (hba->mcq_enabled) {
5629 hwq = &hba->uhq[queue_num];
5630
5631 return ufshcd_mcq_poll_cqe_lock(hba, hwq);
5632 }
5633
5634 spin_lock_irqsave(&hba->outstanding_lock, flags);
5635 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5636 completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
5637 WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
5638 "completed: %#lx; outstanding: %#lx\n", completed_reqs,
5639 hba->outstanding_reqs);
5640 if (queue_num == UFSHCD_POLL_FROM_INTERRUPT_CONTEXT) {
5641 /* Do not complete polled requests from interrupt context. */
5642 ufshcd_clear_polled(hba, &completed_reqs);
5643 }
5644 hba->outstanding_reqs &= ~completed_reqs;
5645 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
5646
5647 if (completed_reqs)
5648 __ufshcd_transfer_req_compl(hba, completed_reqs);
5649
5650 return completed_reqs != 0;
5651 }
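
/*
 * Editorial note (worked example with hypothetical register values): in
 * legacy (single doorbell) mode a request is complete when its doorbell
 * bit has been cleared by the controller while the driver still tracks it
 * as outstanding. For example, with
 *
 *	hba->outstanding_reqs = 0b1011	(tags 0, 1 and 3 issued)
 *	tr_doorbell           = 0b0001	(only tag 0 still executing)
 *
 * the computation above gives
 *
 *	completed_reqs = ~tr_doorbell & outstanding_reqs = 0b1010
 *
 * i.e. tags 1 and 3 are completed and hba->outstanding_reqs shrinks to
 * 0b0001 under outstanding_lock.
 */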
5652
5653 /**
5654 * ufshcd_mcq_compl_pending_transfer - MCQ mode function. It is
5655 * invoked from the error handler context or ufshcd_host_reset_and_restore()
5656 * to complete the pending transfers and free the resources associated with
5657 * the scsi command.
5658 *
5659 * @hba: per adapter instance
5660 * @force_compl: This flag is set to true when invoked
5661 * from ufshcd_host_reset_and_restore() in which case it requires special
5662 * handling because the host controller has been reset by ufshcd_hba_stop().
5663 */
5664 static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
5665 bool force_compl)
5666 {
5667 struct ufs_hw_queue *hwq;
5668 struct ufshcd_lrb *lrbp;
5669 struct scsi_cmnd *cmd;
5670 unsigned long flags;
5671 int tag;
5672
5673 for (tag = 0; tag < hba->nutrs; tag++) {
5674 lrbp = &hba->lrb[tag];
5675 cmd = lrbp->cmd;
5676 if (!ufshcd_cmd_inflight(cmd) ||
5677 test_bit(SCMD_STATE_COMPLETE, &cmd->state))
5678 continue;
5679
5680 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
5681 if (!hwq)
5682 continue;
5683
5684 if (force_compl) {
5685 ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
5686 /*
5687 * For those cmds of which the cqes are not present
5688 * in the cq, complete them explicitly.
5689 */
5690 spin_lock_irqsave(&hwq->cq_lock, flags);
5691 if (cmd && !test_bit(SCMD_STATE_COMPLETE, &cmd->state)) {
5692 set_host_byte(cmd, DID_REQUEUE);
5693 ufshcd_release_scsi_cmd(hba, lrbp);
5694 scsi_done(cmd);
5695 }
5696 spin_unlock_irqrestore(&hwq->cq_lock, flags);
5697 } else {
5698 ufshcd_mcq_poll_cqe_lock(hba, hwq);
5699 }
5700 }
5701 }
5702
5703 /**
5704 * ufshcd_transfer_req_compl - handle SCSI and query command completion
5705 * @hba: per adapter instance
5706 *
5707 * Return:
5708 * IRQ_HANDLED - If interrupt is valid
5709 * IRQ_NONE - If invalid interrupt
5710 */
5711 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
5712 {
5713 /* Resetting interrupt aggregation counters first and reading the
5714 * DOOR_BELL afterward allows us to handle all the completed requests.
5715 * To prevent starvation of other interrupts, the DB is read only once
5716 * after the reset. The downside of this approach is the possibility of
5717 * a false interrupt if the device completes another request after the
5718 * aggregation reset and before the DB is read.
5719 */
5720 if (ufshcd_is_intr_aggr_allowed(hba) &&
5721 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
5722 ufshcd_reset_intr_aggr(hba);
5723
5724 if (ufs_fail_completion(hba))
5725 return IRQ_HANDLED;
5726
5727 /*
5728 * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
5729 * do not want polling to trigger spurious interrupt complaints.
5730 */
5731 ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);
5732
5733 return IRQ_HANDLED;
5734 }
5735
5736 int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
5737 {
5738 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5739 QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
5740 &ee_ctrl_mask);
5741 }
5742
5743 int ufshcd_write_ee_control(struct ufs_hba *hba)
5744 {
5745 int err;
5746
5747 mutex_lock(&hba->ee_ctrl_mutex);
5748 err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask);
5749 mutex_unlock(&hba->ee_ctrl_mutex);
5750 if (err)
5751 dev_err(hba->dev, "%s: failed to write ee control %d\n",
5752 __func__, err);
5753 return err;
5754 }
5755
5756 int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
5757 const u16 *other_mask, u16 set, u16 clr)
5758 {
5759 u16 new_mask, ee_ctrl_mask;
5760 int err = 0;
5761
5762 mutex_lock(&hba->ee_ctrl_mutex);
5763 new_mask = (*mask & ~clr) | set;
5764 ee_ctrl_mask = new_mask | *other_mask;
5765 if (ee_ctrl_mask != hba->ee_ctrl_mask)
5766 err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
5767 /* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */
5768 if (!err) {
5769 hba->ee_ctrl_mask = ee_ctrl_mask;
5770 *mask = new_mask;
5771 }
5772 mutex_unlock(&hba->ee_ctrl_mutex);
5773 return err;
5774 }
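
/*
 * Editorial note (worked example with hypothetical masks): the attribute
 * written to the device is the union of the driver-owned and user-owned
 * exception masks. If the driver mask is currently 0x04 (the urgent-BKOPS
 * bit), the user mask is 0x08, and a caller passes set = 0x10, clr = 0x04,
 * then
 *
 *	new_mask     = (0x04 & ~0x04) | 0x10 = 0x10
 *	ee_ctrl_mask = 0x10 | 0x08           = 0x18
 *
 * and wExceptionEventControl is rewritten only because 0x18 differs from
 * the cached hba->ee_ctrl_mask.
 */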
5775
5776 /**
5777 * ufshcd_disable_ee - disable exception event
5778 * @hba: per-adapter instance
5779 * @mask: exception event to disable
5780 *
5781 * Disables exception event in the device so that the EVENT_ALERT
5782 * bit is not set.
5783 *
5784 * Return: zero on success, non-zero error value on failure.
5785 */
5786 static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
5787 {
5788 return ufshcd_update_ee_drv_mask(hba, 0, mask);
5789 }
5790
5791 /**
5792 * ufshcd_enable_ee - enable exception event
5793 * @hba: per-adapter instance
5794 * @mask: exception event to enable
5795 *
5796 * Enable corresponding exception event in the device to allow
5797 * device to alert host in critical scenarios.
5798 *
5799 * Return: zero on success, non-zero error value on failure.
5800 */
5801 static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
5802 {
5803 return ufshcd_update_ee_drv_mask(hba, mask, 0);
5804 }
5805
5806 /**
5807 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
5808 * @hba: per-adapter instance
5809 *
5810 * Allow device to manage background operations on its own. Enabling
5811 * this might lead to inconsistent latencies during normal data transfers
5812 * as the device is allowed to manage its own way of handling background
5813 * operations.
5814 *
5815 * Return: zero on success, non-zero on failure.
5816 */
5817 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5818 {
5819 int err = 0;
5820
5821 if (hba->auto_bkops_enabled)
5822 goto out;
5823
5824 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5825 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5826 if (err) {
5827 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5828 __func__, err);
5829 goto out;
5830 }
5831
5832 hba->auto_bkops_enabled = true;
5833 trace_ufshcd_auto_bkops_state(hba, "Enabled");
5834
5835 /* No need of URGENT_BKOPS exception from the device */
5836 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5837 if (err)
5838 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5839 __func__, err);
5840 out:
5841 return err;
5842 }
5843
5844 /**
5845 * ufshcd_disable_auto_bkops - block device in doing background operations
5846 * @hba: per-adapter instance
5847 *
5848 * Disabling background operations improves command response latency, but
5849 * has the drawback that the device may move into a critical state in which
5850 * it is not operable. Make sure to call ufshcd_enable_auto_bkops() whenever
5851 * the host is idle so that BKOPS are managed effectively without any
5852 * negative impact.
5853 *
5854 * Return: zero on success, non-zero on failure.
5855 */
5856 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5857 {
5858 int err = 0;
5859
5860 if (!hba->auto_bkops_enabled)
5861 goto out;
5862
5863 /*
5864 * If host assisted BKOPs is to be enabled, make sure
5865 * urgent bkops exception is allowed.
5866 */
5867 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5868 if (err) {
5869 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5870 __func__, err);
5871 goto out;
5872 }
5873
5874 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5875 QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
5876 if (err) {
5877 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5878 __func__, err);
5879 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5880 goto out;
5881 }
5882
5883 hba->auto_bkops_enabled = false;
5884 trace_ufshcd_auto_bkops_state(hba, "Disabled");
5885 hba->is_urgent_bkops_lvl_checked = false;
5886 out:
5887 return err;
5888 }
5889
5890 /**
5891 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
5892 * @hba: per adapter instance
5893 *
5894 * After a device reset the device may toggle the BKOPS_EN flag
5895 * back to its default value. The s/w tracking variables should be updated
5896 * as well. This function changes the auto-bkops state based on
5897 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
5898 */
5899 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5900 {
5901 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5902 hba->auto_bkops_enabled = false;
5903 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5904 ufshcd_enable_auto_bkops(hba);
5905 } else {
5906 hba->auto_bkops_enabled = true;
5907 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5908 ufshcd_disable_auto_bkops(hba);
5909 }
5910 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
5911 hba->is_urgent_bkops_lvl_checked = false;
5912 }
5913
5914 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5915 {
5916 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5917 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5918 }
5919
5920 /**
5921 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5922 * @hba: per-adapter instance
5923 *
5924 * Read bkops_status from the UFS device and set the fBackgroundOpsEn
5925 * flag in the device to permit background operations if the device's
5926 * bkops_status is greater than or equal to hba->urgent_bkops_lvl;
5927 * clear the flag otherwise.
5928 *
5929 * Return: 0 for success, non-zero in case of failure.
5930 *
5931 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5932 * to know whether auto bkops is enabled or disabled after this function
5933 * returns control to it.
5934 */
5935 static int ufshcd_bkops_ctrl(struct ufs_hba *hba)
5936 {
5937 enum bkops_status status = hba->urgent_bkops_lvl;
5938 u32 curr_status = 0;
5939 int err;
5940
5941 err = ufshcd_get_bkops_status(hba, &curr_status);
5942 if (err) {
5943 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5944 __func__, err);
5945 goto out;
5946 } else if (curr_status > BKOPS_STATUS_MAX) {
5947 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5948 __func__, curr_status);
5949 err = -EINVAL;
5950 goto out;
5951 }
5952
5953 if (curr_status >= status)
5954 err = ufshcd_enable_auto_bkops(hba);
5955 else
5956 err = ufshcd_disable_auto_bkops(hba);
5957 out:
5958 return err;
5959 }
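
/*
 * Editorial note (worked example): bBackgroundOpStatus is compared against
 * the urgency level cached in hba->urgent_bkops_lvl, which defaults to
 * BKOPS_STATUS_PERF_IMPACT (2). So, for instance:
 *
 *	curr_status = 3 (critical)      -> ufshcd_enable_auto_bkops()
 *	curr_status = 1 (non-critical)  -> ufshcd_disable_auto_bkops()
 *
 * i.e. the device-managed BKOPS path is only turned on once the device
 * reports that performance is already impacted or worse.
 */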
5960
5961 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5962 {
5963 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5964 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5965 }
5966
5967 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5968 {
5969 int err;
5970 u32 curr_status = 0;
5971
5972 if (hba->is_urgent_bkops_lvl_checked)
5973 goto enable_auto_bkops;
5974
5975 err = ufshcd_get_bkops_status(hba, &curr_status);
5976 if (err) {
5977 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5978 __func__, err);
5979 goto out;
5980 }
5981
5982 /*
5983 * Some devices raise the urgent BKOPS exception event even when the
5984 * BKOPS status does not indicate "performance impacted" or "critical".
5985 * Handle such devices by determining their urgent BKOPS status at
5986 * runtime.
5987 */
5988 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5989 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5990 __func__, curr_status);
5991 /* update the current status as the urgent bkops level */
5992 hba->urgent_bkops_lvl = curr_status;
5993 hba->is_urgent_bkops_lvl_checked = true;
5994 }
5995
5996 enable_auto_bkops:
5997 err = ufshcd_enable_auto_bkops(hba);
5998 out:
5999 if (err < 0)
6000 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
6001 __func__, err);
6002 }
6003
6004 int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id)
6005 {
6006 struct utp_upiu_query_v4_0 *upiu_resp;
6007 struct ufs_query_req *request = NULL;
6008 struct ufs_query_res *response = NULL;
6009 int err;
6010
6011 if (hba->dev_info.wspecversion < 0x410)
6012 return -EOPNOTSUPP;
6013
6014 ufshcd_hold(hba);
6015 mutex_lock(&hba->dev_cmd.lock);
6016
6017 ufshcd_init_query(hba, &request, &response,
6018 UPIU_QUERY_OPCODE_READ_ATTR,
6019 QUERY_ATTR_IDN_DEV_LVL_EXCEPTION_ID, 0, 0);
6020
6021 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
6022
6023 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
6024
6025 if (err) {
6026 dev_err(hba->dev, "%s: failed to read device level exception %d\n",
6027 __func__, err);
6028 goto out;
6029 }
6030
6031 upiu_resp = (struct utp_upiu_query_v4_0 *)response;
6032 *exception_id = get_unaligned_be64(&upiu_resp->osf3);
6033 out:
6034 mutex_unlock(&hba->dev_cmd.lock);
6035 ufshcd_release(hba);
6036
6037 return err;
6038 }
6039
6040 static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
6041 {
6042 u8 index;
6043 enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG :
6044 UPIU_QUERY_OPCODE_CLEAR_FLAG;
6045
6046 index = ufshcd_wb_get_query_index(hba);
6047 return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL);
6048 }
6049
6050 int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
6051 {
6052 int ret;
6053
6054 if (!ufshcd_is_wb_allowed(hba) ||
6055 hba->dev_info.wb_enabled == enable)
6056 return 0;
6057
6058 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
6059 if (ret) {
6060 dev_err(hba->dev, "%s: Write Booster %s failed %d\n",
6061 __func__, enable ? "enabling" : "disabling", ret);
6062 return ret;
6063 }
6064
6065 hba->dev_info.wb_enabled = enable;
6066 dev_dbg(hba->dev, "%s: Write Booster %s\n",
6067 __func__, enable ? "enabled" : "disabled");
6068
6069 return ret;
6070 }
6071
6072 static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
6073 bool enable)
6074 {
6075 int ret;
6076
6077 ret = __ufshcd_wb_toggle(hba, enable,
6078 QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
6079 if (ret) {
6080 dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed %d\n",
6081 __func__, enable ? "enabling" : "disabling", ret);
6082 return;
6083 }
6084 dev_dbg(hba->dev, "%s: WB-Buf Flush during H8 %s\n",
6085 __func__, enable ? "enabled" : "disabled");
6086 }
6087
6088 int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
6089 {
6090 int ret;
6091
6092 if (!ufshcd_is_wb_allowed(hba) ||
6093 hba->dev_info.wb_buf_flush_enabled == enable)
6094 return 0;
6095
6096 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
6097 if (ret) {
6098 dev_err(hba->dev, "%s: WB-Buf Flush %s failed %d\n",
6099 __func__, enable ? "enabling" : "disabling", ret);
6100 return ret;
6101 }
6102
6103 hba->dev_info.wb_buf_flush_enabled = enable;
6104 dev_dbg(hba->dev, "%s: WB-Buf Flush %s\n",
6105 __func__, enable ? "enabled" : "disabled");
6106
6107 return ret;
6108 }
6109
6110 static bool ufshcd_wb_curr_buff_threshold_check(struct ufs_hba *hba,
6111 u32 avail_buf)
6112 {
6113 u32 cur_buf;
6114 int ret;
6115 u8 index;
6116
6117 index = ufshcd_wb_get_query_index(hba);
6118 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6119 QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
6120 index, 0, &cur_buf);
6121 if (ret) {
6122 dev_err(hba->dev, "%s: dCurWriteBoosterBufferSize read failed %d\n",
6123 __func__, ret);
6124 return false;
6125 }
6126
6127 if (!cur_buf) {
6128 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
6129 cur_buf);
6130 return false;
6131 }
6132 /* Request flushing only while the available buffer is below the flush threshold */
6133 return avail_buf < hba->vps->wb_flush_threshold;
6134 }
6135
6136 static void ufshcd_wb_force_disable(struct ufs_hba *hba)
6137 {
6138 if (ufshcd_is_wb_buf_flush_allowed(hba))
6139 ufshcd_wb_toggle_buf_flush(hba, false);
6140
6141 ufshcd_wb_toggle_buf_flush_during_h8(hba, false);
6142 ufshcd_wb_toggle(hba, false);
6143 hba->caps &= ~UFSHCD_CAP_WB_EN;
6144
6145 dev_info(hba->dev, "%s: WB force disabled\n", __func__);
6146 }
6147
6148 static bool ufshcd_is_wb_buf_lifetime_available(struct ufs_hba *hba)
6149 {
6150 u32 lifetime;
6151 int ret;
6152 u8 index;
6153
6154 index = ufshcd_wb_get_query_index(hba);
6155 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6156 QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST,
6157 index, 0, &lifetime);
6158 if (ret) {
6159 dev_err(hba->dev,
6160 "%s: bWriteBoosterBufferLifeTimeEst read failed %d\n",
6161 __func__, ret);
6162 return false;
6163 }
6164
6165 if (lifetime == UFS_WB_EXCEED_LIFETIME) {
6166 dev_err(hba->dev, "%s: WB buf lifetime is exhausted 0x%02X\n",
6167 __func__, lifetime);
6168 return false;
6169 }
6170
6171 dev_dbg(hba->dev, "%s: WB buf lifetime is 0x%02X\n",
6172 __func__, lifetime);
6173
6174 return true;
6175 }
6176
6177 static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
6178 {
6179 int ret;
6180 u32 avail_buf;
6181 u8 index;
6182
6183 if (!ufshcd_is_wb_allowed(hba))
6184 return false;
6185
6186 if (!ufshcd_is_wb_buf_lifetime_available(hba)) {
6187 ufshcd_wb_force_disable(hba);
6188 return false;
6189 }
6190
6191 /*
6192 * In user-space reduction mode it is enough to decide whether to
6193 * enable flushing by checking only the available buffer; the threshold
6194 * used here is "more than 90% full".
6195 * In user-space preservation mode the current buffer size must be
6196 * checked too, because the WriteBooster buffer can shrink as the device
6197 * fills up. This information is provided by the current buffer
6198 * attribute (dCurrentWriteBoosterBufferSize).
6199 */
6200 index = ufshcd_wb_get_query_index(hba);
6201 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
6202 QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
6203 index, 0, &avail_buf);
6204 if (ret) {
6205 dev_warn(hba->dev, "%s: dAvailableWriteBoosterBufferSize read failed %d\n",
6206 __func__, ret);
6207 return false;
6208 }
6209
6210 if (!hba->dev_info.b_presrv_uspc_en)
6211 return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);
6212
6213 return ufshcd_wb_curr_buff_threshold_check(hba, avail_buf);
6214 }
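
/*
 * Editorial note (worked example with a hypothetical attribute value):
 * without the preserve-user-space capability, flushing is requested once
 * less than about 10% of the WriteBooster buffer remains. Since
 * dAvailableWriteBoosterBufferSize is reported in steps of 10%, a value
 * of 1 (roughly 10% left) satisfies
 *
 *	avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10)
 *
 * and triggers flushing, while a value of 5 (about 50% left) does not.
 */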
6215
6216 static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
6217 {
6218 struct ufs_hba *hba = container_of(to_delayed_work(work),
6219 struct ufs_hba,
6220 rpm_dev_flush_recheck_work);
6221 /*
6222 * To prevent unnecessary VCC power drain after the device finishes a
6223 * WriteBooster buffer flush or auto BKOPS, force a runtime resume after
6224 * a certain delay so that the threshold is rechecked by the next
6225 * runtime suspend.
6226 */
6227 ufshcd_rpm_get_sync(hba);
6228 ufshcd_rpm_put_sync(hba);
6229 }
6230
6231 /**
6232 * ufshcd_exception_event_handler - handle exceptions raised by device
6233 * @work: pointer to work data
6234 *
6235 * Read bExceptionEventStatus attribute from the device and handle the
6236 * exception event accordingly.
6237 */
6238 static void ufshcd_exception_event_handler(struct work_struct *work)
6239 {
6240 struct ufs_hba *hba;
6241 int err;
6242 u32 status = 0;
6243 hba = container_of(work, struct ufs_hba, eeh_work);
6244
6245 err = ufshcd_get_ee_status(hba, &status);
6246 if (err) {
6247 dev_err(hba->dev, "%s: failed to get exception status %d\n",
6248 __func__, err);
6249 return;
6250 }
6251
6252 trace_ufshcd_exception_event(hba, status);
6253
6254 if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
6255 ufshcd_bkops_exception_event_handler(hba);
6256
6257 if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
6258 ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
6259
6260 if (status & hba->ee_drv_mask & MASK_EE_HEALTH_CRITICAL) {
6261 hba->critical_health_count++;
6262 sysfs_notify(&hba->dev->kobj, NULL, "critical_health");
6263 }
6264
6265 if (status & hba->ee_drv_mask & MASK_EE_DEV_LVL_EXCEPTION) {
6266 atomic_inc(&hba->dev_lvl_exception_count);
6267 sysfs_notify(&hba->dev->kobj, NULL, "device_lvl_exception_count");
6268 }
6269
6270 ufs_debugfs_exception_event(hba, status);
6271 }
6272
6273 /* Complete requests that have door-bell cleared */
6274 static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl)
6275 {
6276 if (hba->mcq_enabled)
6277 ufshcd_mcq_compl_pending_transfer(hba, force_compl);
6278 else
6279 ufshcd_transfer_req_compl(hba);
6280
6281 ufshcd_tmc_handler(hba);
6282 }
6283
6284 /**
6285 * ufshcd_quirk_dl_nac_errors - check whether error handling is required
6286 * to recover from DL NAC errors
6287 * @hba: per-adapter instance
6288 *
6289 * Return: true if error handling is required, false otherwise.
6290 */
6291 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
6292 {
6293 unsigned long flags;
6294 bool err_handling = true;
6295
6296 spin_lock_irqsave(hba->host->host_lock, flags);
6297 /*
6298 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
6299 * device fatal error and/or DL NAC & REPLAY timeout errors.
6300 */
6301 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
6302 goto out;
6303
6304 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
6305 ((hba->saved_err & UIC_ERROR) &&
6306 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
6307 goto out;
6308
6309 if ((hba->saved_err & UIC_ERROR) &&
6310 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
6311 int err;
6312 /*
6313 * Wait 50 ms to see whether any other errors are reported.
6314 */
6315 spin_unlock_irqrestore(hba->host->host_lock, flags);
6316 msleep(50);
6317 spin_lock_irqsave(hba->host->host_lock, flags);
6318
6319 /*
6320 * Now check whether any severe errors other than the DL NAC
6321 * error have been received.
6322 */
6323 if ((hba->saved_err & INT_FATAL_ERRORS) ||
6324 ((hba->saved_err & UIC_ERROR) &&
6325 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
6326 goto out;
6327
6328 /*
6329 * As DL NAC is the only error received so far, send a NOP OUT
6330 * command to confirm whether the link is still active.
6331 * - If no response is received, do error recovery.
6332 * - If a response is received, clear the DL NAC error bit.
6333 */
6334
6335 spin_unlock_irqrestore(hba->host->host_lock, flags);
6336 err = ufshcd_verify_dev_init(hba);
6337 spin_lock_irqsave(hba->host->host_lock, flags);
6338
6339 if (err)
6340 goto out;
6341
6342 /* Link seems to be alive hence ignore the DL NAC errors */
6343 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
6344 hba->saved_err &= ~UIC_ERROR;
6345 /* clear NAC error */
6346 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6347 if (!hba->saved_uic_err)
6348 err_handling = false;
6349 }
6350 out:
6351 spin_unlock_irqrestore(hba->host->host_lock, flags);
6352 return err_handling;
6353 }
6354
6355 /* host lock must be held before calling this func */
6356 static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
6357 {
6358 return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
6359 (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
6360 }
6361
6362 void ufshcd_schedule_eh_work(struct ufs_hba *hba)
6363 {
6364 lockdep_assert_held(hba->host->host_lock);
6365
6366 /* handle fatal errors only when link is not in error state */
6367 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
6368 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
6369 ufshcd_is_saved_err_fatal(hba))
6370 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
6371 else
6372 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
6373 queue_work(hba->eh_wq, &hba->eh_work);
6374 }
6375 }
6376
6377 static void ufshcd_force_error_recovery(struct ufs_hba *hba)
6378 {
6379 spin_lock_irq(hba->host->host_lock);
6380 hba->force_reset = true;
6381 ufshcd_schedule_eh_work(hba);
6382 spin_unlock_irq(hba->host->host_lock);
6383 }
6384
6385 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
6386 {
6387 mutex_lock(&hba->wb_mutex);
6388 down_write(&hba->clk_scaling_lock);
6389 hba->clk_scaling.is_allowed = allow;
6390 up_write(&hba->clk_scaling_lock);
6391 mutex_unlock(&hba->wb_mutex);
6392 }
6393
6394 static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
6395 {
6396 if (suspend) {
6397 if (hba->clk_scaling.is_enabled)
6398 ufshcd_suspend_clkscaling(hba);
6399 ufshcd_clk_scaling_allow(hba, false);
6400 } else {
6401 ufshcd_clk_scaling_allow(hba, true);
6402 if (hba->clk_scaling.is_enabled)
6403 ufshcd_resume_clkscaling(hba);
6404 }
6405 }
6406
6407 static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
6408 {
6409 ufshcd_rpm_get_sync(hba);
6410 if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) ||
6411 hba->is_sys_suspended) {
6412 enum ufs_pm_op pm_op;
6413
6414 /*
6415 * Don't assume anything about the resume path: if
6416 * resume fails, the IRQ and clocks can be off, and the power
6417 * supplies can be off or in LPM.
6418 */
6419 ufshcd_setup_hba_vreg(hba, true);
6420 ufshcd_enable_irq(hba);
6421 ufshcd_setup_vreg(hba, true);
6422 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
6423 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
6424 ufshcd_hold(hba);
6425 if (!ufshcd_is_clkgating_allowed(hba))
6426 ufshcd_setup_clocks(hba, true);
6427 pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
6428 ufshcd_vops_resume(hba, pm_op);
6429 } else {
6430 ufshcd_hold(hba);
6431 if (ufshcd_is_clkscaling_supported(hba) &&
6432 hba->clk_scaling.is_enabled)
6433 ufshcd_suspend_clkscaling(hba);
6434 ufshcd_clk_scaling_allow(hba, false);
6435 }
6436 /* Wait for ongoing ufshcd_queuecommand() calls to finish. */
6437 blk_mq_quiesce_tagset(&hba->host->tag_set);
6438 cancel_work_sync(&hba->eeh_work);
6439 }
6440
6441 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
6442 {
6443 blk_mq_unquiesce_tagset(&hba->host->tag_set);
6444 ufshcd_release(hba);
6445 if (ufshcd_is_clkscaling_supported(hba))
6446 ufshcd_clk_scaling_suspend(hba, false);
6447 ufshcd_rpm_put(hba);
6448 }
6449
6450 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
6451 {
6452 return (!hba->is_powered || hba->shutting_down ||
6453 !hba->ufs_device_wlun ||
6454 hba->ufshcd_state == UFSHCD_STATE_ERROR ||
6455 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
6456 ufshcd_is_link_broken(hba))));
6457 }
6458
6459 #ifdef CONFIG_PM
6460 static void ufshcd_recover_pm_error(struct ufs_hba *hba)
6461 {
6462 struct Scsi_Host *shost = hba->host;
6463 struct scsi_device *sdev;
6464 struct request_queue *q;
6465 int ret;
6466
6467 hba->is_sys_suspended = false;
6468 /*
6469 * Set the RPM status of the WLUN device to RPM_ACTIVE;
6470 * this also clears its runtime error.
6471 */
6472 ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev);
6473
6474 /* hba device might have a runtime error otherwise */
6475 if (ret)
6476 ret = pm_runtime_set_active(hba->dev);
6477 /*
6478 * If the WLUN device had a runtime error, also resume its
6479 * consumer SCSI devices in case any of them failed to resume
6480 * due to the supplier's runtime resume failure. This unblocks
6481 * blk_queue_enter() if there are bios waiting inside it.
6482 */
6483 if (!ret) {
6484 shost_for_each_device(sdev, shost) {
6485 q = sdev->request_queue;
6486 if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
6487 q->rpm_status == RPM_SUSPENDING))
6488 pm_request_resume(q->dev);
6489 }
6490 }
6491 }
6492 #else
6493 static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
6494 {
6495 }
6496 #endif
6497
6498 static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
6499 {
6500 struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
6501 u32 mode;
6502
6503 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
6504
6505 if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
6506 return true;
6507
6508 if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
6509 return true;
6510
6511 return false;
6512 }
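
/*
 * Editorial note (worked example with a hypothetical attribute value,
 * assuming the usual UniPro encoding where the TX power mode occupies the
 * low nibble of PA_PWRMODE and the RX power mode the next nibble): if a
 * LINERESET dropped the link to PWM, the attribute might read 0x22
 * (SLOW_MODE in both directions), so
 *
 *	tx = 0x22 & PWRMODE_MASK                        = 0x2
 *	rx = (0x22 >> PWRMODE_RX_OFFSET) & PWRMODE_MASK = 0x2
 *
 * If hba->pwr_info still records FAST_MODE (0x1) for either direction,
 * the function above returns true and the error handler restores the
 * power mode via ufshcd_config_pwr_mode().
 */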
6513
6514 static bool ufshcd_abort_one(struct request *rq, void *priv)
6515 {
6516 int *ret = priv;
6517 u32 tag = rq->tag;
6518 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
6519 struct scsi_device *sdev = cmd->device;
6520 struct Scsi_Host *shost = sdev->host;
6521 struct ufs_hba *hba = shost_priv(shost);
6522
6523 *ret = ufshcd_try_to_abort_task(hba, tag);
6524 dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
6525 hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
6526 *ret ? "failed" : "succeeded");
6527
6528 return *ret == 0;
6529 }
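
/*
 * Editorial note (illustrative sketch): ufshcd_abort_one() follows the
 * blk_mq_tagset_busy_iter() callback contract, where returning false stops
 * the iteration. Because it returns "*ret == 0", the walk over in-flight
 * requests stops at the first tag whose abort fails and the failure code
 * is left in *ret, roughly:
 *
 *	int ret = 0;
 *
 *	blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_abort_one, &ret);
 *	if (ret)
 *		;	// at least one abort failed -> reset the host
 */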
6530
6531 /**
6532 * ufshcd_abort_all - Abort all pending commands.
6533 * @hba: Host bus adapter pointer.
6534 *
6535 * Return: true if and only if the host controller needs to be reset.
6536 */
6537 static bool ufshcd_abort_all(struct ufs_hba *hba)
6538 {
6539 int tag, ret = 0;
6540
6541 blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_abort_one, &ret);
6542 if (ret)
6543 goto out;
6544
6545 /* Clear pending task management requests */
6546 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
6547 ret = ufshcd_clear_tm_cmd(hba, tag);
6548 if (ret)
6549 goto out;
6550 }
6551
6552 out:
6553 /* Complete the requests that are cleared by s/w */
6554 ufshcd_complete_requests(hba, false);
6555
6556 return ret != 0;
6557 }
6558
6559 /**
6560 * ufshcd_err_handler - handle UFS errors that require s/w attention
6561 * @work: pointer to work structure
6562 */
6563 static void ufshcd_err_handler(struct work_struct *work)
6564 {
6565 int retries = MAX_ERR_HANDLER_RETRIES;
6566 struct ufs_hba *hba;
6567 unsigned long flags;
6568 bool needs_restore;
6569 bool needs_reset;
6570 int pmc_err;
6571
6572 hba = container_of(work, struct ufs_hba, eh_work);
6573
6574 dev_info(hba->dev,
6575 "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
6576 __func__, ufshcd_state_name[hba->ufshcd_state],
6577 hba->is_powered, hba->shutting_down, hba->saved_err,
6578 hba->saved_uic_err, hba->force_reset,
6579 ufshcd_is_link_broken(hba) ? "; link is broken" : "");
6580
6581 down(&hba->host_sem);
6582 spin_lock_irqsave(hba->host->host_lock, flags);
6583 if (ufshcd_err_handling_should_stop(hba)) {
6584 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6585 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6586 spin_unlock_irqrestore(hba->host->host_lock, flags);
6587 up(&hba->host_sem);
6588 return;
6589 }
6590 ufshcd_set_eh_in_progress(hba);
6591 spin_unlock_irqrestore(hba->host->host_lock, flags);
6592 ufshcd_err_handling_prepare(hba);
6593 /* Complete requests that have door-bell cleared by h/w */
6594 ufshcd_complete_requests(hba, false);
6595 spin_lock_irqsave(hba->host->host_lock, flags);
6596 again:
6597 needs_restore = false;
6598 needs_reset = false;
6599
6600 if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
6601 hba->ufshcd_state = UFSHCD_STATE_RESET;
6602 /*
6603 * A full reset and restore might have happened after preparation
6604 * is finished; double-check whether we should stop.
6605 */
6606 if (ufshcd_err_handling_should_stop(hba))
6607 goto skip_err_handling;
6608
6609 if ((hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) &&
6610 !hba->force_reset) {
6611 bool ret;
6612
6613 spin_unlock_irqrestore(hba->host->host_lock, flags);
6614 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
6615 ret = ufshcd_quirk_dl_nac_errors(hba);
6616 spin_lock_irqsave(hba->host->host_lock, flags);
6617 if (!ret && ufshcd_err_handling_should_stop(hba))
6618 goto skip_err_handling;
6619 }
6620
6621 if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6622 (hba->saved_uic_err &&
6623 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6624 bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);
6625
6626 spin_unlock_irqrestore(hba->host->host_lock, flags);
6627 ufshcd_print_host_state(hba);
6628 ufshcd_print_pwr_info(hba);
6629 ufshcd_print_evt_hist(hba);
6630 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
6631 ufshcd_print_trs_all(hba, pr_prdt);
6632 spin_lock_irqsave(hba->host->host_lock, flags);
6633 }
6634
6635 /*
6636 * If a host reset is required, skip forcefully clearing the pending
6637 * transfers because they will be cleared during the host reset and
6638 * restore.
6639 */
6640 if (hba->force_reset || ufshcd_is_link_broken(hba) ||
6641 ufshcd_is_saved_err_fatal(hba) ||
6642 ((hba->saved_err & UIC_ERROR) &&
6643 (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
6644 UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
6645 needs_reset = true;
6646 goto do_reset;
6647 }
6648
6649 /*
6650 * If LINERESET was caught, UFS might have been put to PWM mode,
6651 * check if power mode restore is needed.
6652 */
6653 if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
6654 hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6655 if (!hba->saved_uic_err)
6656 hba->saved_err &= ~UIC_ERROR;
6657 spin_unlock_irqrestore(hba->host->host_lock, flags);
6658 if (ufshcd_is_pwr_mode_restore_needed(hba))
6659 needs_restore = true;
6660 spin_lock_irqsave(hba->host->host_lock, flags);
6661 if (!hba->saved_err && !needs_restore)
6662 goto skip_err_handling;
6663 }
6664
6665 hba->silence_err_logs = true;
6666 /* release lock as clear command might sleep */
6667 spin_unlock_irqrestore(hba->host->host_lock, flags);
6668
6669 needs_reset = ufshcd_abort_all(hba);
6670
6671 spin_lock_irqsave(hba->host->host_lock, flags);
6672 hba->silence_err_logs = false;
6673 if (needs_reset)
6674 goto do_reset;
6675
6676 /*
6677 * After all requests and tasks have been cleared from the doorbell,
6678 * it is safe to restore the power mode.
6679 */
6680 if (needs_restore) {
6681 spin_unlock_irqrestore(hba->host->host_lock, flags);
6682 /*
6683 * Hold the scaling lock just in case dev cmds
6684 * are sent via bsg and/or sysfs.
6685 */
6686 down_write(&hba->clk_scaling_lock);
6687 hba->force_pmc = true;
6688 pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
6689 if (pmc_err) {
6690 needs_reset = true;
6691 dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
6692 __func__, pmc_err);
6693 }
6694 hba->force_pmc = false;
6695 ufshcd_print_pwr_info(hba);
6696 up_write(&hba->clk_scaling_lock);
6697 spin_lock_irqsave(hba->host->host_lock, flags);
6698 }
6699
6700 do_reset:
6701 /* Fatal errors need reset */
6702 if (needs_reset) {
6703 int err;
6704
6705 hba->force_reset = false;
6706 spin_unlock_irqrestore(hba->host->host_lock, flags);
6707 err = ufshcd_reset_and_restore(hba);
6708 if (err)
6709 dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
6710 __func__, err);
6711 else
6712 ufshcd_recover_pm_error(hba);
6713 spin_lock_irqsave(hba->host->host_lock, flags);
6714 }
6715
6716 skip_err_handling:
6717 if (!needs_reset) {
6718 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
6719 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6720 if (hba->saved_err || hba->saved_uic_err)
6721 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
6722 __func__, hba->saved_err, hba->saved_uic_err);
6723 }
6724 /* Exit in an operational state or dead */
6725 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
6726 hba->ufshcd_state != UFSHCD_STATE_ERROR) {
6727 if (--retries)
6728 goto again;
6729 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6730 }
6731 ufshcd_clear_eh_in_progress(hba);
6732 spin_unlock_irqrestore(hba->host->host_lock, flags);
6733 ufshcd_err_handling_unprepare(hba);
6734 up(&hba->host_sem);
6735
6736 dev_info(hba->dev, "%s finished; HBA state %s\n", __func__,
6737 ufshcd_state_name[hba->ufshcd_state]);
6738 }
6739
6740 /**
6741 * ufshcd_update_uic_error - check and set fatal UIC error flags.
6742 * @hba: per-adapter instance
6743 *
6744 * Return:
6745 * IRQ_HANDLED - If interrupt is valid
6746 * IRQ_NONE - If invalid interrupt
6747 */
6748 static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
6749 {
6750 u32 reg;
6751 irqreturn_t retval = IRQ_NONE;
6752
6753 /* PHY layer error */
6754 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
6755 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
6756 (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
6757 ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
6758 /*
6759 * To know whether this error is fatal or not, DB timeout
6760 * must be checked but this error is handled separately.
6761 */
6762 if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
6763 dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
6764 __func__);
6765
6766 /* Got a LINERESET indication. */
6767 if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
6768 struct uic_command *cmd = NULL;
6769
6770 hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
6771 if (hba->uic_async_done && hba->active_uic_cmd)
6772 cmd = hba->active_uic_cmd;
6773 /*
6774 * Ignore the LINERESET during power mode change
6775 * operation via DME_SET command.
6776 */
6777 if (cmd && (cmd->command == UIC_CMD_DME_SET))
6778 hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
6779 }
6780 retval |= IRQ_HANDLED;
6781 }
6782
6783 /* PA_INIT_ERROR is fatal and needs UIC reset */
6784 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
6785 if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
6786 (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
6787 ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);
6788
6789 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
6790 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
6791 else if (hba->dev_quirks &
6792 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
6793 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
6794 hba->uic_error |=
6795 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
6796 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
6797 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
6798 }
6799 retval |= IRQ_HANDLED;
6800 }
6801
6802 /* UIC NL/TL/DME errors needs software retry */
6803 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
6804 if ((reg & UIC_NETWORK_LAYER_ERROR) &&
6805 (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
6806 ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
6807 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
6808 retval |= IRQ_HANDLED;
6809 }
6810
6811 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
6812 if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
6813 (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
6814 ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
6815 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
6816 retval |= IRQ_HANDLED;
6817 }
6818
6819 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
6820 if ((reg & UIC_DME_ERROR) &&
6821 (reg & UIC_DME_ERROR_CODE_MASK)) {
6822 ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
6823 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
6824 retval |= IRQ_HANDLED;
6825 }
6826
6827 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
6828 __func__, hba->uic_error);
6829 return retval;
6830 }
6831
6832 /**
6833 * ufshcd_check_errors - Check for errors that need s/w attention
6834 * @hba: per-adapter instance
6835 * @intr_status: interrupt status generated by the controller
6836 *
6837 * Return:
6838 * IRQ_HANDLED - If interrupt is valid
6839 * IRQ_NONE - If invalid interrupt
6840 */
6841 static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
6842 {
6843 bool queue_eh_work = false;
6844 irqreturn_t retval = IRQ_NONE;
6845
6846 spin_lock(hba->host->host_lock);
6847 hba->errors |= UFSHCD_ERROR_MASK & intr_status;
6848
6849 if (hba->errors & INT_FATAL_ERRORS) {
6850 ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
6851 hba->errors);
6852 queue_eh_work = true;
6853 }
6854
6855 if (hba->errors & UIC_ERROR) {
6856 hba->uic_error = 0;
6857 retval = ufshcd_update_uic_error(hba);
6858 if (hba->uic_error)
6859 queue_eh_work = true;
6860 }
6861
6862 if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
6863 dev_err(hba->dev,
6864 "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
6865 __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
6866 "Enter" : "Exit",
6867 hba->errors, ufshcd_get_upmcrs(hba));
6868 ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
6869 hba->errors);
6870 ufshcd_set_link_broken(hba);
6871 queue_eh_work = true;
6872 }
6873
6874 if (queue_eh_work) {
6875 /*
6876 * Update the transfer error masks as sticky bits; do this
6877 * irrespective of the current ufshcd_state.
6878 */
6879 hba->saved_err |= hba->errors;
6880 hba->saved_uic_err |= hba->uic_error;
6881
6882 /* dump controller state before resetting */
6883 if ((hba->saved_err &
6884 (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
6885 (hba->saved_uic_err &&
6886 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
6887 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
6888 __func__, hba->saved_err,
6889 hba->saved_uic_err);
6890 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
6891 "host_regs: ");
6892 ufshcd_print_pwr_info(hba);
6893 }
6894 ufshcd_schedule_eh_work(hba);
6895 retval |= IRQ_HANDLED;
6896 }
6897 /*
6898 * if (!queue_eh_work) -
6899 * Other errors are either non-fatal, in which case the host
6900 * recovers by itself without s/w intervention, or errors that
6901 * will be handled by the SCSI core layer.
6902 */
6903 hba->errors = 0;
6904 hba->uic_error = 0;
6905 spin_unlock(hba->host->host_lock);
6906 return retval;
6907 }
6908
6909 /**
6910 * ufshcd_tmc_handler - handle task management function completion
6911 * @hba: per adapter instance
6912 *
6913 * Return:
6914 * IRQ_HANDLED - If interrupt is valid
6915 * IRQ_NONE - If invalid interrupt
6916 */
6917 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
6918 {
6919 unsigned long flags, pending, issued;
6920 irqreturn_t ret = IRQ_NONE;
6921 int tag;
6922
6923 spin_lock_irqsave(hba->host->host_lock, flags);
6924 pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
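	/*
	 * Any task we issued whose doorbell bit has been cleared by the
	 * controller has completed; wake up the waiter for each of them.
	 */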
6925 issued = hba->outstanding_tasks & ~pending;
6926 for_each_set_bit(tag, &issued, hba->nutmrs) {
6927 struct request *req = hba->tmf_rqs[tag];
6928 struct completion *c = req->end_io_data;
6929
6930 complete(c);
6931 ret = IRQ_HANDLED;
6932 }
6933 spin_unlock_irqrestore(hba->host->host_lock, flags);
6934
6935 return ret;
6936 }
6937
6938 /**
6939 * ufshcd_handle_mcq_cq_events - handle MCQ completion queue events
6940 * @hba: per adapter instance
6941 *
6942 * Return: IRQ_HANDLED if interrupt is handled.
6943 */
6944 static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
6945 {
6946 struct ufs_hw_queue *hwq;
6947 unsigned long outstanding_cqs;
6948 unsigned int nr_queues;
6949 int i, ret;
6950 u32 events;
6951
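	/*
	 * Ask the vendor driver which completion queues have pending events;
	 * if it cannot tell us, fall back to scanning every hardware queue.
	 */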
6952 ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs);
6953 if (ret)
6954 outstanding_cqs = (1U << hba->nr_hw_queues) - 1;
6955
6956 /* Exclude the poll queues */
6957 nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
6958 for_each_set_bit(i, &outstanding_cqs, nr_queues) {
6959 hwq = &hba->uhq[i];
6960
6961 events = ufshcd_mcq_read_cqis(hba, i);
6962 if (events)
6963 ufshcd_mcq_write_cqis(hba, events, i);
6964
6965 if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
6966 ufshcd_mcq_poll_cqe_lock(hba, hwq);
6967 }
6968
6969 return IRQ_HANDLED;
6970 }
6971
6972 /**
6973 * ufshcd_sl_intr - Interrupt service routine
6974 * @hba: per adapter instance
6975 * @intr_status: contains interrupts generated by the controller
6976 *
6977 * Return:
6978 * IRQ_HANDLED - If interrupt is valid
6979 * IRQ_NONE - If invalid interrupt
6980 */
6981 static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
6982 {
6983 irqreturn_t retval = IRQ_NONE;
6984
6985 if (intr_status & UFSHCD_UIC_MASK)
6986 retval |= ufshcd_uic_cmd_compl(hba, intr_status);
6987
6988 if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
6989 retval |= ufshcd_check_errors(hba, intr_status);
6990
6991 if (intr_status & UTP_TASK_REQ_COMPL)
6992 retval |= ufshcd_tmc_handler(hba);
6993
6994 if (intr_status & UTP_TRANSFER_REQ_COMPL)
6995 retval |= ufshcd_transfer_req_compl(hba);
6996
6997 if (intr_status & MCQ_CQ_EVENT_STATUS)
6998 retval |= ufshcd_handle_mcq_cq_events(hba);
6999
7000 return retval;
7001 }
7002
7003 /**
7004 * ufshcd_intr - Main interrupt service routine
7005 * @irq: irq number
7006 * @__hba: pointer to adapter instance
7007 *
7008 * Return:
7009 * IRQ_HANDLED - If interrupt is valid
7010 * IRQ_NONE - If invalid interrupt
7011 */
7012 static irqreturn_t ufshcd_intr(int irq, void *__hba)
7013 {
7014 u32 intr_status, enabled_intr_status = 0;
7015 irqreturn_t retval = IRQ_NONE;
7016 struct ufs_hba *hba = __hba;
7017 int retries = hba->nutrs;
7018
7019 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
7020 hba->ufs_stats.last_intr_status = intr_status;
7021 hba->ufs_stats.last_intr_ts = local_clock();
7022
7023 /*
7024 * At most hba->nutrs requests can be in flight. In the worst case,
7025 * if they finish one by one after the interrupt status has been
7026 * read, keep re-reading the interrupt status in a loop until all of
7027 * them have been processed before returning.
7028 */
7029 while (intr_status && retries--) {
7030 enabled_intr_status =
7031 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
7032 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
7033 if (enabled_intr_status)
7034 retval |= ufshcd_sl_intr(hba, enabled_intr_status);
7035
7036 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
7037 }
7038
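	/*
	 * If an enabled interrupt was left unhandled, log it and dump the host
	 * registers, unless it was a stale transfer-completion interrupt with
	 * no outstanding requests or the error handler is already running.
	 */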
7039 if (enabled_intr_status && retval == IRQ_NONE &&
7040 (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) ||
7041 hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) {
7042 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
7043 __func__,
7044 intr_status,
7045 hba->ufs_stats.last_intr_status,
7046 enabled_intr_status);
7047 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
7048 }
7049
7050 return retval;
7051 }
7052
7053 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
7054 {
7055 int err = 0;
7056 u32 mask = 1 << tag;
7057
7058 if (!test_bit(tag, &hba->outstanding_tasks))
7059 goto out;
7060
7061 ufshcd_utmrl_clear(hba, tag);
7062
7063 /* poll for max. 1 sec to clear door bell register by h/w */
7064 err = ufshcd_wait_for_register(hba,
7065 REG_UTP_TASK_REQ_DOOR_BELL,
7066 mask, 0, 1000, 1000);
7067
7068 dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
7069 tag, err < 0 ? "failed" : "succeeded");
7070
7071 out:
7072 return err;
7073 }
7074
7075 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
7076 struct utp_task_req_desc *treq, u8 tm_function)
7077 {
7078 struct request_queue *q = hba->tmf_queue;
7079 struct Scsi_Host *host = hba->host;
7080 DECLARE_COMPLETION_ONSTACK(wait);
7081 struct request *req;
7082 unsigned long flags;
7083 int task_tag, err;
7084
7085 /*
7086 * blk_mq_alloc_request() is used here only to get a free tag.
7087 */
7088 req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
7089 if (IS_ERR(req))
7090 return PTR_ERR(req);
7091
7092 req->end_io_data = &wait;
7093 ufshcd_hold(hba);
7094
7095 spin_lock_irqsave(host->host_lock, flags);
7096
7097 task_tag = req->tag;
7098 hba->tmf_rqs[req->tag] = req;
7099 treq->upiu_req.req_header.task_tag = task_tag;
7100
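	/*
	 * Copy the task request descriptor into this tag's slot in the UTP
	 * task management request descriptor list so the controller can
	 * fetch it once the doorbell is rung.
	 */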
7101 memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
7102 ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
7103
7104 __set_bit(task_tag, &hba->outstanding_tasks);
7105
7106 spin_unlock_irqrestore(host->host_lock, flags);
7107
7108 /* send command to the controller */
7109 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
7110
7111 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);
7112
7113 /* wait until the task management command is completed */
7114 err = wait_for_completion_io_timeout(&wait,
7115 msecs_to_jiffies(TM_CMD_TIMEOUT));
7116 if (!err) {
7117 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
7118 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
7119 __func__, tm_function);
7120 if (ufshcd_clear_tm_cmd(hba, task_tag))
7121 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
7122 __func__, task_tag);
7123 err = -ETIMEDOUT;
7124 } else {
7125 err = 0;
7126 memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
7127
7128 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
7129 }
7130
7131 spin_lock_irqsave(hba->host->host_lock, flags);
7132 hba->tmf_rqs[req->tag] = NULL;
7133 __clear_bit(task_tag, &hba->outstanding_tasks);
7134 spin_unlock_irqrestore(hba->host->host_lock, flags);
7135
7136 ufshcd_release(hba);
7137 blk_mq_free_request(req);
7138
7139 return err;
7140 }
7141
7142 /**
7143 * ufshcd_issue_tm_cmd - issues task management commands to controller
7144 * @hba: per adapter instance
7145 * @lun_id: LUN ID to which TM command is sent
7146 * @task_id: task ID to which the TM command is applicable
7147 * @tm_function: task management function opcode
7148 * @tm_response: task management service response return value
7149 *
7150 * Return: non-zero value on error, zero on success.
7151 */
7152 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
7153 u8 tm_function, u8 *tm_response)
7154 {
7155 struct utp_task_req_desc treq = { };
7156 enum utp_ocs ocs_value;
7157 int err;
7158
7159 /* Configure task request descriptor */
7160 treq.header.interrupt = 1;
7161 treq.header.ocs = OCS_INVALID_COMMAND_STATUS;
7162
7163 /* Configure task request UPIU */
7164 treq.upiu_req.req_header.transaction_code = UPIU_TRANSACTION_TASK_REQ;
7165 treq.upiu_req.req_header.lun = lun_id;
7166 treq.upiu_req.req_header.tm_function = tm_function;
7167
7168 /*
7169 * The host shall provide the same value for LUN field in the basic
7170 * header and for Input Parameter.
7171 */
7172 treq.upiu_req.input_param1 = cpu_to_be32(lun_id);
7173 treq.upiu_req.input_param2 = cpu_to_be32(task_id);
7174
7175 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
7176 if (err == -ETIMEDOUT)
7177 return err;
7178
7179 ocs_value = treq.header.ocs & MASK_OCS;
7180 if (ocs_value != OCS_SUCCESS)
7181 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
7182 __func__, ocs_value);
7183 else if (tm_response)
7184 *tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) &
7185 MASK_TM_SERVICE_RESP;
7186 return err;
7187 }
7188
7189 /**
7190 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
7191 * @hba: per-adapter instance
7192 * @req_upiu: upiu request
7193 * @rsp_upiu: upiu reply
7194 * @desc_buff: pointer to descriptor buffer, NULL if NA
7195 * @buff_len: descriptor size, 0 if NA
7196 * @cmd_type: specifies the type (NOP, Query...)
7197 * @desc_op: descriptor operation
7198 *
7199 * These types of requests use the UTP Transfer Request Descriptor (UTRD).
7200 * Therefore, they "ride" the device management infrastructure: they use
7201 * its tag and task work queues.
7202 *
7203 * Since there is only one available tag for device management commands,
7204 * the caller is expected to hold the hba->dev_cmd.lock mutex.
7205 *
7206 * Return: 0 upon success; < 0 upon failure.
7207 */
7208 static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
7209 struct utp_upiu_req *req_upiu,
7210 struct utp_upiu_req *rsp_upiu,
7211 u8 *desc_buff, int *buff_len,
7212 enum dev_cmd_type cmd_type,
7213 enum query_opcode desc_op)
7214 {
7215 const u32 tag = hba->reserved_slot;
7216 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
7217 int err = 0;
7218 u8 upiu_flags;
7219
7220 /* Protects use of hba->reserved_slot. */
7221 lockdep_assert_held(&hba->dev_cmd.lock);
7222
7223 ufshcd_setup_dev_cmd(hba, lrbp, cmd_type, 0, tag);
7224
7225 ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, 0);
7226
7227 /* update the task tag in the request upiu */
7228 req_upiu->header.task_tag = tag;
7229
7230 /* just copy the upiu request as it is */
7231 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
7232 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
7233 /* The Data Segment Area is optional depending upon the query
7234 * function value. For WRITE DESCRIPTOR, the data segment
7235 * follows right after the TSF (Transaction Specific Fields).
7236 */
7237 memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
7238 *buff_len = 0;
7239 }
7240
7241 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
7242
7243 /*
7244 * ignore the returning value here - ufshcd_check_query_response is
7245 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
7246 * read the response directly ignoring all errors.
7247 */
7248 ufshcd_issue_dev_cmd(hba, lrbp, tag, QUERY_REQ_TIMEOUT);
7249
7250 /* just copy the upiu response as it is */
7251 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
7252 if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
7253 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
7254 u16 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header
7255 .data_segment_length);
7256
7257 if (*buff_len >= resp_len) {
7258 memcpy(desc_buff, descp, resp_len);
7259 *buff_len = resp_len;
7260 } else {
7261 dev_warn(hba->dev,
7262 "%s: rsp size %d is bigger than buffer size %d",
7263 __func__, resp_len, *buff_len);
7264 *buff_len = 0;
7265 err = -EINVAL;
7266 }
7267 }
7268
7269 return err;
7270 }
7271
7272 /**
7273 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
7274 * @hba: per-adapter instance
7275 * @req_upiu: upiu request
7276 * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
7277 * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
7278 * @desc_buff: pointer to descriptor buffer, NULL if NA
7279 * @buff_len: descriptor size, 0 if NA
7280 * @desc_op: descriptor operation
7281 *
7282 * Supports UTP Transfer requests (nop and query), and UTP Task
7283 * Management requests.
7284 * It is up to the caller to fill the UPIU content properly, as it will
7285 * be copied without any further input validation.
7286 *
7287 * Return: 0 upon success; < 0 upon failure.
7288 */
7289 int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
7290 struct utp_upiu_req *req_upiu,
7291 struct utp_upiu_req *rsp_upiu,
7292 enum upiu_request_transaction msgcode,
7293 u8 *desc_buff, int *buff_len,
7294 enum query_opcode desc_op)
7295 {
7296 int err;
7297 enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
7298 struct utp_task_req_desc treq = { };
7299 enum utp_ocs ocs_value;
7300 u8 tm_f = req_upiu->header.tm_function;
7301
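	/*
	 * NOP OUT and Query requests go through the device management (UTRD)
	 * path, while Task Management requests use the UTMRD path.
	 */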
7302 switch (msgcode) {
7303 case UPIU_TRANSACTION_NOP_OUT:
7304 cmd_type = DEV_CMD_TYPE_NOP;
7305 fallthrough;
7306 case UPIU_TRANSACTION_QUERY_REQ:
7307 ufshcd_dev_man_lock(hba);
7308 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
7309 desc_buff, buff_len,
7310 cmd_type, desc_op);
7311 ufshcd_dev_man_unlock(hba);
7312
7313 break;
7314 case UPIU_TRANSACTION_TASK_REQ:
7315 treq.header.interrupt = 1;
7316 treq.header.ocs = OCS_INVALID_COMMAND_STATUS;
7317
7318 memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu));
7319
7320 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
7321 if (err == -ETIMEDOUT)
7322 break;
7323
7324 ocs_value = treq.header.ocs & MASK_OCS;
7325 if (ocs_value != OCS_SUCCESS) {
7326 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
7327 ocs_value);
7328 break;
7329 }
7330
7331 memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu));
7332
7333 break;
7334 default:
7335 err = -EINVAL;
7336
7337 break;
7338 }
7339
7340 return err;
7341 }
7342
7343 /**
7344 * ufshcd_advanced_rpmb_req_handler - handle advanced RPMB request
7345 * @hba: per adapter instance
7346 * @req_upiu: upiu request
7347 * @rsp_upiu: upiu reply
7348 * @req_ehs: EHS field which contains Advanced RPMB Request Message
7349 * @rsp_ehs: EHS field which returns Advanced RPMB Response Message
7350 * @sg_cnt: The number of sg lists actually used
7351 * @sg_list: Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation
7352 * @dir: DMA direction
7353 *
7354 * Return: zero on success, non-zero on failure.
7355 */
7356 int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
7357 struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs,
7358 struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list,
7359 enum dma_data_direction dir)
7360 {
7361 const u32 tag = hba->reserved_slot;
7362 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
7363 int err = 0;
7364 int result;
7365 u8 upiu_flags;
7366 u8 *ehs_data;
7367 u16 ehs_len;
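	/*
	 * An Advanced RPMB request carries a 2 * 32-byte EHS; advertise that
	 * length in the UTRD only if the controller supports the EHS length
	 * field (MASK_EHSLUTRD_SUPPORTED).
	 */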
7368 int ehs = (hba->capabilities & MASK_EHSLUTRD_SUPPORTED) ? 2 : 0;
7369
7370 /* Protects use of hba->reserved_slot. */
7371 ufshcd_dev_man_lock(hba);
7372
7373 ufshcd_setup_dev_cmd(hba, lrbp, DEV_CMD_TYPE_RPMB, UFS_UPIU_RPMB_WLUN, tag);
7374
7375 ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, ehs);
7376
7377 /* update the task tag */
7378 req_upiu->header.task_tag = tag;
7379
7380 /* copy the UPIU(contains CDB) request as it is */
7381 memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
7382 /* Copy EHS, starting with byte32, immediately after the CDB package */
7383 memcpy(lrbp->ucd_req_ptr + 1, req_ehs, sizeof(*req_ehs));
7384
7385 if (dir != DMA_NONE && sg_list)
7386 ufshcd_sgl_to_prdt(hba, lrbp, sg_cnt, sg_list);
7387
7388 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
7389
7390 err = ufshcd_issue_dev_cmd(hba, lrbp, tag, ADVANCED_RPMB_REQ_TIMEOUT);
7391
7392 if (!err) {
7393 /* Just copy the upiu response as it is */
7394 memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
7395 /* Get the response UPIU result */
7396 result = (lrbp->ucd_rsp_ptr->header.response << 8) |
7397 lrbp->ucd_rsp_ptr->header.status;
7398
7399 ehs_len = lrbp->ucd_rsp_ptr->header.ehs_length;
7400 /*
7401 * Since the bLength in EHS indicates the total size of the EHS Header and EHS Data
7402 * in 32 Byte units, the value of the bLength Request/Response for Advanced RPMB
7403 * Message is 02h
7404 */
7405 if (ehs_len == 2 && rsp_ehs) {
7406 /*
7407 * ucd_rsp_ptr points to a buffer with a length of 512 bytes
7408 * (ALIGNED_UPIU_SIZE = 512), and the EHS data just starts from byte32
7409 */
7410 ehs_data = (u8 *)lrbp->ucd_rsp_ptr + EHS_OFFSET_IN_RESPONSE;
7411 memcpy(rsp_ehs, ehs_data, ehs_len * 32);
7412 }
7413 }
7414
7415 ufshcd_dev_man_unlock(hba);
7416
7417 return err ? : result;
7418 }
7419
7420 /**
7421 * ufshcd_eh_device_reset_handler() - Reset a single logical unit.
7422 * @cmd: SCSI command pointer
7423 *
7424 * Return: SUCCESS or FAILED.
7425 */
7426 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
7427 {
7428 unsigned long flags, pending_reqs = 0, not_cleared = 0;
7429 struct Scsi_Host *host;
7430 struct ufs_hba *hba;
7431 struct ufs_hw_queue *hwq;
7432 struct ufshcd_lrb *lrbp;
7433 u32 pos, not_cleared_mask = 0;
7434 int err;
7435 u8 resp = 0xF, lun;
7436
7437 host = cmd->device->host;
7438 hba = shost_priv(host);
7439
7440 lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
7441 err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
7442 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7443 if (!err)
7444 err = resp;
7445 goto out;
7446 }
7447
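	/*
	 * In MCQ mode there is no doorbell register to clear, so clear each
	 * in-flight command that belongs to this LUN individually and poll
	 * its completion queue for the cleared entry.
	 */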
7448 if (hba->mcq_enabled) {
7449 for (pos = 0; pos < hba->nutrs; pos++) {
7450 lrbp = &hba->lrb[pos];
7451 if (ufshcd_cmd_inflight(lrbp->cmd) &&
7452 lrbp->lun == lun) {
7453 ufshcd_clear_cmd(hba, pos);
7454 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
7455 ufshcd_mcq_poll_cqe_lock(hba, hwq);
7456 }
7457 }
7458 err = 0;
7459 goto out;
7460 }
7461
7462 /* clear the commands that were pending for corresponding LUN */
7463 spin_lock_irqsave(&hba->outstanding_lock, flags);
7464 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
7465 if (hba->lrb[pos].lun == lun)
7466 __set_bit(pos, &pending_reqs);
7467 hba->outstanding_reqs &= ~pending_reqs;
7468 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7469
7470 for_each_set_bit(pos, &pending_reqs, hba->nutrs) {
7471 if (ufshcd_clear_cmd(hba, pos) < 0) {
7472 spin_lock_irqsave(&hba->outstanding_lock, flags);
7473 not_cleared = 1U << pos &
7474 ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7475 hba->outstanding_reqs |= not_cleared;
7476 not_cleared_mask |= not_cleared;
7477 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7478
7479 dev_err(hba->dev, "%s: failed to clear request %d\n",
7480 __func__, pos);
7481 }
7482 }
7483 __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared_mask);
7484
7485 out:
7486 hba->req_abort_count = 0;
7487 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
7488 if (!err) {
7489 err = SUCCESS;
7490 } else {
7491 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
7492 err = FAILED;
7493 }
7494 return err;
7495 }
7496
7497 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
7498 {
7499 struct ufshcd_lrb *lrbp;
7500 int tag;
7501
7502 for_each_set_bit(tag, &bitmap, hba->nutrs) {
7503 lrbp = &hba->lrb[tag];
7504 lrbp->req_abort_skip = true;
7505 }
7506 }
7507
7508 /**
7509 * ufshcd_try_to_abort_task - abort a specific task
7510 * @hba: Pointer to adapter instance
7511 * @tag: Task tag/index to be aborted
7512 *
7513 * Abort the pending command in device by sending UFS_ABORT_TASK task management
7514 * command, and in the host controller by clearing the door-bell register. There
7515 * can be a race between the controller sending the command to the device and the
7516 * abort being issued. To avoid that, first issue UFS_QUERY_TASK to check if the
7517 * command has really been issued and only then try to abort it.
7518 *
7519 * Return: zero on success, non-zero on failure.
7520 */
7521 int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
7522 {
7523 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
7524 int err;
7525 int poll_cnt;
7526 u8 resp = 0xF;
7527
7528 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
7529 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
7530 UFS_QUERY_TASK, &resp);
7531 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
7532 /* cmd pending in the device */
7533 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
7534 __func__, tag);
7535 break;
7536 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7537 /*
7538 * cmd not pending in the device, check if it is
7539 * in transition.
7540 */
7541 dev_info(
7542 hba->dev,
7543 "%s: cmd with tag %d not pending in the device.\n",
7544 __func__, tag);
7545 if (!ufshcd_cmd_inflight(lrbp->cmd)) {
7546 dev_info(hba->dev,
7547 "%s: cmd with tag=%d completed.\n",
7548 __func__, tag);
7549 return 0;
7550 }
7551 usleep_range(100, 200);
7552 } else {
7553 dev_err(hba->dev,
7554 "%s: no response from device. tag = %d, err %d\n",
7555 __func__, tag, err);
7556 return err ? : resp;
7557 }
7558 }
7559
7560 if (!poll_cnt)
7561 return -EBUSY;
7562
7563 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
7564 UFS_ABORT_TASK, &resp);
7565 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
7566 if (!err) {
7567 err = resp; /* service response error */
7568 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
7569 __func__, tag, err);
7570 }
7571 return err;
7572 }
7573
7574 err = ufshcd_clear_cmd(hba, tag);
7575 if (err)
7576 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
7577 __func__, tag, err);
7578
7579 return err;
7580 }
7581
7582 /**
7583 * ufshcd_abort - scsi host template eh_abort_handler callback
7584 * @cmd: SCSI command pointer
7585 *
7586 * Return: SUCCESS or FAILED.
7587 */
7588 static int ufshcd_abort(struct scsi_cmnd *cmd)
7589 {
7590 struct Scsi_Host *host = cmd->device->host;
7591 struct ufs_hba *hba = shost_priv(host);
7592 int tag = scsi_cmd_to_rq(cmd)->tag;
7593 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
7594 unsigned long flags;
7595 int err = FAILED;
7596 bool outstanding;
7597 u32 reg;
7598
7599 ufshcd_hold(hba);
7600
7601 if (!hba->mcq_enabled) {
7602 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
7603 if (!test_bit(tag, &hba->outstanding_reqs)) {
7604 /* If command is already aborted/completed, return FAILED. */
7605 dev_err(hba->dev,
7606 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
7607 __func__, tag, hba->outstanding_reqs, reg);
7608 goto release;
7609 }
7610 }
7611
7612 /* Print Transfer Request of aborted task */
7613 dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
7614
7615 /*
7616 * Print detailed info about aborted request.
7617 * As more than one request might get aborted at the same time,
7618 * print full information only for the first aborted request in order
7619 * to reduce repeated printouts. For other aborted requests only print
7620 * basic details.
7621 */
7622 scsi_print_command(cmd);
7623 if (!hba->req_abort_count) {
7624 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
7625 ufshcd_print_evt_hist(hba);
7626 ufshcd_print_host_state(hba);
7627 ufshcd_print_pwr_info(hba);
7628 ufshcd_print_tr(hba, tag, true);
7629 } else {
7630 ufshcd_print_tr(hba, tag, false);
7631 }
7632 hba->req_abort_count++;
7633
7634 if (!hba->mcq_enabled && !(reg & (1 << tag))) {
7635 /* only execute this code in single doorbell mode */
7636 dev_err(hba->dev,
7637 "%s: cmd was completed, but without a notifying intr, tag = %d",
7638 __func__, tag);
7639 __ufshcd_transfer_req_compl(hba, 1UL << tag);
7640 goto release;
7641 }
7642
7643 /*
7644 * Task abort to the device W-LUN is illegal. When this command
7645 * fails due to that spec violation, the next step of SCSI error
7646 * handling would be to send an LU reset which, again, is a spec
7647 * violation. To avoid these unnecessary/illegal steps, first we
7648 * clean up the lrb taken by this cmd and re-set it in
7649 * outstanding_reqs, then queue the eh_work and bail.
7650 */
7651 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
7652 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
7653
7654 spin_lock_irqsave(host->host_lock, flags);
7655 hba->force_reset = true;
7656 ufshcd_schedule_eh_work(hba);
7657 spin_unlock_irqrestore(host->host_lock, flags);
7658 goto release;
7659 }
7660
7661 if (hba->mcq_enabled) {
7662 /* MCQ mode. Branch off to handle abort for mcq mode */
7663 err = ufshcd_mcq_abort(cmd);
7664 goto release;
7665 }
7666
7667 /* Skip task abort in case previous aborts failed and report failure */
7668 if (lrbp->req_abort_skip) {
7669 dev_err(hba->dev, "%s: skipping abort\n", __func__);
7670 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
7671 goto release;
7672 }
7673
7674 err = ufshcd_try_to_abort_task(hba, tag);
7675 if (err) {
7676 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
7677 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
7678 err = FAILED;
7679 goto release;
7680 }
7681
7682 /*
7683 * Clear the corresponding bit from outstanding_reqs since the command
7684 * has been aborted successfully.
7685 */
7686 spin_lock_irqsave(&hba->outstanding_lock, flags);
7687 outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
7688 spin_unlock_irqrestore(&hba->outstanding_lock, flags);
7689
7690 if (outstanding)
7691 ufshcd_release_scsi_cmd(hba, lrbp);
7692
7693 err = SUCCESS;
7694
7695 release:
7696 /* Matches the ufshcd_hold() call at the start of this function. */
7697 ufshcd_release(hba);
7698 return err;
7699 }
7700
7701 /**
7702 * ufshcd_process_probe_result - Process the ufshcd_probe_hba() result.
7703 * @hba: UFS host controller instance.
7704 * @probe_start: time when the ufshcd_probe_hba() call started.
7705 * @ret: ufshcd_probe_hba() return value.
7706 */
7707 static void ufshcd_process_probe_result(struct ufs_hba *hba,
7708 ktime_t probe_start, int ret)
7709 {
7710 unsigned long flags;
7711
7712 spin_lock_irqsave(hba->host->host_lock, flags);
7713 if (ret)
7714 hba->ufshcd_state = UFSHCD_STATE_ERROR;
7715 else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
7716 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
7717 spin_unlock_irqrestore(hba->host->host_lock, flags);
7718
7719 trace_ufshcd_init(hba, ret,
7720 ktime_to_us(ktime_sub(ktime_get(), probe_start)),
7721 hba->curr_dev_pwr_mode, hba->uic_link_state);
7722 }
7723
7724 /**
7725 * ufshcd_host_reset_and_restore - reset and restore host controller
7726 * @hba: per-adapter instance
7727 *
7728 * Note that host controller reset may issue DME_RESET to
7729 * local and remote (device) Uni-Pro stack and the attributes
7730 * are reset to default state.
7731 *
7732 * Return: zero on success, non-zero on failure.
7733 */
7734 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
7735 {
7736 int err;
7737
7738 /*
7739 * Stop the host controller and complete the requests
7740 * cleared by h/w
7741 */
7742 ufshcd_hba_stop(hba);
7743 hba->silence_err_logs = true;
7744 ufshcd_complete_requests(hba, true);
7745 hba->silence_err_logs = false;
7746
7747 /* scale up clocks to max frequency before full reinitialization */
7748 ufshcd_scale_clks(hba, ULONG_MAX, true);
7749
7750 err = ufshcd_hba_enable(hba);
7751
7752 /* Establish the link again and restore the device */
7753 if (!err) {
7754 ktime_t probe_start = ktime_get();
7755
7756 err = ufshcd_device_init(hba, /*init_dev_params=*/false);
7757 if (!err)
7758 err = ufshcd_probe_hba(hba, false);
7759 ufshcd_process_probe_result(hba, probe_start, err);
7760 }
7761
7762 if (err)
7763 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
7764 ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
7765 return err;
7766 }
7767
7768 /**
7769 * ufshcd_reset_and_restore - reset and re-initialize host/device
7770 * @hba: per-adapter instance
7771 *
7772 * Reset and recover device, host and re-establish link. This
7773 * is helpful to recover the communication in fatal error conditions.
7774 *
7775 * Return: zero on success, non-zero on failure.
7776 */
7777 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
7778 {
7779 u32 saved_err = 0;
7780 u32 saved_uic_err = 0;
7781 int err = 0;
7782 unsigned long flags;
7783 int retries = MAX_HOST_RESET_RETRIES;
7784
7785 spin_lock_irqsave(hba->host->host_lock, flags);
7786 do {
7787 /*
7788 * This is a fresh start, cache and clear saved error first,
7789 * in case new error generated during reset and restore.
7790 */
7791 saved_err |= hba->saved_err;
7792 saved_uic_err |= hba->saved_uic_err;
7793 hba->saved_err = 0;
7794 hba->saved_uic_err = 0;
7795 hba->force_reset = false;
7796 hba->ufshcd_state = UFSHCD_STATE_RESET;
7797 spin_unlock_irqrestore(hba->host->host_lock, flags);
7798
7799 /* Reset the attached device */
7800 ufshcd_device_reset(hba);
7801
7802 err = ufshcd_host_reset_and_restore(hba);
7803
7804 spin_lock_irqsave(hba->host->host_lock, flags);
7805 if (err)
7806 continue;
7807 /* Do not exit unless operational or dead */
7808 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
7809 hba->ufshcd_state != UFSHCD_STATE_ERROR &&
7810 hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL)
7811 err = -EAGAIN;
7812 } while (err && --retries);
7813
7814 /*
7815 * Inform the SCSI mid-layer that we did a reset so that it can
7816 * handle Unit Attention properly.
7817 */
7818 scsi_report_bus_reset(hba->host, 0);
7819 if (err) {
7820 hba->ufshcd_state = UFSHCD_STATE_ERROR;
7821 hba->saved_err |= saved_err;
7822 hba->saved_uic_err |= saved_uic_err;
7823 }
7824 spin_unlock_irqrestore(hba->host->host_lock, flags);
7825
7826 return err;
7827 }
7828
7829 /**
7830 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
7831 * @cmd: SCSI command pointer
7832 *
7833 * Return: SUCCESS or FAILED.
7834 */
7835 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
7836 {
7837 int err = SUCCESS;
7838 unsigned long flags;
7839 struct ufs_hba *hba;
7840
7841 hba = shost_priv(cmd->device->host);
7842
7843 /*
7844 * If runtime PM sent SSU and got a timeout, scsi_error_handler is
7845 * stuck in this function waiting for flush_work(&hba->eh_work). And
7846 * ufshcd_err_handler(eh_work) is stuck waiting for runtime PM. Do
7847 * ufshcd_link_recovery instead of eh_work to prevent deadlock.
7848 */
7849 if (hba->pm_op_in_progress) {
7850 if (ufshcd_link_recovery(hba))
7851 err = FAILED;
7852
7853 return err;
7854 }
7855
7856 spin_lock_irqsave(hba->host->host_lock, flags);
7857 hba->force_reset = true;
7858 ufshcd_schedule_eh_work(hba);
7859 dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
7860 spin_unlock_irqrestore(hba->host->host_lock, flags);
7861
7862 flush_work(&hba->eh_work);
7863
7864 spin_lock_irqsave(hba->host->host_lock, flags);
7865 if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
7866 err = FAILED;
7867 spin_unlock_irqrestore(hba->host->host_lock, flags);
7868
7869 return err;
7870 }
7871
7872 /**
7873 * ufshcd_get_max_icc_level - calculate the ICC level
7874 * @sup_curr_uA: max. current supported by the regulator
7875 * @start_scan: row at the desc table to start scan from
7876 * @buff: power descriptor buffer
7877 *
7878 * Return: calculated max ICC level for specific regulator.
7879 */
7880 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
7881 const char *buff)
7882 {
7883 int i;
7884 int curr_uA;
7885 u16 data;
7886 u16 unit;
7887
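	/*
	 * Scan the ICC level entries from start_scan down to 0. Each 16-bit
	 * entry encodes a unit and a value; pick the highest level whose
	 * current requirement fits within the regulator capability.
	 */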
7888 for (i = start_scan; i >= 0; i--) {
7889 data = get_unaligned_be16(&buff[2 * i]);
7890 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
7891 ATTR_ICC_LVL_UNIT_OFFSET;
7892 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
7893 switch (unit) {
7894 case UFSHCD_NANO_AMP:
7895 curr_uA = curr_uA / 1000;
7896 break;
7897 case UFSHCD_MILI_AMP:
7898 curr_uA = curr_uA * 1000;
7899 break;
7900 case UFSHCD_AMP:
7901 curr_uA = curr_uA * 1000 * 1000;
7902 break;
7903 case UFSHCD_MICRO_AMP:
7904 default:
7905 break;
7906 }
7907 if (sup_curr_uA >= curr_uA)
7908 break;
7909 }
7910 if (i < 0) {
7911 i = 0;
7912 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
7913 }
7914
7915 return (u32)i;
7916 }
7917
7918 /**
7919 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
7920 * If the regulators are not initialized, 0 is returned.
7921 * @hba: per-adapter instance
7922 * @desc_buf: power descriptor buffer to extract ICC levels from.
7923 *
7924 * Return: calculated ICC level.
7925 */
7926 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
7927 const u8 *desc_buf)
7928 {
7929 u32 icc_level = 0;
7930
7931 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
7932 !hba->vreg_info.vccq2) {
7933 /*
7934 * Use dev_dbg to avoid messages during runtime PM; otherwise the
7935 * messages written back to storage by user space would cause a
7936 * runtime resume, which would cause more messages, and so on in a
7937 * never-ending cycle.
7938 */
7939 dev_dbg(hba->dev,
7940 "%s: Regulator capability was not set, actvIccLevel=%d",
7941 __func__, icc_level);
7942 goto out;
7943 }
7944
7945 if (hba->vreg_info.vcc->max_uA)
7946 icc_level = ufshcd_get_max_icc_level(
7947 hba->vreg_info.vcc->max_uA,
7948 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
7949 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
7950
7951 if (hba->vreg_info.vccq->max_uA)
7952 icc_level = ufshcd_get_max_icc_level(
7953 hba->vreg_info.vccq->max_uA,
7954 icc_level,
7955 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
7956
7957 if (hba->vreg_info.vccq2->max_uA)
7958 icc_level = ufshcd_get_max_icc_level(
7959 hba->vreg_info.vccq2->max_uA,
7960 icc_level,
7961 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
7962 out:
7963 return icc_level;
7964 }
7965
7966 static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
7967 {
7968 int ret;
7969 u8 *desc_buf;
7970 u32 icc_level;
7971
7972 desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
7973 if (!desc_buf)
7974 return;
7975
7976 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
7977 desc_buf, QUERY_DESC_MAX_SIZE);
7978 if (ret) {
7979 dev_err(hba->dev,
7980 "%s: Failed reading power descriptor ret = %d",
7981 __func__, ret);
7982 goto out;
7983 }
7984
7985 icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf);
7986 dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
7987
7988 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
7989 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
7990
7991 if (ret)
7992 dev_err(hba->dev,
7993 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
7994 __func__, icc_level, ret);
7995
7996 out:
7997 kfree(desc_buf);
7998 }
7999
8000 static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
8001 {
8002 struct Scsi_Host *shost = sdev->host;
8003
8004 scsi_autopm_get_device(sdev);
8005 blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
8006 if (sdev->rpm_autosuspend)
8007 pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
8008 shost->rpm_autosuspend_delay);
8009 scsi_autopm_put_device(sdev);
8010 }
8011
8012 /**
8013 * ufshcd_scsi_add_wlus - Adds required W-LUs
8014 * @hba: per-adapter instance
8015 *
8016 * UFS device specification requires the UFS devices to support 4 well known
8017 * logical units:
8018 * "REPORT_LUNS" (address: 01h)
8019 * "UFS Device" (address: 50h)
8020 * "RPMB" (address: 44h)
8021 * "BOOT" (address: 30h)
8022 * The UFS device's power management is controlled through the "POWER
8023 * CONDITION" field of the SSU (START STOP UNIT) command. However, this field
8024 * takes effect only when the command is sent to the "UFS Device" well known
8025 * logical unit, hence we require a scsi_device instance to represent this
8026 * logical unit so that the UFS host driver can send the SSU command for power management.
8027 *
8028 * We also require the scsi_device instance for the "RPMB" (Replay Protected
8029 * Memory Block) LU so that user space processes can control this LU. User
8030 * space may also want to have access to the BOOT LU.
8031 *
8032 * This function adds scsi device instances for all well known LUs
8033 * (except the "REPORT LUNS" LU).
8034 *
8035 * Return: zero on success (all required W-LUs are added successfully),
8036 * non-zero error value on failure (if failed to add any of the required W-LU).
8037 */
8038 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
8039 {
8040 int ret = 0;
8041 struct scsi_device *sdev_boot, *sdev_rpmb;
8042
8043 hba->ufs_device_wlun = __scsi_add_device(hba->host, 0, 0,
8044 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
8045 if (IS_ERR(hba->ufs_device_wlun)) {
8046 ret = PTR_ERR(hba->ufs_device_wlun);
8047 hba->ufs_device_wlun = NULL;
8048 goto out;
8049 }
8050 scsi_device_put(hba->ufs_device_wlun);
8051
8052 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
8053 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
8054 if (IS_ERR(sdev_rpmb)) {
8055 ret = PTR_ERR(sdev_rpmb);
8056 goto remove_ufs_device_wlun;
8057 }
8058 ufshcd_blk_pm_runtime_init(sdev_rpmb);
8059 scsi_device_put(sdev_rpmb);
8060
8061 sdev_boot = __scsi_add_device(hba->host, 0, 0,
8062 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
8063 if (IS_ERR(sdev_boot)) {
8064 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
8065 } else {
8066 ufshcd_blk_pm_runtime_init(sdev_boot);
8067 scsi_device_put(sdev_boot);
8068 }
8069 goto out;
8070
8071 remove_ufs_device_wlun:
8072 scsi_remove_device(hba->ufs_device_wlun);
8073 out:
8074 return ret;
8075 }
8076
8077 static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
8078 {
8079 struct ufs_dev_info *dev_info = &hba->dev_info;
8080 u8 lun;
8081 u32 d_lu_wb_buf_alloc;
8082 u32 ext_ufs_feature;
8083
8084 if (!ufshcd_is_wb_allowed(hba))
8085 return;
8086
8087 /*
8088 * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
8089 * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
8090 * enabled
8091 */
8092 if (!(dev_info->wspecversion >= 0x310 ||
8093 dev_info->wspecversion == 0x220 ||
8094 (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
8095 goto wb_disabled;
8096
8097 ext_ufs_feature = get_unaligned_be32(desc_buf +
8098 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
8099
8100 if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP))
8101 goto wb_disabled;
8102
8103 /*
8104 * WB may be supported but not configured during provisioning. The spec
8105 * says that in dedicated WB buffer mode at most one LUN has a WB
8106 * buffer configured.
8107 */
8108 dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
8109
8110 dev_info->b_presrv_uspc_en =
8111 desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
8112
8113 if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) {
8114 if (!get_unaligned_be32(desc_buf +
8115 DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS))
8116 goto wb_disabled;
8117 } else {
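		/*
		 * Dedicated buffer mode: find the first LUN that has a
		 * WriteBooster buffer allocated and remember it.
		 */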
8118 for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
8119 d_lu_wb_buf_alloc = 0;
8120 ufshcd_read_unit_desc_param(hba,
8121 lun,
8122 UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
8123 (u8 *)&d_lu_wb_buf_alloc,
8124 sizeof(d_lu_wb_buf_alloc));
8125 if (d_lu_wb_buf_alloc) {
8126 dev_info->wb_dedicated_lu = lun;
8127 break;
8128 }
8129 }
8130
8131 if (!d_lu_wb_buf_alloc)
8132 goto wb_disabled;
8133 }
8134
8135 if (!ufshcd_is_wb_buf_lifetime_available(hba))
8136 goto wb_disabled;
8137
8138 return;
8139
8140 wb_disabled:
8141 hba->caps &= ~UFSHCD_CAP_WB_EN;
8142 }
8143
8144 static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
8145 {
8146 struct ufs_dev_info *dev_info = &hba->dev_info;
8147 u32 ext_ufs_feature;
8148 u8 mask = 0;
8149
8150 if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300)
8151 return;
8152
8153 ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
8154
8155 if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF)
8156 mask |= MASK_EE_TOO_LOW_TEMP;
8157
8158 if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF)
8159 mask |= MASK_EE_TOO_HIGH_TEMP;
8160
8161 if (mask) {
8162 ufshcd_enable_ee(hba, mask);
8163 ufs_hwmon_probe(hba, mask);
8164 }
8165 }
8166
8167 static void ufshcd_device_lvl_exception_probe(struct ufs_hba *hba, u8 *desc_buf)
8168 {
8169 u32 ext_ufs_feature;
8170
8171 if (hba->dev_info.wspecversion < 0x410)
8172 return;
8173
8174 ext_ufs_feature = get_unaligned_be32(desc_buf +
8175 DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
8176 if (!(ext_ufs_feature & UFS_DEV_LVL_EXCEPTION_SUP))
8177 return;
8178
8179 atomic_set(&hba->dev_lvl_exception_count, 0);
8180 ufshcd_enable_ee(hba, MASK_EE_DEV_LVL_EXCEPTION);
8181 }
8182
8183 static void ufshcd_set_rtt(struct ufs_hba *hba)
8184 {
8185 struct ufs_dev_info *dev_info = &hba->dev_info;
8186 u32 rtt = 0;
8187 u32 dev_rtt = 0;
8188 int host_rtt_cap = hba->vops && hba->vops->max_num_rtt ?
8189 hba->vops->max_num_rtt : hba->nortt;
8190
8191 /* RTT override makes sense only for UFS-4.0 and above */
8192 if (dev_info->wspecversion < 0x400)
8193 return;
8194
8195 if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
8196 QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &dev_rtt)) {
8197 dev_err(hba->dev, "failed reading bMaxNumOfRTT\n");
8198 return;
8199 }
8200
8201 /* do not override if it was already written */
8202 if (dev_rtt != DEFAULT_MAX_NUM_RTT)
8203 return;
8204
8205 rtt = min_t(int, dev_info->rtt_cap, host_rtt_cap);
8206
8207 if (rtt == dev_rtt)
8208 return;
8209
8210 if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
8211 QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt))
8212 dev_err(hba->dev, "failed writing bMaxNumOfRTT\n");
8213 }
8214
8215 void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
8216 const struct ufs_dev_quirk *fixups)
8217 {
8218 const struct ufs_dev_quirk *f;
8219 struct ufs_dev_info *dev_info = &hba->dev_info;
8220
8221 if (!fixups)
8222 return;
8223
8224 for (f = fixups; f->quirk; f++) {
8225 if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
8226 f->wmanufacturerid == UFS_ANY_VENDOR) &&
8227 ((dev_info->model &&
8228 STR_PRFX_EQUAL(f->model, dev_info->model)) ||
8229 !strcmp(f->model, UFS_ANY_MODEL)))
8230 hba->dev_quirks |= f->quirk;
8231 }
8232 }
8233 EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
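
/*
 * A minimal sketch of how a vendor host driver might call
 * ufshcd_fixup_dev_quirks() from its own fixup hook; the table below is
 * purely illustrative and not taken from any real driver:
 *
 *	static const struct ufs_dev_quirk example_fixups[] = {
 *		{ .wmanufacturerid = UFS_VENDOR_SAMSUNG,
 *		  .model = UFS_ANY_MODEL,
 *		  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
 *		{}
 *	};
 *
 *	static void example_fixup_dev_quirks(struct ufs_hba *hba)
 *	{
 *		ufshcd_fixup_dev_quirks(hba, example_fixups);
 *	}
 */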
8234
8235 static void ufs_fixup_device_setup(struct ufs_hba *hba)
8236 {
8237 /* fix by general quirk table */
8238 ufshcd_fixup_dev_quirks(hba, ufs_fixups);
8239
8240 /* allow vendors to fix quirks */
8241 ufshcd_vops_fixup_dev_quirks(hba);
8242 }
8243
8244 static void ufshcd_update_rtc(struct ufs_hba *hba)
8245 {
8246 struct timespec64 ts64;
8247 int err;
8248 u32 val;
8249
8250 ktime_get_real_ts64(&ts64);
8251
8252 if (ts64.tv_sec < hba->dev_info.rtc_time_baseline) {
8253 dev_warn_once(hba->dev, "%s: Current time precedes previous setting!\n", __func__);
8254 return;
8255 }
8256
8257 /*
8258 * The Absolute RTC mode has a 136-year limit, spanning from 2010 to 2146. If a time beyond
8259 * 2146 is required, it is recommended to choose the relative RTC mode.
8260 */
8261 val = ts64.tv_sec - hba->dev_info.rtc_time_baseline;
8262
8263 /* Skip update RTC if RPM state is not RPM_ACTIVE */
8264 if (ufshcd_rpm_get_if_active(hba) <= 0)
8265 return;
8266
8267 err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_SECONDS_PASSED,
8268 0, 0, &val);
8269 ufshcd_rpm_put(hba);
8270
8271 if (err)
8272 dev_err(hba->dev, "%s: Failed to update rtc %d\n", __func__, err);
8273 else if (hba->dev_info.rtc_type == UFS_RTC_RELATIVE)
8274 hba->dev_info.rtc_time_baseline = ts64.tv_sec;
8275 }
8276
8277 static void ufshcd_rtc_work(struct work_struct *work)
8278 {
8279 struct ufs_hba *hba;
8280
8281 hba = container_of(to_delayed_work(work), struct ufs_hba, ufs_rtc_update_work);
8282
8283 /* Update RTC only when there are no requests in progress and UFSHCI is operational */
8284 if (!ufshcd_is_ufs_dev_busy(hba) &&
8285 hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL &&
8286 !hba->clk_gating.active_reqs)
8287 ufshcd_update_rtc(hba);
8288
8289 if (ufshcd_is_ufs_dev_active(hba) && hba->dev_info.rtc_update_period)
8290 schedule_delayed_work(&hba->ufs_rtc_update_work,
8291 msecs_to_jiffies(hba->dev_info.rtc_update_period));
8292 }
8293
8294 static void ufs_init_rtc(struct ufs_hba *hba, u8 *desc_buf)
8295 {
8296 u16 periodic_rtc_update = get_unaligned_be16(&desc_buf[DEVICE_DESC_PARAM_FRQ_RTC]);
8297 struct ufs_dev_info *dev_info = &hba->dev_info;
8298
8299 if (periodic_rtc_update & UFS_RTC_TIME_BASELINE) {
8300 dev_info->rtc_type = UFS_RTC_ABSOLUTE;
8301
8302 /*
8303 * Linux measures time as the number of seconds elapsed since 00:00:00 UTC
8304 * on January 1st 1970, whereas the UFS absolute RTC counts from January 1st
8305 * 2010 00:00 UTC, so adjust the absolute baseline accordingly.
8306 */
8307 dev_info->rtc_time_baseline = mktime64(2010, 1, 1, 0, 0, 0) -
8308 mktime64(1970, 1, 1, 0, 0, 0);
8309 } else {
8310 dev_info->rtc_type = UFS_RTC_RELATIVE;
8311 dev_info->rtc_time_baseline = 0;
8312 }
8313
8314 /*
8315 * Ignore the TIME_PERIOD defined in wPeriodicRTCUpdate because the spec does not
8316 * clearly state how to calculate the update period for each time unit. Disable the
8317 * periodic RTC update work by default and let user space configure it via sysfs.
8318 */
8319 dev_info->rtc_update_period = 0;
8320 }
8321
8322 static int ufs_get_device_desc(struct ufs_hba *hba)
8323 {
8324 int err;
8325 u8 model_index;
8326 u8 *desc_buf;
8327 struct ufs_dev_info *dev_info = &hba->dev_info;
8328
8329 desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
8330 if (!desc_buf) {
8331 err = -ENOMEM;
8332 goto out;
8333 }
8334
8335 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
8336 QUERY_DESC_MAX_SIZE);
8337 if (err) {
8338 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
8339 __func__, err);
8340 goto out;
8341 }
8342
8343 /*
8344 * getting vendor (manufacturerID) and Bank Index in big endian
8345 * format
8346 */
8347 dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
8348 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
8349
8350 /* getting Specification Version in big endian format */
8351 dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
8352 desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
8353 dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
8354
8355 dev_info->rtt_cap = desc_buf[DEVICE_DESC_PARAM_RTT_CAP];
8356
8357 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
8358
8359 err = ufshcd_read_string_desc(hba, model_index,
8360 &dev_info->model, SD_ASCII_STD);
8361 if (err < 0) {
8362 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
8363 __func__, err);
8364 goto out;
8365 }
8366
8367 hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
8368 desc_buf[DEVICE_DESC_PARAM_NUM_WLU];
8369
8370 ufs_fixup_device_setup(hba);
8371
8372 ufshcd_wb_probe(hba, desc_buf);
8373
8374 ufshcd_temp_notif_probe(hba, desc_buf);
8375
8376 if (dev_info->wspecversion >= 0x410) {
8377 hba->critical_health_count = 0;
8378 ufshcd_enable_ee(hba, MASK_EE_HEALTH_CRITICAL);
8379 }
8380
8381 ufs_init_rtc(hba, desc_buf);
8382
8383 ufshcd_device_lvl_exception_probe(hba, desc_buf);
8384
8385 /*
8386 * ufshcd_read_string_desc() returns the size of the string on success;
8387 * reset the error value.
8388 */
8389 err = 0;
8390
8391 out:
8392 kfree(desc_buf);
8393 return err;
8394 }
8395
8396 static void ufs_put_device_desc(struct ufs_hba *hba)
8397 {
8398 struct ufs_dev_info *dev_info = &hba->dev_info;
8399
8400 kfree(dev_info->model);
8401 dev_info->model = NULL;
8402 }
8403
8404 /**
8405 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
8406 * less than device PA_TACTIVATE time.
8407 * @hba: per-adapter instance
8408 *
8409 * Some UFS devices require host PA_TACTIVATE to be lower than device
8410 * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
8411 * for such devices.
8412 *
8413 * Return: zero on success, non-zero error value on failure.
8414 */
8415 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
8416 {
8417 int ret = 0;
8418 u32 granularity, peer_granularity;
8419 u32 pa_tactivate, peer_pa_tactivate;
8420 u32 pa_tactivate_us, peer_pa_tactivate_us;
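	/* PA_GRANULARITY values 1..6 correspond to step sizes of 1, 4, 8, 16, 32 and 100 us. */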
8421 static const u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
8422
8423 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
8424 &granularity);
8425 if (ret)
8426 goto out;
8427
8428 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
8429 &peer_granularity);
8430 if (ret)
8431 goto out;
8432
8433 if ((granularity < PA_GRANULARITY_MIN_VAL) ||
8434 (granularity > PA_GRANULARITY_MAX_VAL)) {
8435 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
8436 __func__, granularity);
8437 return -EINVAL;
8438 }
8439
8440 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
8441 (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
8442 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
8443 __func__, peer_granularity);
8444 return -EINVAL;
8445 }
8446
8447 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
8448 if (ret)
8449 goto out;
8450
8451 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
8452 &peer_pa_tactivate);
8453 if (ret)
8454 goto out;
8455
8456 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
8457 peer_pa_tactivate_us = peer_pa_tactivate *
8458 gran_to_us_table[peer_granularity - 1];
8459
8460 if (pa_tactivate_us >= peer_pa_tactivate_us) {
8461 u32 new_peer_pa_tactivate;
8462
8463 new_peer_pa_tactivate = pa_tactivate_us /
8464 gran_to_us_table[peer_granularity - 1];
8465 new_peer_pa_tactivate++;
8466 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
8467 new_peer_pa_tactivate);
8468 }
8469
8470 out:
8471 return ret;
8472 }
8473
8474 /**
8475 * ufshcd_quirk_override_pa_h8time - Ensures proper adjustment of PA_HIBERN8TIME.
8476 * @hba: per-adapter instance
8477 *
8478 * Some UFS devices require specific adjustments to the PA_HIBERN8TIME parameter
8479 * to ensure proper hibernation timing. This function retrieves the current
8480 * PA_HIBERN8TIME value and increments it by 100us.
8481 */
8482 static void ufshcd_quirk_override_pa_h8time(struct ufs_hba *hba)
8483 {
8484 u32 pa_h8time;
8485 int ret;
8486
8487 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME), &pa_h8time);
8488 if (ret) {
8489 dev_err(hba->dev, "Failed to get PA_HIBERN8TIME: %d\n", ret);
8490 return;
8491 }
8492
8493 /* Increment by 1 to increase hibernation time by 100 µs */
8494 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), pa_h8time + 1);
8495 if (ret)
8496 dev_err(hba->dev, "Failed updating PA_HIBERN8TIME: %d\n", ret);
8497 }
8498
8499 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
8500 {
8501 ufshcd_vops_apply_dev_quirks(hba);
8502
8503 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
8504 /* set 1ms timeout for PA_TACTIVATE */
8505 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
8506
8507 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
8508 ufshcd_quirk_tune_host_pa_tactivate(hba);
8509
8510 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_HIBER8TIME)
8511 ufshcd_quirk_override_pa_h8time(hba);
8512 }
8513
8514 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
8515 {
8516 hba->ufs_stats.hibern8_exit_cnt = 0;
8517 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
8518 hba->req_abort_count = 0;
8519 }
8520
8521 static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
8522 {
8523 int err;
8524 u8 *desc_buf;
8525
8526 desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
8527 if (!desc_buf) {
8528 err = -ENOMEM;
8529 goto out;
8530 }
8531
8532 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
8533 desc_buf, QUERY_DESC_MAX_SIZE);
8534 if (err) {
8535 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
8536 __func__, err);
8537 goto out;
8538 }
8539
8540 if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
8541 hba->dev_info.max_lu_supported = 32;
8542 else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
8543 hba->dev_info.max_lu_supported = 8;
8544
8545 out:
8546 kfree(desc_buf);
8547 return err;
8548 }
8549
8550 struct ufs_ref_clk {
8551 unsigned long freq_hz;
8552 enum ufs_ref_clk_freq val;
8553 };
8554
8555 static const struct ufs_ref_clk ufs_ref_clk_freqs[] = {
8556 {19200000, REF_CLK_FREQ_19_2_MHZ},
8557 {26000000, REF_CLK_FREQ_26_MHZ},
8558 {38400000, REF_CLK_FREQ_38_4_MHZ},
8559 {52000000, REF_CLK_FREQ_52_MHZ},
8560 {0, REF_CLK_FREQ_INVAL},
8561 };
8562
8563 static enum ufs_ref_clk_freq
8564 ufs_get_bref_clk_from_hz(unsigned long freq)
8565 {
8566 int i;
8567
8568 for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
8569 if (ufs_ref_clk_freqs[i].freq_hz == freq)
8570 return ufs_ref_clk_freqs[i].val;
8571
8572 return REF_CLK_FREQ_INVAL;
8573 }
8574
8575 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
8576 {
8577 unsigned long freq;
8578
8579 freq = clk_get_rate(refclk);
8580
8581 hba->dev_ref_clk_freq =
8582 ufs_get_bref_clk_from_hz(freq);
8583
8584 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
8585 dev_err(hba->dev,
8586 "invalid ref_clk setting = %ld\n", freq);
8587 }
8588
8589 static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
8590 {
8591 int err;
8592 u32 ref_clk;
8593 u32 freq = hba->dev_ref_clk_freq;
8594
8595 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
8596 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
8597
8598 if (err) {
8599 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
8600 err);
8601 goto out;
8602 }
8603
8604 if (ref_clk == freq)
8605 goto out; /* nothing to update */
8606
8607 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
8608 QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
8609
8610 if (err) {
8611 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
8612 ufs_ref_clk_freqs[freq].freq_hz);
8613 goto out;
8614 }
8615
8616 dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
8617 ufs_ref_clk_freqs[freq].freq_hz);
8618
8619 out:
8620 return err;
8621 }
8622
8623 static int ufshcd_device_params_init(struct ufs_hba *hba)
8624 {
8625 bool flag;
8626 int ret;
8627
8628 /* Init UFS geometry descriptor related parameters */
8629 ret = ufshcd_device_geo_params_init(hba);
8630 if (ret)
8631 goto out;
8632
8633 /* Check and apply UFS device quirks */
8634 ret = ufs_get_device_desc(hba);
8635 if (ret) {
8636 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
8637 __func__, ret);
8638 goto out;
8639 }
8640
8641 ufshcd_set_rtt(hba);
8642
8643 ufshcd_get_ref_clk_gating_wait(hba);
8644
8645 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
8646 QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
8647 hba->dev_info.f_power_on_wp_en = flag;
8648
8649 /* Probe maximum power mode co-supported by both UFS host and device */
8650 if (ufshcd_get_max_pwr_mode(hba))
8651 dev_err(hba->dev,
8652 "%s: Failed getting max supported power mode\n",
8653 __func__);
8654 out:
8655 return ret;
8656 }
8657
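/*
 * Write the current wall-clock time (ktime_get_real_ns()) to the device
 * TIMESTAMP attribute. Only applicable to UFS 4.0 and later devices
 * (wspecversion >= 0x400).
 */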
8658 static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
8659 {
8660 int err;
8661 struct ufs_query_req *request = NULL;
8662 struct ufs_query_res *response = NULL;
8663 struct ufs_dev_info *dev_info = &hba->dev_info;
8664 struct utp_upiu_query_v4_0 *upiu_data;
8665
8666 if (dev_info->wspecversion < 0x400)
8667 return;
8668
8669 ufshcd_dev_man_lock(hba);
8670
8671 ufshcd_init_query(hba, &request, &response,
8672 UPIU_QUERY_OPCODE_WRITE_ATTR,
8673 QUERY_ATTR_IDN_TIMESTAMP, 0, 0);
8674
8675 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
8676
8677 upiu_data = (struct utp_upiu_query_v4_0 *)&request->upiu_req;
8678
8679 put_unaligned_be64(ktime_get_real_ns(), &upiu_data->osf3);
8680
8681 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
8682
8683 if (err)
8684 dev_err(hba->dev, "%s: failed to set timestamp %d\n",
8685 __func__, err);
8686
8687 ufshcd_dev_man_unlock(hba);
8688 }
8689
8690 /**
8691 * ufshcd_add_lus - probe and add UFS logical units
8692 * @hba: per-adapter instance
8693 *
8694 * Return: 0 upon success; < 0 upon failure.
8695 */
8696 static int ufshcd_add_lus(struct ufs_hba *hba)
8697 {
8698 int ret;
8699
8700 /* Add required well known logical units to scsi mid layer */
8701 ret = ufshcd_scsi_add_wlus(hba);
8702 if (ret)
8703 goto out;
8704
8705 /* Initialize devfreq after UFS device is detected */
8706 if (ufshcd_is_clkscaling_supported(hba)) {
8707 memcpy(&hba->clk_scaling.saved_pwr_info,
8708 &hba->pwr_info,
8709 sizeof(struct ufs_pa_layer_attr));
8710 hba->clk_scaling.is_allowed = true;
8711
8712 ret = ufshcd_devfreq_init(hba);
8713 if (ret)
8714 goto out;
8715
8716 hba->clk_scaling.is_enabled = true;
8717 ufshcd_init_clk_scaling_sysfs(hba);
8718 }
8719
8720 /*
8721 * The RTC update code accesses the hba->ufs_device_wlun->sdev_gendev
8722 * pointer and hence must only be started after the WLUN pointer has
8723 * been initialized by ufshcd_scsi_add_wlus().
8724 */
8725 schedule_delayed_work(&hba->ufs_rtc_update_work,
8726 msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
8727
8728 ufs_bsg_probe(hba);
8729 scsi_scan_host(hba->host);
8730
8731 out:
8732 return ret;
8733 }
8734
8735 /* SDB - Single Doorbell */
8736 static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
8737 {
8738 size_t ucdl_size, utrdl_size;
8739
8740 ucdl_size = ufshcd_get_ucd_size(hba) * nutrs;
8741 dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
8742 hba->ucdl_dma_addr);
8743
8744 utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
8745 dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr,
8746 hba->utrdl_dma_addr);
8747
8748 devm_kfree(hba->dev, hba->lrb);
8749 }
8750
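/*
 * Switch the host from single-doorbell (SDB) to MCQ resources: decide the
 * MCQ queue depth, re-allocate host memory if the number of tags changed and
 * allocate the MCQ queue descriptors. On failure the previous nutrs value is
 * restored.
 */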
8751 static int ufshcd_alloc_mcq(struct ufs_hba *hba)
8752 {
8753 int ret;
8754 int old_nutrs = hba->nutrs;
8755
8756 ret = ufshcd_mcq_decide_queue_depth(hba);
8757 if (ret < 0)
8758 return ret;
8759
8760 hba->nutrs = ret;
8761 ret = ufshcd_mcq_init(hba);
8762 if (ret)
8763 goto err;
8764
8765 /*
8766 * The memory previously allocated for nutrs may not be enough in MCQ mode.
8767 * The number of supported tags in MCQ mode may be larger than in SDB mode.
8768 */
8769 if (hba->nutrs != old_nutrs) {
8770 ufshcd_release_sdb_queue(hba, old_nutrs);
8771 ret = ufshcd_memory_alloc(hba);
8772 if (ret)
8773 goto err;
8774 ufshcd_host_memory_configure(hba);
8775 }
8776
8777 ret = ufshcd_mcq_memory_alloc(hba);
8778 if (ret)
8779 goto err;
8780
8781 hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
8782 hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
8783
8784 return 0;
8785 err:
8786 hba->nutrs = old_nutrs;
8787 return ret;
8788 }
8789
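/*
 * Configure MCQ operation: set up ESI (if supported by the variant driver),
 * enable the MCQ-related interrupts, make the queues operational and program
 * the maximum number of active commands.
 */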
8790 static void ufshcd_config_mcq(struct ufs_hba *hba)
8791 {
8792 int ret;
8793 u32 intrs;
8794
8795 ret = ufshcd_mcq_vops_config_esi(hba);
8796 dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");
8797
8798 intrs = UFSHCD_ENABLE_MCQ_INTRS;
8799 if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR)
8800 intrs &= ~MCQ_CQ_EVENT_STATUS;
8801 ufshcd_enable_intr(hba, intrs);
8802 ufshcd_mcq_make_queues_operational(hba);
8803 ufshcd_mcq_config_mac(hba, hba->nutrs);
8804
8805 dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n",
8806 hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT],
8807 hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL],
8808 hba->nutrs);
8809 }
8810
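/*
 * Finish device initialization after link startup: tune UniPro parameters,
 * mark the device as active, program bRefClkFreq if required and switch to
 * the maximum supported power mode.
 */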
8811 static int ufshcd_post_device_init(struct ufs_hba *hba)
8812 {
8813 int ret;
8814
8815 ufshcd_tune_unipro_params(hba);
8816
8817 /* UFS device is also active now */
8818 ufshcd_set_ufs_dev_active(hba);
8819 ufshcd_force_reset_auto_bkops(hba);
8820
8821 ufshcd_set_timestamp_attr(hba);
8822
8823 if (!hba->max_pwr_info.is_valid)
8824 return 0;
8825
8826 /*
8827 * Set the right value to bRefClkFreq before attempting to
8828 * switch to HS gears.
8829 */
8830 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
8831 ufshcd_set_dev_ref_clk(hba);
8832 /* Gear up to HS gear. */
8833 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
8834 if (ret) {
8835 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
8836 __func__, ret);
8837 return ret;
8838 }
8839
8840 return 0;
8841 }
8842
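/*
 * Bring up the link and initialize the UFS device. When @init_dev_params is
 * true, the device descriptors are (re)read and the device parameters are
 * (re)initialized.
 */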
8843 static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
8844 {
8845 int ret;
8846
8847 WARN_ON_ONCE(!hba->scsi_host_added);
8848
8849 hba->ufshcd_state = UFSHCD_STATE_RESET;
8850
8851 ret = ufshcd_link_startup(hba);
8852 if (ret)
8853 return ret;
8854
8855 if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
8856 return ret;
8857
8858 /* Debug counters initialization */
8859 ufshcd_clear_dbg_ufs_stats(hba);
8860
8861 /* UniPro link is active now */
8862 ufshcd_set_link_active(hba);
8863
8864 /* Reconfigure MCQ upon reset */
8865 if (hba->mcq_enabled && !init_dev_params) {
8866 ufshcd_config_mcq(hba);
8867 ufshcd_mcq_enable(hba);
8868 }
8869
8870 /* Verify device initialization by sending NOP OUT UPIU */
8871 ret = ufshcd_verify_dev_init(hba);
8872 if (ret)
8873 return ret;
8874
8875 /* Initiate UFS initialization and wait until it completes */
8876 ret = ufshcd_complete_dev_init(hba);
8877 if (ret)
8878 return ret;
8879
8880 /*
8881 * Initialize the UFS device parameters used by the driver; these
8882 * parameters are associated with the UFS descriptors.
8883 */
8884 if (init_dev_params) {
8885 ret = ufshcd_device_params_init(hba);
8886 if (ret)
8887 return ret;
8888 if (is_mcq_supported(hba) &&
8889 hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) {
8890 ufshcd_config_mcq(hba);
8891 ufshcd_mcq_enable(hba);
8892 }
8893 }
8894
8895 return ufshcd_post_device_init(hba);
8896 }
8897
8898 /**
8899 * ufshcd_probe_hba - probe hba to detect device and initialize it
8900 * @hba: per-adapter instance
8901 * @init_dev_params: whether or not to call ufshcd_device_params_init().
8902 *
8903 * Execute link-startup and verify device initialization
8904 *
8905 * Return: 0 upon success; < 0 upon failure.
8906 */
8907 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
8908 {
8909 int ret;
8910
8911 if (!hba->pm_op_in_progress &&
8912 (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) {
8913 /* Reset the device and controller before doing reinit */
8914 ufshcd_device_reset(hba);
8915 ufs_put_device_desc(hba);
8916 ufshcd_hba_stop(hba);
8917 ret = ufshcd_hba_enable(hba);
8918 if (ret) {
8919 dev_err(hba->dev, "Host controller enable failed\n");
8920 ufshcd_print_evt_hist(hba);
8921 ufshcd_print_host_state(hba);
8922 return ret;
8923 }
8924
8925 /* Reinit the device */
8926 ret = ufshcd_device_init(hba, init_dev_params);
8927 if (ret)
8928 return ret;
8929 }
8930
8931 ufshcd_print_pwr_info(hba);
8932
8933 /*
8934 * bActiveICCLevel is volatile for the UFS device (as per the latest v2.1
8935 * spec) and for removable UFS cards as well, hence always set the parameter.
8936 * Note: the error handler may issue a device reset, which also resets
8937 * bActiveICCLevel, so it is always safe to set this here.
8938 */
8939 ufshcd_set_active_icc_lvl(hba);
8940
8941 /* Enable UFS Write Booster if supported */
8942 ufshcd_configure_wb(hba);
8943
8944 if (hba->ee_usr_mask)
8945 ufshcd_write_ee_control(hba);
8946 ufshcd_configure_auto_hibern8(hba);
8947
8948 return 0;
8949 }
8950
8951 /**
8952 * ufshcd_async_scan - asynchronous execution for probing hba
8953 * @data: data pointer to pass to this function
8954 * @cookie: cookie data
8955 */
8956 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
8957 {
8958 struct ufs_hba *hba = (struct ufs_hba *)data;
8959 ktime_t probe_start;
8960 int ret;
8961
8962 down(&hba->host_sem);
8963 /* Initialize hba, detect and initialize UFS device */
8964 probe_start = ktime_get();
8965 ret = ufshcd_probe_hba(hba, true);
8966 ufshcd_process_probe_result(hba, probe_start, ret);
8967 up(&hba->host_sem);
8968 if (ret)
8969 goto out;
8970
8971 /* Probe and add UFS logical units */
8972 ret = ufshcd_add_lus(hba);
8973
8974 out:
8975 pm_runtime_put_sync(hba->dev);
8976
8977 if (ret)
8978 dev_err(hba->dev, "%s failed: %d\n", __func__, ret);
8979 }
8980
8981 static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
8982 {
8983 struct ufs_hba *hba = shost_priv(scmd->device->host);
8984
8985 if (!hba->system_suspending) {
8986 /* Activate the error handler in the SCSI core. */
8987 return SCSI_EH_NOT_HANDLED;
8988 }
8989
8990 /*
8991 * If we get here we know that no TMFs are outstanding and also that
8992 * the only pending command is a START STOP UNIT command. Handle the
8993 * timeout of that command directly to prevent a deadlock between
8994 * ufshcd_set_dev_pwr_mode() and ufshcd_err_handler().
8995 */
8996 ufshcd_link_recovery(hba);
8997 dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n",
8998 __func__, hba->outstanding_tasks);
8999
9000 return scsi_host_busy(hba->host) ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
9001 }
9002
9003 static const struct attribute_group *ufshcd_driver_groups[] = {
9004 &ufs_sysfs_unit_descriptor_group,
9005 &ufs_sysfs_lun_attributes_group,
9006 NULL,
9007 };
9008
9009 static struct ufs_hba_variant_params ufs_hba_vps = {
9010 .hba_enable_delay_us = 1000,
9011 .wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(40),
9012 .devfreq_profile.polling_ms = 100,
9013 .devfreq_profile.target = ufshcd_devfreq_target,
9014 .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
9015 .ondemand_data.upthreshold = 70,
9016 .ondemand_data.downdifferential = 5,
9017 };
9018
9019 static const struct scsi_host_template ufshcd_driver_template = {
9020 .module = THIS_MODULE,
9021 .name = UFSHCD,
9022 .proc_name = UFSHCD,
9023 .map_queues = ufshcd_map_queues,
9024 .queuecommand = ufshcd_queuecommand,
9025 .mq_poll = ufshcd_poll,
9026 .sdev_init = ufshcd_sdev_init,
9027 .sdev_configure = ufshcd_sdev_configure,
9028 .sdev_destroy = ufshcd_sdev_destroy,
9029 .change_queue_depth = ufshcd_change_queue_depth,
9030 .eh_abort_handler = ufshcd_abort,
9031 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
9032 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
9033 .eh_timed_out = ufshcd_eh_timed_out,
9034 .this_id = -1,
9035 .sg_tablesize = SG_ALL,
9036 .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
9037 .max_sectors = SZ_1M / SECTOR_SIZE,
9038 .max_host_blocked = 1,
9039 .track_queue_depth = 1,
9040 .skip_settle_delay = 1,
9041 .sdev_groups = ufshcd_driver_groups,
9042 };
9043
9044 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
9045 int ua)
9046 {
9047 int ret;
9048
9049 if (!vreg)
9050 return 0;
9051
9052 /*
9053 * The "set_load" operation is only required for regulators that have a
9054 * current limit configured. Otherwise, a zero max_uA may cause
9055 * unexpected behavior when the regulator is enabled or set to high
9056 * power mode.
9057 */
9058 if (!vreg->max_uA)
9059 return 0;
9060
9061 ret = regulator_set_load(vreg->reg, ua);
9062 if (ret < 0) {
9063 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
9064 __func__, vreg->name, ua, ret);
9065 }
9066
9067 return ret;
9068 }
9069
9070 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
9071 struct ufs_vreg *vreg)
9072 {
9073 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
9074 }
9075
9076 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
9077 struct ufs_vreg *vreg)
9078 {
9079 if (!vreg)
9080 return 0;
9081
9082 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
9083 }
9084
9085 static int ufshcd_config_vreg(struct device *dev,
9086 struct ufs_vreg *vreg, bool on)
9087 {
9088 if (regulator_count_voltages(vreg->reg) <= 0)
9089 return 0;
9090
9091 return ufshcd_config_vreg_load(dev, vreg, on ? vreg->max_uA : 0);
9092 }
9093
9094 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
9095 {
9096 int ret = 0;
9097
9098 if (!vreg || vreg->enabled)
9099 goto out;
9100
9101 ret = ufshcd_config_vreg(dev, vreg, true);
9102 if (!ret)
9103 ret = regulator_enable(vreg->reg);
9104
9105 if (!ret)
9106 vreg->enabled = true;
9107 else
9108 dev_err(dev, "%s: %s enable failed, err=%d\n",
9109 __func__, vreg->name, ret);
9110 out:
9111 return ret;
9112 }
9113
9114 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
9115 {
9116 int ret = 0;
9117
9118 if (!vreg || !vreg->enabled || vreg->always_on)
9119 goto out;
9120
9121 ret = regulator_disable(vreg->reg);
9122
9123 if (!ret) {
9124 /* ignore errors on applying disable config */
9125 ufshcd_config_vreg(dev, vreg, false);
9126 vreg->enabled = false;
9127 } else {
9128 dev_err(dev, "%s: %s disable failed, err=%d\n",
9129 __func__, vreg->name, ret);
9130 }
9131 out:
9132 return ret;
9133 }
9134
9135 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
9136 {
9137 int ret = 0;
9138 struct device *dev = hba->dev;
9139 struct ufs_vreg_info *info = &hba->vreg_info;
9140
9141 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
9142 if (ret)
9143 goto out;
9144
9145 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
9146 if (ret)
9147 goto out;
9148
9149 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
9150
9151 out:
9152 if (ret) {
9153 ufshcd_toggle_vreg(dev, info->vccq2, false);
9154 ufshcd_toggle_vreg(dev, info->vccq, false);
9155 ufshcd_toggle_vreg(dev, info->vcc, false);
9156 }
9157 return ret;
9158 }
9159
9160 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
9161 {
9162 struct ufs_vreg_info *info = &hba->vreg_info;
9163
9164 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
9165 }
9166
9167 int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
9168 {
9169 int ret = 0;
9170
9171 if (!vreg)
9172 goto out;
9173
9174 vreg->reg = devm_regulator_get(dev, vreg->name);
9175 if (IS_ERR(vreg->reg)) {
9176 ret = PTR_ERR(vreg->reg);
9177 dev_err(dev, "%s: %s get failed, err=%d\n",
9178 __func__, vreg->name, ret);
9179 }
9180 out:
9181 return ret;
9182 }
9183 EXPORT_SYMBOL_GPL(ufshcd_get_vreg);
9184
9185 static int ufshcd_init_vreg(struct ufs_hba *hba)
9186 {
9187 int ret = 0;
9188 struct device *dev = hba->dev;
9189 struct ufs_vreg_info *info = &hba->vreg_info;
9190
9191 ret = ufshcd_get_vreg(dev, info->vcc);
9192 if (ret)
9193 goto out;
9194
9195 ret = ufshcd_get_vreg(dev, info->vccq);
9196 if (!ret)
9197 ret = ufshcd_get_vreg(dev, info->vccq2);
9198 out:
9199 return ret;
9200 }
9201
9202 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
9203 {
9204 struct ufs_vreg_info *info = &hba->vreg_info;
9205
9206 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
9207 }
9208
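/*
 * Enable or disable all clocks listed in hba->clk_list_head. Clocks marked
 * as needed to keep the link active are left untouched while the link is
 * active, and the variant driver is notified before and after the change.
 */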
9209 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
9210 {
9211 int ret = 0;
9212 struct ufs_clk_info *clki;
9213 struct list_head *head = &hba->clk_list_head;
9214 ktime_t start = ktime_get();
9215 bool clk_state_changed = false;
9216
9217 if (list_empty(head))
9218 goto out;
9219
9220 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
9221 if (ret)
9222 return ret;
9223
9224 list_for_each_entry(clki, head, list) {
9225 if (!IS_ERR_OR_NULL(clki->clk)) {
9226 /*
9227 * Don't disable clocks which are needed
9228 * to keep the link active.
9229 */
9230 if (ufshcd_is_link_active(hba) &&
9231 clki->keep_link_active)
9232 continue;
9233
9234 clk_state_changed = on ^ clki->enabled;
9235 if (on && !clki->enabled) {
9236 ret = clk_prepare_enable(clki->clk);
9237 if (ret) {
9238 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
9239 __func__, clki->name, ret);
9240 goto out;
9241 }
9242 } else if (!on && clki->enabled) {
9243 clk_disable_unprepare(clki->clk);
9244 }
9245 clki->enabled = on;
9246 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
9247 clki->name, on ? "en" : "dis");
9248 }
9249 }
9250
9251 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
9252 if (ret)
9253 return ret;
9254
9255 if (!ufshcd_is_clkscaling_supported(hba))
9256 ufshcd_pm_qos_update(hba, on);
9257 out:
9258 if (ret) {
9259 list_for_each_entry(clki, head, list) {
9260 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
9261 clk_disable_unprepare(clki->clk);
9262 }
9263 } else if (!ret && on && hba->clk_gating.is_initialized) {
9264 scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
9265 hba->clk_gating.state = CLKS_ON;
9266 trace_ufshcd_clk_gating(hba,
9267 hba->clk_gating.state);
9268 }
9269
9270 if (clk_state_changed)
9271 trace_ufshcd_profile_clk_gating(hba,
9272 (on ? "on" : "off"),
9273 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
9274 return ret;
9275 }
9276
9277 static enum ufs_ref_clk_freq ufshcd_parse_ref_clk_property(struct ufs_hba *hba)
9278 {
9279 u32 freq;
9280 int ret = device_property_read_u32(hba->dev, "ref-clk-freq", &freq);
9281
9282 if (ret) {
9283 dev_dbg(hba->dev, "Cannot query 'ref-clk-freq' property = %d", ret);
9284 return REF_CLK_FREQ_INVAL;
9285 }
9286
9287 return ufs_get_bref_clk_from_hz(freq);
9288 }
9289
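/*
 * Acquire all clocks listed in hba->clk_list_head, set each one to its
 * maximum frequency and derive the device reference clock frequency from the
 * "ref_clk" clock, if present.
 */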
9290 static int ufshcd_init_clocks(struct ufs_hba *hba)
9291 {
9292 int ret = 0;
9293 struct ufs_clk_info *clki;
9294 struct device *dev = hba->dev;
9295 struct list_head *head = &hba->clk_list_head;
9296
9297 if (list_empty(head))
9298 goto out;
9299
9300 list_for_each_entry(clki, head, list) {
9301 if (!clki->name)
9302 continue;
9303
9304 clki->clk = devm_clk_get(dev, clki->name);
9305 if (IS_ERR(clki->clk)) {
9306 ret = PTR_ERR(clki->clk);
9307 dev_err(dev, "%s: %s clk get failed, %d\n",
9308 __func__, clki->name, ret);
9309 goto out;
9310 }
9311
9312 /*
9313 * Parse device ref clk freq as per device tree "ref_clk".
9314 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
9315 * in ufshcd_alloc_host().
9316 */
9317 if (!strcmp(clki->name, "ref_clk"))
9318 ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
9319
9320 if (clki->max_freq) {
9321 ret = clk_set_rate(clki->clk, clki->max_freq);
9322 if (ret) {
9323 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
9324 __func__, clki->name,
9325 clki->max_freq, ret);
9326 goto out;
9327 }
9328 clki->curr_freq = clki->max_freq;
9329 }
9330 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
9331 clki->name, clk_get_rate(clki->clk));
9332 }
9333
9334 /* Set Max. frequency for all clocks */
9335 if (hba->use_pm_opp) {
9336 ret = ufshcd_opp_set_rate(hba, ULONG_MAX);
9337 if (ret) {
9338 dev_err(hba->dev, "%s: failed to set OPP: %d", __func__,
9339 ret);
9340 goto out;
9341 }
9342 }
9343
9344 out:
9345 return ret;
9346 }
9347
9348 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
9349 {
9350 int err = 0;
9351
9352 if (!hba->vops)
9353 goto out;
9354
9355 err = ufshcd_vops_init(hba);
9356 if (err)
9357 dev_err_probe(hba->dev, err,
9358 "%s: variant %s init failed with err %d\n",
9359 __func__, ufshcd_get_var_name(hba), err);
9360 out:
9361 return err;
9362 }
9363
9364 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
9365 {
9366 if (!hba->vops)
9367 return;
9368
9369 ufshcd_vops_exit(hba);
9370 }
9371
9372 static int ufshcd_hba_init(struct ufs_hba *hba)
9373 {
9374 int err;
9375
9376 /*
9377 * Handle host controller power separately from the UFS device power
9378 * rails, as this makes it easier to control host controller power
9379 * collapse independently of UFS device power collapse.
9380 * Also, enable the host controller power before going ahead with the
9381 * rest of the initialization here.
9382 */
9383 err = ufshcd_init_hba_vreg(hba);
9384 if (err)
9385 goto out;
9386
9387 err = ufshcd_setup_hba_vreg(hba, true);
9388 if (err)
9389 goto out;
9390
9391 err = ufshcd_init_clocks(hba);
9392 if (err)
9393 goto out_disable_hba_vreg;
9394
9395 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
9396 hba->dev_ref_clk_freq = ufshcd_parse_ref_clk_property(hba);
9397
9398 err = ufshcd_setup_clocks(hba, true);
9399 if (err)
9400 goto out_disable_hba_vreg;
9401
9402 err = ufshcd_init_vreg(hba);
9403 if (err)
9404 goto out_disable_clks;
9405
9406 err = ufshcd_setup_vreg(hba, true);
9407 if (err)
9408 goto out_disable_clks;
9409
9410 err = ufshcd_variant_hba_init(hba);
9411 if (err)
9412 goto out_disable_vreg;
9413
9414 ufs_debugfs_hba_init(hba);
9415 ufs_fault_inject_hba_init(hba);
9416
9417 hba->is_powered = true;
9418 goto out;
9419
9420 out_disable_vreg:
9421 ufshcd_setup_vreg(hba, false);
9422 out_disable_clks:
9423 ufshcd_setup_clocks(hba, false);
9424 out_disable_hba_vreg:
9425 ufshcd_setup_hba_vreg(hba, false);
9426 out:
9427 return err;
9428 }
9429
9430 static void ufshcd_hba_exit(struct ufs_hba *hba)
9431 {
9432 if (hba->is_powered) {
9433 ufshcd_pm_qos_exit(hba);
9434 ufshcd_exit_clk_scaling(hba);
9435 ufshcd_exit_clk_gating(hba);
9436 if (hba->eh_wq)
9437 destroy_workqueue(hba->eh_wq);
9438 ufs_debugfs_hba_exit(hba);
9439 ufshcd_variant_hba_exit(hba);
9440 ufshcd_setup_vreg(hba, false);
9441 ufshcd_setup_clocks(hba, false);
9442 ufshcd_setup_hba_vreg(hba, false);
9443 hba->is_powered = false;
9444 ufs_put_device_desc(hba);
9445 }
9446 }
9447
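/*
 * Send a START STOP UNIT command to the device W-LU in order to change the
 * device power mode. The command is issued with BLK_MQ_REQ_PM and is retried
 * a limited number of times on failure.
 */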
9448 static int ufshcd_execute_start_stop(struct scsi_device *sdev,
9449 enum ufs_dev_pwr_mode pwr_mode,
9450 struct scsi_sense_hdr *sshdr)
9451 {
9452 const unsigned char cdb[6] = { START_STOP, 0, 0, 0, pwr_mode << 4, 0 };
9453 struct scsi_failure failure_defs[] = {
9454 {
9455 .allowed = 2,
9456 .result = SCMD_FAILURE_RESULT_ANY,
9457 },
9458 };
9459 struct scsi_failures failures = {
9460 .failure_definitions = failure_defs,
9461 };
9462 const struct scsi_exec_args args = {
9463 .failures = &failures,
9464 .sshdr = sshdr,
9465 .req_flags = BLK_MQ_REQ_PM,
9466 .scmd_flags = SCMD_FAIL_IF_RECOVERING,
9467 };
9468
9469 return scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, /*buffer=*/NULL,
9470 /*bufflen=*/0, /*timeout=*/10 * HZ, /*retries=*/0,
9471 &args);
9472 }
9473
9474 /**
9475 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
9476 * power mode
9477 * @hba: per adapter instance
9478 * @pwr_mode: device power mode to set
9479 *
9480 * Return: 0 if requested power mode is set successfully;
9481 * < 0 if failed to set the requested power mode.
9482 */
9483 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
9484 enum ufs_dev_pwr_mode pwr_mode)
9485 {
9486 struct scsi_sense_hdr sshdr;
9487 struct scsi_device *sdp;
9488 unsigned long flags;
9489 int ret;
9490
9491 spin_lock_irqsave(hba->host->host_lock, flags);
9492 sdp = hba->ufs_device_wlun;
9493 if (sdp && scsi_device_online(sdp))
9494 ret = scsi_device_get(sdp);
9495 else
9496 ret = -ENODEV;
9497 spin_unlock_irqrestore(hba->host->host_lock, flags);
9498
9499 if (ret)
9500 return ret;
9501
9502 /*
9503 * If scsi commands fail, the scsi mid-layer schedules scsi error-
9504 * handling, which would wait for host to be resumed. Since we know
9505 * we are functional while we are here, skip host resume in error
9506 * handling context.
9507 */
9508 hba->host->eh_noresume = 1;
9509
9510 /*
9511 * This function is generally called from the power management
9512 * callbacks, hence set the RQF_PM flag so that it doesn't resume
9513 * already suspended children.
9514 */
9515 ret = ufshcd_execute_start_stop(sdp, pwr_mode, &sshdr);
9516 if (ret) {
9517 sdev_printk(KERN_WARNING, sdp,
9518 "START_STOP failed for power mode: %d, result %x\n",
9519 pwr_mode, ret);
9520 if (ret > 0) {
9521 if (scsi_sense_valid(&sshdr))
9522 scsi_print_sense_hdr(sdp, NULL, &sshdr);
9523 ret = -EIO;
9524 }
9525 } else {
9526 hba->curr_dev_pwr_mode = pwr_mode;
9527 }
9528
9529 scsi_device_put(sdp);
9530 hba->host->eh_noresume = 0;
9531 return ret;
9532 }
9533
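/*
 * Transition the UniPro link to @req_link_state. For UIC_LINK_OFF_STATE the
 * link is first put into Hibern8 and the controller is then stopped, unless
 * @check_for_bkops is set and auto-bkops is enabled.
 */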
9534 static int ufshcd_link_state_transition(struct ufs_hba *hba,
9535 enum uic_link_state req_link_state,
9536 bool check_for_bkops)
9537 {
9538 int ret = 0;
9539
9540 if (req_link_state == hba->uic_link_state)
9541 return 0;
9542
9543 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
9544 ret = ufshcd_uic_hibern8_enter(hba);
9545 if (!ret) {
9546 ufshcd_set_link_hibern8(hba);
9547 } else {
9548 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
9549 __func__, ret);
9550 goto out;
9551 }
9552 }
9553 /*
9554 * If autobkops is enabled, link can't be turned off because
9555 * turning off the link would also turn off the device, except in the
9556 * case of DeepSleep where the device is expected to remain powered.
9557 */
9558 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
9559 (!check_for_bkops || !hba->auto_bkops_enabled)) {
9560 /*
9561 * Make sure that the link is in low power mode; we currently do this
9562 * by putting the link in Hibern8. The other way to put the link in
9563 * low power mode is to send the DME end point reset to the device and
9564 * then send the DME reset command to the local UniPro, but putting
9565 * the link in Hibern8 is much faster.
9566 *
9567 * Note also that putting the link in Hibern8 is a requirement
9568 * for entering DeepSleep.
9569 */
9570 ret = ufshcd_uic_hibern8_enter(hba);
9571 if (ret) {
9572 dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
9573 __func__, ret);
9574 goto out;
9575 }
9576 /*
9577 * Change controller state to "reset state" which
9578 * should also put the link in off/reset state
9579 */
9580 ufshcd_hba_stop(hba);
9581 /*
9582 * TODO: Check if we need any delay to make sure that
9583 * controller is reset
9584 */
9585 ufshcd_set_link_off(hba);
9586 }
9587
9588 out:
9589 return ret;
9590 }
9591
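/*
 * Put the UFS device power rails into the lowest power state allowed by the
 * current device power mode and link state.
 */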
9592 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
9593 {
9594 bool vcc_off = false;
9595
9596 /*
9597 * It seems some UFS devices may keep drawing more than sleep current
9598 * (at least for 500us) from the UFS rails (especially from the VCCQ rail).
9599 * To avoid this situation, add a 2ms delay before putting these UFS
9600 * rails in LPM mode.
9601 */
9602 if (!ufshcd_is_link_active(hba) &&
9603 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
9604 usleep_range(2000, 2100);
9605
9606 /*
9607 * If the UFS device is in UFS_Sleep, turn off the VCC rail to save
9608 * some power.
9609 *
9610 * If the UFS device and link are in the OFF state, all power supplies
9611 * (VCC, VCCQ, VCCQ2) can be turned off if power-on write protect is not
9612 * required. If the UFS link is inactive (Hibern8 or OFF state) and the
9613 * device is in sleep state, put the VCCQ & VCCQ2 rails in LPM mode.
9614 *
9615 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
9616 * in low power state which would save some power.
9617 *
9618 * If Write Booster is enabled and the device needs to flush the WB
9619 * buffer OR if bkops status is urgent for WB, keep Vcc on.
9620 */
9621 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
9622 !hba->dev_info.is_lu_power_on_wp) {
9623 ufshcd_setup_vreg(hba, false);
9624 vcc_off = true;
9625 } else if (!ufshcd_is_ufs_dev_active(hba)) {
9626 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
9627 vcc_off = true;
9628 if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
9629 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
9630 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
9631 }
9632 }
9633
9634 /*
9635 * Some UFS devices require a delay after the VCC power rail is turned off.
9636 */
9637 if (vcc_off && hba->vreg_info.vcc &&
9638 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
9639 usleep_range(5000, 5100);
9640 }
9641
9642 #ifdef CONFIG_PM
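/*
 * Restore the UFS device power rails to high power mode; the inverse of
 * ufshcd_vreg_set_lpm().
 */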
9643 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
9644 {
9645 int ret = 0;
9646
9647 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
9648 !hba->dev_info.is_lu_power_on_wp) {
9649 ret = ufshcd_setup_vreg(hba, true);
9650 } else if (!ufshcd_is_ufs_dev_active(hba)) {
9651 if (!ufshcd_is_link_active(hba)) {
9652 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
9653 if (ret)
9654 goto vcc_disable;
9655 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
9656 if (ret)
9657 goto vccq_lpm;
9658 }
9659 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
9660 }
9661 goto out;
9662
9663 vccq_lpm:
9664 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
9665 vcc_disable:
9666 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
9667 out:
9668 return ret;
9669 }
9670 #endif /* CONFIG_PM */
9671
9672 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
9673 {
9674 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
9675 ufshcd_setup_hba_vreg(hba, false);
9676 }
9677
9678 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
9679 {
9680 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
9681 ufshcd_setup_hba_vreg(hba, true);
9682 }
9683
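/*
 * Suspend the UFS device W-LU: derive the target device power mode and link
 * state from the PM level, handle bkops and WriteBooster flush requirements,
 * set the device power mode and transition the link.
 */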
9684 static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
9685 {
9686 int ret = 0;
9687 bool check_for_bkops;
9688 enum ufs_pm_level pm_lvl;
9689 enum ufs_dev_pwr_mode req_dev_pwr_mode;
9690 enum uic_link_state req_link_state;
9691
9692 hba->pm_op_in_progress = true;
9693 if (pm_op != UFS_SHUTDOWN_PM) {
9694 pm_lvl = pm_op == UFS_RUNTIME_PM ?
9695 hba->rpm_lvl : hba->spm_lvl;
9696 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
9697 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
9698 } else {
9699 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
9700 req_link_state = UIC_LINK_OFF_STATE;
9701 }
9702
9703 /*
9704 * If we can't transition into any of the low power modes
9705 * just gate the clocks.
9706 */
9707 ufshcd_hold(hba);
9708 hba->clk_gating.is_suspended = true;
9709
9710 if (ufshcd_is_clkscaling_supported(hba))
9711 ufshcd_clk_scaling_suspend(hba, true);
9712
9713 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
9714 req_link_state == UIC_LINK_ACTIVE_STATE) {
9715 goto vops_suspend;
9716 }
9717
9718 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
9719 (req_link_state == hba->uic_link_state))
9720 goto enable_scaling;
9721
9722 /* UFS device & link must be active before we enter in this function */
9723 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
9724 /* Wait err handler finish or trigger err recovery */
9725 if (!ufshcd_eh_in_progress(hba))
9726 ufshcd_force_error_recovery(hba);
9727 ret = -EBUSY;
9728 goto enable_scaling;
9729 }
9730
9731 if (pm_op == UFS_RUNTIME_PM) {
9732 if (ufshcd_can_autobkops_during_suspend(hba)) {
9733 /*
9734 * The device is idle with no requests in the queue, so
9735 * allow background operations if the bkops status shows
9736 * that performance might be impacted.
9737 */
9738 ret = ufshcd_bkops_ctrl(hba);
9739 if (ret) {
9740 /*
9741 * If an error is returned in the suspend flow, I/O will
9742 * hang. Trigger the error handler and abort suspend for
9743 * error recovery.
9744 */
9745 ufshcd_force_error_recovery(hba);
9746 ret = -EBUSY;
9747 goto enable_scaling;
9748 }
9749 } else {
9750 /* make sure that auto bkops is disabled */
9751 ufshcd_disable_auto_bkops(hba);
9752 }
9753 /*
9754 * If device needs to do BKOP or WB buffer flush during
9755 * Hibern8, keep device power mode as "active power mode"
9756 * and VCC supply.
9757 */
9758 hba->dev_info.b_rpm_dev_flush_capable =
9759 hba->auto_bkops_enabled ||
9760 (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
9761 ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
9762 ufshcd_is_auto_hibern8_enabled(hba))) &&
9763 ufshcd_wb_need_flush(hba));
9764 }
9765
9766 flush_work(&hba->eeh_work);
9767
9768 ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
9769 if (ret)
9770 goto enable_scaling;
9771
9772 if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
9773 if (pm_op != UFS_RUNTIME_PM)
9774 /* ensure that bkops is disabled */
9775 ufshcd_disable_auto_bkops(hba);
9776
9777 if (!hba->dev_info.b_rpm_dev_flush_capable) {
9778 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
9779 if (ret && pm_op != UFS_SHUTDOWN_PM) {
9780 /*
9781 * If an error is returned in the suspend flow, I/O will
9782 * hang. Trigger the error handler and abort suspend for
9783 * error recovery.
9784 */
9785 ufshcd_force_error_recovery(hba);
9786 ret = -EBUSY;
9787 }
9788 if (ret)
9789 goto enable_scaling;
9790 }
9791 }
9792
9793 /*
9794 * In the case of DeepSleep, the device is expected to remain powered
9795 * with the link off, so do not check for bkops.
9796 */
9797 check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
9798 ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
9799 if (ret && pm_op != UFS_SHUTDOWN_PM) {
9800 /*
9801 * If an error is returned in the suspend flow, I/O will
9802 * hang. Trigger the error handler and abort suspend for
9803 * error recovery.
9804 */
9805 ufshcd_force_error_recovery(hba);
9806 ret = -EBUSY;
9807 }
9808 if (ret)
9809 goto set_dev_active;
9810
9811 vops_suspend:
9812 /*
9813 * Call vendor specific suspend callback. As these callbacks may access
9814 * vendor specific host controller register space call them before the
9815 * host clocks are ON.
9816 */
9817 ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
9818 if (ret)
9819 goto set_link_active;
9820
9821 cancel_delayed_work_sync(&hba->ufs_rtc_update_work);
9822 goto out;
9823
9824 set_link_active:
9825 /*
9826 * Device hardware reset is required to exit DeepSleep. Also, for
9827 * DeepSleep, the link is off so host reset and restore will be done
9828 * further below.
9829 */
9830 if (ufshcd_is_ufs_dev_deepsleep(hba)) {
9831 ufshcd_device_reset(hba);
9832 WARN_ON(!ufshcd_is_link_off(hba));
9833 }
9834 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
9835 ufshcd_set_link_active(hba);
9836 else if (ufshcd_is_link_off(hba))
9837 ufshcd_host_reset_and_restore(hba);
9838 set_dev_active:
9839 /* Can also get here needing to exit DeepSleep */
9840 if (ufshcd_is_ufs_dev_deepsleep(hba)) {
9841 ufshcd_device_reset(hba);
9842 ufshcd_host_reset_and_restore(hba);
9843 }
9844 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
9845 ufshcd_disable_auto_bkops(hba);
9846 enable_scaling:
9847 if (ufshcd_is_clkscaling_supported(hba))
9848 ufshcd_clk_scaling_suspend(hba, false);
9849
9850 hba->dev_info.b_rpm_dev_flush_capable = false;
9851 out:
9852 if (hba->dev_info.b_rpm_dev_flush_capable) {
9853 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
9854 msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
9855 }
9856
9857 if (ret) {
9858 ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
9859 hba->clk_gating.is_suspended = false;
9860 ufshcd_release(hba);
9861 }
9862 hba->pm_op_in_progress = false;
9863 return ret;
9864 }
9865
9866 #ifdef CONFIG_PM
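/*
 * Resume the UFS device W-LU: bring the link back to the active state (via
 * Hibern8 exit or a full reset and restore), set the device to active power
 * mode and restore bkops and exception event settings.
 */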
9867 static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
9868 {
9869 int ret;
9870 enum uic_link_state old_link_state = hba->uic_link_state;
9871
9872 hba->pm_op_in_progress = true;
9873
9874 /*
9875 * Call vendor specific resume callback. As these callbacks may access
9876 * vendor specific host controller register space call them when the
9877 * host clocks are ON.
9878 */
9879 ret = ufshcd_vops_resume(hba, pm_op);
9880 if (ret)
9881 goto out;
9882
9883 /* For DeepSleep, the only supported option is to have the link off */
9884 WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
9885
9886 if (ufshcd_is_link_hibern8(hba)) {
9887 ret = ufshcd_uic_hibern8_exit(hba);
9888 if (!ret) {
9889 ufshcd_set_link_active(hba);
9890 } else {
9891 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
9892 __func__, ret);
9893 goto vendor_suspend;
9894 }
9895 } else if (ufshcd_is_link_off(hba)) {
9896 /*
9897 * A full initialization of the host and the device is
9898 * required since the link was put to off during suspend.
9899 * Note, in the case of DeepSleep, the device will exit
9900 * DeepSleep due to device reset.
9901 */
9902 ret = ufshcd_reset_and_restore(hba);
9903 /*
9904 * ufshcd_reset_and_restore() should have already
9905 * set the link state as active
9906 */
9907 if (ret || !ufshcd_is_link_active(hba))
9908 goto vendor_suspend;
9909 }
9910
9911 if (!ufshcd_is_ufs_dev_active(hba)) {
9912 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
9913 if (ret)
9914 goto set_old_link_state;
9915 ufshcd_set_timestamp_attr(hba);
9916 schedule_delayed_work(&hba->ufs_rtc_update_work,
9917 msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
9918 }
9919
9920 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
9921 ufshcd_enable_auto_bkops(hba);
9922 else
9923 /*
9924 * If BKOPs operations are urgently needed at this moment then
9925 * keep auto-bkops enabled or else disable it.
9926 */
9927 ufshcd_bkops_ctrl(hba);
9928
9929 if (hba->ee_usr_mask)
9930 ufshcd_write_ee_control(hba);
9931
9932 if (ufshcd_is_clkscaling_supported(hba))
9933 ufshcd_clk_scaling_suspend(hba, false);
9934
9935 if (hba->dev_info.b_rpm_dev_flush_capable) {
9936 hba->dev_info.b_rpm_dev_flush_capable = false;
9937 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
9938 }
9939
9940 ufshcd_configure_auto_hibern8(hba);
9941
9942 goto out;
9943
9944 set_old_link_state:
9945 ufshcd_link_state_transition(hba, old_link_state, 0);
9946 vendor_suspend:
9947 ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
9948 ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
9949 out:
9950 if (ret)
9951 ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret);
9952 hba->clk_gating.is_suspended = false;
9953 ufshcd_release(hba);
9954 hba->pm_op_in_progress = false;
9955 return ret;
9956 }
9957
9958 static int ufshcd_wl_runtime_suspend(struct device *dev)
9959 {
9960 struct scsi_device *sdev = to_scsi_device(dev);
9961 struct ufs_hba *hba;
9962 int ret;
9963 ktime_t start = ktime_get();
9964
9965 hba = shost_priv(sdev->host);
9966
9967 ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM);
9968 if (ret)
9969 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9970
9971 trace_ufshcd_wl_runtime_suspend(hba, ret,
9972 ktime_to_us(ktime_sub(ktime_get(), start)),
9973 hba->curr_dev_pwr_mode, hba->uic_link_state);
9974
9975 return ret;
9976 }
9977
9978 static int ufshcd_wl_runtime_resume(struct device *dev)
9979 {
9980 struct scsi_device *sdev = to_scsi_device(dev);
9981 struct ufs_hba *hba;
9982 int ret = 0;
9983 ktime_t start = ktime_get();
9984
9985 hba = shost_priv(sdev->host);
9986
9987 ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM);
9988 if (ret)
9989 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
9990
9991 trace_ufshcd_wl_runtime_resume(hba, ret,
9992 ktime_to_us(ktime_sub(ktime_get(), start)),
9993 hba->curr_dev_pwr_mode, hba->uic_link_state);
9994
9995 return ret;
9996 }
9997 #endif
9998
9999 #ifdef CONFIG_PM_SLEEP
10000 static int ufshcd_wl_suspend(struct device *dev)
10001 {
10002 struct scsi_device *sdev = to_scsi_device(dev);
10003 struct ufs_hba *hba;
10004 int ret = 0;
10005 ktime_t start = ktime_get();
10006
10007 hba = shost_priv(sdev->host);
10008 down(&hba->host_sem);
10009 hba->system_suspending = true;
10010
10011 if (pm_runtime_suspended(dev))
10012 goto out;
10013
10014 ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM);
10015 if (ret) {
10016 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
10017 up(&hba->host_sem);
10018 }
10019
10020 out:
10021 if (!ret)
10022 hba->is_sys_suspended = true;
10023 trace_ufshcd_wl_suspend(hba, ret,
10024 ktime_to_us(ktime_sub(ktime_get(), start)),
10025 hba->curr_dev_pwr_mode, hba->uic_link_state);
10026
10027 return ret;
10028 }
10029
10030 static int ufshcd_wl_resume(struct device *dev)
10031 {
10032 struct scsi_device *sdev = to_scsi_device(dev);
10033 struct ufs_hba *hba;
10034 int ret = 0;
10035 ktime_t start = ktime_get();
10036
10037 hba = shost_priv(sdev->host);
10038
10039 if (pm_runtime_suspended(dev))
10040 goto out;
10041
10042 ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM);
10043 if (ret)
10044 dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
10045 out:
10046 trace_ufshcd_wl_resume(hba, ret,
10047 ktime_to_us(ktime_sub(ktime_get(), start)),
10048 hba->curr_dev_pwr_mode, hba->uic_link_state);
10049 if (!ret)
10050 hba->is_sys_suspended = false;
10051 hba->system_suspending = false;
10052 up(&hba->host_sem);
10053 return ret;
10054 }
10055 #endif
10056
10057 /**
10058 * ufshcd_suspend - helper function for suspend operations
10059 * @hba: per adapter instance
10060 *
10061 * This function disables irqs, turns off the clocks and puts the
10062 * vreg and hba-vreg in LPM mode.
10063 *
10064 * Return: 0 upon success; < 0 upon failure.
10065 */
10066 static int ufshcd_suspend(struct ufs_hba *hba)
10067 {
10068 int ret;
10069
10070 if (!hba->is_powered)
10071 return 0;
10072 /*
10073 * Disable the host irq as there won't be any host controller
10074 * transactions expected until resume.
10075 */
10076 ufshcd_disable_irq(hba);
10077 ret = ufshcd_setup_clocks(hba, false);
10078 if (ret) {
10079 ufshcd_enable_irq(hba);
10080 return ret;
10081 }
10082 if (ufshcd_is_clkgating_allowed(hba)) {
10083 hba->clk_gating.state = CLKS_OFF;
10084 trace_ufshcd_clk_gating(hba,
10085 hba->clk_gating.state);
10086 }
10087
10088 ufshcd_vreg_set_lpm(hba);
10089 /* Put the host controller in low power mode if possible */
10090 ufshcd_hba_vreg_set_lpm(hba);
10091 ufshcd_pm_qos_update(hba, false);
10092 return ret;
10093 }
10094
10095 #ifdef CONFIG_PM
10096 /**
10097 * ufshcd_resume - helper function for resume operations
10098 * @hba: per adapter instance
10099 *
10100 * This function basically turns on the regulators, clocks and
10101 * irqs of the hba.
10102 *
10103 * Return: 0 for success and non-zero for failure.
10104 */
10105 static int ufshcd_resume(struct ufs_hba *hba)
10106 {
10107 int ret;
10108
10109 if (!hba->is_powered)
10110 return 0;
10111
10112 ufshcd_hba_vreg_set_hpm(hba);
10113 ret = ufshcd_vreg_set_hpm(hba);
10114 if (ret)
10115 goto out;
10116
10117 /* Make sure clocks are enabled before accessing controller */
10118 ret = ufshcd_setup_clocks(hba, true);
10119 if (ret)
10120 goto disable_vreg;
10121
10122 /* enable the host irq as host controller would be active soon */
10123 ufshcd_enable_irq(hba);
10124
10125 goto out;
10126
10127 disable_vreg:
10128 ufshcd_vreg_set_lpm(hba);
10129 out:
10130 if (ret)
10131 ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
10132 return ret;
10133 }
10134 #endif /* CONFIG_PM */
10135
10136 #ifdef CONFIG_PM_SLEEP
10137 /**
10138 * ufshcd_system_suspend - system suspend callback
10139 * @dev: Device associated with the UFS controller.
10140 *
10141 * Executed before putting the system into a sleep state in which the contents
10142 * of main memory are preserved.
10143 *
10144 * Return: 0 for success and non-zero for failure.
10145 */
10146 int ufshcd_system_suspend(struct device *dev)
10147 {
10148 struct ufs_hba *hba = dev_get_drvdata(dev);
10149 int ret = 0;
10150 ktime_t start = ktime_get();
10151
10152 if (pm_runtime_suspended(hba->dev))
10153 goto out;
10154
10155 ret = ufshcd_suspend(hba);
10156 out:
10157 trace_ufshcd_system_suspend(hba, ret,
10158 ktime_to_us(ktime_sub(ktime_get(), start)),
10159 hba->curr_dev_pwr_mode, hba->uic_link_state);
10160 return ret;
10161 }
10162 EXPORT_SYMBOL(ufshcd_system_suspend);
10163
10164 /**
10165 * ufshcd_system_resume - system resume callback
10166 * @dev: Device associated with the UFS controller.
10167 *
10168 * Executed after waking the system up from a sleep state in which the contents
10169 * of main memory were preserved.
10170 *
10171 * Return: 0 for success and non-zero for failure.
10172 */
10173 int ufshcd_system_resume(struct device *dev)
10174 {
10175 struct ufs_hba *hba = dev_get_drvdata(dev);
10176 ktime_t start = ktime_get();
10177 int ret = 0;
10178
10179 if (pm_runtime_suspended(hba->dev))
10180 goto out;
10181
10182 ret = ufshcd_resume(hba);
10183
10184 out:
10185 trace_ufshcd_system_resume(hba, ret,
10186 ktime_to_us(ktime_sub(ktime_get(), start)),
10187 hba->curr_dev_pwr_mode, hba->uic_link_state);
10188
10189 return ret;
10190 }
10191 EXPORT_SYMBOL(ufshcd_system_resume);
10192 #endif /* CONFIG_PM_SLEEP */
10193
10194 #ifdef CONFIG_PM
10195 /**
10196 * ufshcd_runtime_suspend - runtime suspend callback
10197 * @dev: Device associated with the UFS controller.
10198 *
10199 * Check the description of ufshcd_suspend() function for more details.
10200 *
10201 * Return: 0 for success and non-zero for failure.
10202 */
10203 int ufshcd_runtime_suspend(struct device *dev)
10204 {
10205 struct ufs_hba *hba = dev_get_drvdata(dev);
10206 int ret;
10207 ktime_t start = ktime_get();
10208
10209 ret = ufshcd_suspend(hba);
10210
10211 trace_ufshcd_runtime_suspend(hba, ret,
10212 ktime_to_us(ktime_sub(ktime_get(), start)),
10213 hba->curr_dev_pwr_mode, hba->uic_link_state);
10214 return ret;
10215 }
10216 EXPORT_SYMBOL(ufshcd_runtime_suspend);
10217
10218 /**
10219 * ufshcd_runtime_resume - runtime resume routine
10220 * @dev: Device associated with the UFS controller.
10221 *
10222 * This function brings the controller to the active state.
10223 * The following operations are done in this function:
10224 *
10225 * 1. Turn on all the controller related clocks
10226 * 2. Turn ON VCC rail
10227 *
10228 * Return: 0 upon success; < 0 upon failure.
10229 */
10230 int ufshcd_runtime_resume(struct device *dev)
10231 {
10232 struct ufs_hba *hba = dev_get_drvdata(dev);
10233 int ret;
10234 ktime_t start = ktime_get();
10235
10236 ret = ufshcd_resume(hba);
10237
10238 trace_ufshcd_runtime_resume(hba, ret,
10239 ktime_to_us(ktime_sub(ktime_get(), start)),
10240 hba->curr_dev_pwr_mode, hba->uic_link_state);
10241 return ret;
10242 }
10243 EXPORT_SYMBOL(ufshcd_runtime_resume);
10244 #endif /* CONFIG_PM */
10245
10246 static void ufshcd_wl_shutdown(struct device *dev)
10247 {
10248 struct scsi_device *sdev = to_scsi_device(dev);
10249 struct ufs_hba *hba = shost_priv(sdev->host);
10250
10251 down(&hba->host_sem);
10252 hba->shutting_down = true;
10253 up(&hba->host_sem);
10254
10255 /* Turn on everything while shutting down */
10256 ufshcd_rpm_get_sync(hba);
10257 scsi_device_quiesce(sdev);
10258 shost_for_each_device(sdev, hba->host) {
10259 if (sdev == hba->ufs_device_wlun)
10260 continue;
10261 mutex_lock(&sdev->state_mutex);
10262 scsi_device_set_state(sdev, SDEV_OFFLINE);
10263 mutex_unlock(&sdev->state_mutex);
10264 }
10265 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
10266
10267 /*
10268 * Next, turn off the UFS controller and the UFS regulators. Disable
10269 * clocks.
10270 */
10271 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
10272 ufshcd_suspend(hba);
10273
10274 hba->is_powered = false;
10275 }
10276
10277 /**
10278 * ufshcd_remove - de-allocate the SCSI host and the host memory space
10279 * data structures
10280 * @hba: per adapter instance
10281 */
10282 void ufshcd_remove(struct ufs_hba *hba)
10283 {
10284 if (hba->ufs_device_wlun)
10285 ufshcd_rpm_get_sync(hba);
10286 ufs_hwmon_remove(hba);
10287 ufs_bsg_remove(hba);
10288 ufs_sysfs_remove_nodes(hba->dev);
10289 cancel_delayed_work_sync(&hba->ufs_rtc_update_work);
10290 blk_mq_destroy_queue(hba->tmf_queue);
10291 blk_put_queue(hba->tmf_queue);
10292 blk_mq_free_tag_set(&hba->tmf_tag_set);
10293 if (hba->scsi_host_added)
10294 scsi_remove_host(hba->host);
10295 /* disable interrupts */
10296 ufshcd_disable_intr(hba, hba->intr_mask);
10297 ufshcd_hba_stop(hba);
10298 ufshcd_hba_exit(hba);
10299 }
10300 EXPORT_SYMBOL_GPL(ufshcd_remove);
10301
10302 #ifdef CONFIG_PM_SLEEP
10303 int ufshcd_system_freeze(struct device *dev)
10304 {
10305
10306 return ufshcd_system_suspend(dev);
10307
10308 }
10309 EXPORT_SYMBOL_GPL(ufshcd_system_freeze);
10310
10311 int ufshcd_system_restore(struct device *dev)
10312 {
10313
10314 struct ufs_hba *hba = dev_get_drvdata(dev);
10315 int ret;
10316
10317 ret = ufshcd_system_resume(dev);
10318 if (ret)
10319 return ret;
10320
10321 /* Configure UTRL and UTMRL base address registers */
10322 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
10323 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
10324 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
10325 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
10326 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
10327 REG_UTP_TASK_REQ_LIST_BASE_L);
10328 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
10329 REG_UTP_TASK_REQ_LIST_BASE_H);
10330 /*
10331 * Make sure that the UTRL and UTMRL base address registers
10332 * are updated with the latest queue addresses. Only after
10333 * updating these addresses can we queue new commands.
10334 */
10335 ufshcd_readl(hba, REG_UTP_TASK_REQ_LIST_BASE_H);
10336
10337 return 0;
10338
10339 }
10340 EXPORT_SYMBOL_GPL(ufshcd_system_restore);
10341
10342 int ufshcd_system_thaw(struct device *dev)
10343 {
10344 return ufshcd_system_resume(dev);
10345 }
10346 EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
10347 #endif /* CONFIG_PM_SLEEP */
10348
10349 /**
10350 * ufshcd_set_dma_mask - Set dma mask based on the controller
10351 * addressing capability
10352 * @hba: per adapter instance
10353 *
10354 * Return: 0 for success, non-zero for failure.
10355 */
10356 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
10357 {
10358 if (hba->vops && hba->vops->set_dma_mask)
10359 return hba->vops->set_dma_mask(hba);
10360 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
10361 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
10362 return 0;
10363 }
10364 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
10365 }
10366
10367 /**
10368 * ufshcd_devres_release - devres cleanup handler, invoked during release of
10369 * hba->dev
10370 * @host: pointer to SCSI host
10371 */
10372 static void ufshcd_devres_release(void *host)
10373 {
10374 scsi_host_put(host);
10375 }
10376
10377 /**
10378 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
10379 * @dev: pointer to device handle
10380 * @hba_handle: driver private handle
10381 *
10382 * Return: 0 on success, non-zero value on failure.
10383 *
10384 * NOTE: There is no corresponding ufshcd_dealloc_host() because this function
10385 * keeps track of its allocations using devres and deallocates everything on
10386 * device removal automatically.
10387 */
10388 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
10389 {
10390 struct Scsi_Host *host;
10391 struct ufs_hba *hba;
10392 int err = 0;
10393
10394 if (!dev) {
10395 dev_err(dev,
10396 "Invalid memory reference for dev is NULL\n");
10397 err = -ENODEV;
10398 goto out_error;
10399 }
10400
10401 host = scsi_host_alloc(&ufshcd_driver_template,
10402 sizeof(struct ufs_hba));
10403 if (!host) {
10404 dev_err(dev, "scsi_host_alloc failed\n");
10405 err = -ENOMEM;
10406 goto out_error;
10407 }
10408
10409 err = devm_add_action_or_reset(dev, ufshcd_devres_release,
10410 host);
10411 if (err)
10412 return dev_err_probe(dev, err,
10413 "failed to add ufshcd dealloc action\n");
10414
10415 host->nr_maps = HCTX_TYPE_POLL + 1;
10416 hba = shost_priv(host);
10417 hba->host = host;
10418 hba->dev = dev;
10419 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
10420 hba->nop_out_timeout = NOP_OUT_TIMEOUT;
10421 ufshcd_set_sg_entry_size(hba, sizeof(struct ufshcd_sg_entry));
10422 INIT_LIST_HEAD(&hba->clk_list_head);
10423 spin_lock_init(&hba->outstanding_lock);
10424
10425 *hba_handle = hba;
10426
10427 out_error:
10428 return err;
10429 }
10430 EXPORT_SYMBOL(ufshcd_alloc_host);
10431
10432 /* This function exists because blk_mq_alloc_tag_set() requires this. */
10433 static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
10434 const struct blk_mq_queue_data *qd)
10435 {
10436 WARN_ON_ONCE(true);
10437 return BLK_STS_NOTSUPP;
10438 }
10439
10440 static const struct blk_mq_ops ufshcd_tmf_ops = {
10441 .queue_rq = ufshcd_queue_tmf,
10442 };
10443
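/*
 * Register the SCSI host: enable MCQ if supported (falling back to
 * single-doorbell mode on failure), add the SCSI host and allocate the TMF
 * tag set and queue.
 */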
10444 static int ufshcd_add_scsi_host(struct ufs_hba *hba)
10445 {
10446 int err;
10447
10448 if (is_mcq_supported(hba)) {
10449 ufshcd_mcq_enable(hba);
10450 err = ufshcd_alloc_mcq(hba);
10451 if (!err) {
10452 ufshcd_config_mcq(hba);
10453 } else {
10454 /* Continue with SDB mode */
10455 ufshcd_mcq_disable(hba);
10456 use_mcq_mode = false;
10457 dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
10458 err);
10459 }
10460 }
        if (!is_mcq_supported(hba) && !hba->lsdb_sup) {
                dev_err(hba->dev,
                        "%s: failed to initialize (legacy doorbell mode not supported)\n",
                        __func__);
                return -EINVAL;
        }

        err = scsi_add_host(hba->host, hba->dev);
        if (err) {
                dev_err(hba->dev, "scsi_add_host failed\n");
                return err;
        }
        hba->scsi_host_added = true;

        hba->tmf_tag_set = (struct blk_mq_tag_set) {
                .nr_hw_queues = 1,
                .queue_depth = hba->nutmrs,
                .ops = &ufshcd_tmf_ops,
        };
        err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
        if (err < 0)
                goto remove_scsi_host;
        hba->tmf_queue = blk_mq_alloc_queue(&hba->tmf_tag_set, NULL, NULL);
        if (IS_ERR(hba->tmf_queue)) {
                err = PTR_ERR(hba->tmf_queue);
                goto free_tmf_tag_set;
        }
        hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
                                    sizeof(*hba->tmf_rqs), GFP_KERNEL);
        if (!hba->tmf_rqs) {
                err = -ENOMEM;
                goto free_tmf_queue;
        }

        return 0;

free_tmf_queue:
        blk_mq_destroy_queue(hba->tmf_queue);
        blk_put_queue(hba->tmf_queue);

free_tmf_tag_set:
        blk_mq_free_tag_set(&hba->tmf_tag_set);

remove_scsi_host:
        if (hba->scsi_host_added)
                scsi_remove_host(hba->host);

        return err;
}

/**
 * ufshcd_init - Driver initialization routine
 * @hba: per-adapter instance
 * @mmio_base: base register address
 * @irq: Interrupt line of device
 *
 * Return: 0 on success, non-zero value on failure.
 */
int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
{
        int err;
        struct Scsi_Host *host = hba->host;
        struct device *dev = hba->dev;

        /*
         * dev_set_drvdata() must be called before any callbacks are registered
         * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon,
         * sysfs).
         */
        dev_set_drvdata(dev, hba);

        if (!mmio_base) {
                dev_err(hba->dev,
                        "Invalid memory reference for mmio_base is NULL\n");
                err = -ENODEV;
                goto out_error;
        }

        hba->mmio_base = mmio_base;
        hba->irq = irq;
        hba->vps = &ufs_hba_vps;

        /*
         * Initialize clk_gating.lock early because it is used in
         * ufshcd_setup_clocks().
         */
        spin_lock_init(&hba->clk_gating.lock);

        /*
         * Set the default power management level for runtime and system PM.
         * Host controller drivers can override them in their
         * 'ufs_hba_variant_ops::init' callback.
         *
         * Default power saving mode is to keep UFS link in Hibern8 state
         * and UFS device in sleep state.
         */
        hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
                                                UFS_SLEEP_PWR_MODE,
                                                UIC_LINK_HIBERN8_STATE);
        hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
                                                UFS_SLEEP_PWR_MODE,
                                                UIC_LINK_HIBERN8_STATE);

        init_completion(&hba->dev_cmd.complete);

        err = ufshcd_hba_init(hba);
        if (err)
                goto out_error;

        /* Read capabilities registers */
        err = ufshcd_hba_capabilities(hba);
        if (err)
                goto out_disable;

        /* Get UFS version supported by the controller */
        hba->ufs_version = ufshcd_get_ufs_version(hba);

        /* Get Interrupt bit mask per version */
        hba->intr_mask = ufshcd_get_intr_mask(hba);

        err = ufshcd_set_dma_mask(hba);
        if (err) {
                dev_err(hba->dev, "set dma mask failed\n");
                goto out_disable;
        }

        /* Allocate memory for host memory space */
        err = ufshcd_memory_alloc(hba);
        if (err) {
                dev_err(hba->dev, "Memory allocation failed\n");
                goto out_disable;
        }

        /* Configure LRB */
        ufshcd_host_memory_configure(hba);

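        /*
         * One slot (UFSHCD_NUM_RESERVED) is kept out of the SCSI tag space;
         * it is reserved for internal device management commands rather than
         * regular SCSI I/O.
         */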
        host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
        host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
        host->max_id = UFSHCD_MAX_ID;
        host->max_lun = UFS_MAX_LUNS;
        host->max_channel = UFSHCD_MAX_CHANNEL;
        host->unique_id = host->host_no;
        host->max_cmd_len = UFS_CDB_SIZE;
        host->queuecommand_may_block = !!(hba->caps & UFSHCD_CAP_CLK_GATING);

        /* Use default RPM delay if host not set */
        if (host->rpm_autosuspend_delay == 0)
                host->rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS;

        hba->max_pwr_info.is_valid = false;

        /* Initialize work queues */
        hba->eh_wq = alloc_ordered_workqueue("ufs_eh_wq_%d", WQ_MEM_RECLAIM,
                                             hba->host->host_no);
        if (!hba->eh_wq) {
                dev_err(hba->dev, "%s: failed to create eh workqueue\n",
                        __func__);
                err = -ENOMEM;
                goto out_disable;
        }
        INIT_WORK(&hba->eh_work, ufshcd_err_handler);
        INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);

        sema_init(&hba->host_sem, 1);

        /* Initialize UIC command mutex */
        mutex_init(&hba->uic_cmd_mutex);

        /* Initialize mutex for device management commands */
        mutex_init(&hba->dev_cmd.lock);

        /* Initialize mutex for exception event control */
        mutex_init(&hba->ee_ctrl_mutex);

        mutex_init(&hba->wb_mutex);
        init_rwsem(&hba->clk_scaling_lock);

        ufshcd_init_clk_gating(hba);

        ufshcd_init_clk_scaling(hba);

        /*
         * In order to avoid any spurious interrupt immediately after
         * registering UFS controller interrupt handler, clear any pending UFS
         * interrupt status and disable all the UFS interrupts.
         */
        ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
                      REG_INTERRUPT_STATUS);
        ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
        /*
         * Make sure that UFS interrupts are disabled and any pending interrupt
         * status is cleared before registering UFS interrupt handler.
         */
        ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

        /* IRQ registration */
        err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
        if (err) {
                dev_err(hba->dev, "request irq failed\n");
                goto out_disable;
        } else {
                hba->is_irq_enabled = true;
        }

        /* Reset the attached device */
        ufshcd_device_reset(hba);

        ufshcd_init_crypto(hba);

        /* Host controller enable */
        err = ufshcd_hba_enable(hba);
        if (err) {
                dev_err(hba->dev, "Host controller enable failed\n");
                ufshcd_print_evt_hist(hba);
                ufshcd_print_host_state(hba);
                goto out_disable;
        }

        INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, ufshcd_rpm_dev_flush_recheck_work);
        INIT_DELAYED_WORK(&hba->ufs_rtc_update_work, ufshcd_rtc_work);

        /* Set the default auto-hibernate idle timer value to 150 ms */
        if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
                hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
                            FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
        }
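        /*
         * With the scale field set to 3 the timer field is expressed in 1 ms
         * units, so the value 150 programmed above corresponds to a 150 ms
         * auto-hibernate idle timeout.
         */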

        /* Hold auto suspend until async scan completes */
        pm_runtime_get_sync(dev);

        /*
         * Assume that the UFS device was not put into a sleep or power-down
         * state by the boot firmware before the kernel started. This
         * assumption avoids performing link startup twice during
         * ufshcd_probe_hba().
         */
        ufshcd_set_ufs_dev_active(hba);

        /* Initialize hba, detect and initialize UFS device */
        ktime_t probe_start = ktime_get();

        hba->ufshcd_state = UFSHCD_STATE_RESET;

        err = ufshcd_link_startup(hba);
        if (err)
                goto out_disable;

        if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
                goto initialized;

        /* Debug counters initialization */
        ufshcd_clear_dbg_ufs_stats(hba);

        /* UniPro link is active now */
        ufshcd_set_link_active(hba);

        /* Verify device initialization by sending NOP OUT UPIU */
        err = ufshcd_verify_dev_init(hba);
        if (err)
                goto out_disable;

        /* Initiate UFS initialization and wait until it completes */
        err = ufshcd_complete_dev_init(hba);
        if (err)
                goto out_disable;

        err = ufshcd_device_params_init(hba);
        if (err)
                goto out_disable;

        err = ufshcd_post_device_init(hba);

initialized:
        ufshcd_process_probe_result(hba, probe_start, err);
        if (err)
                goto out_disable;

        err = ufshcd_add_scsi_host(hba);
        if (err)
                goto out_disable;

        async_schedule(ufshcd_async_scan, hba);
        ufs_sysfs_add_nodes(hba->dev);

        device_enable_async_suspend(dev);
        ufshcd_pm_qos_init(hba);
        return 0;

out_disable:
        hba->is_irq_enabled = false;
        ufshcd_hba_exit(hba);
out_error:
        return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);

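/*
 * Drop the runtime PM usage count reference that __ufshcd_suspend_prepare()
 * took (via ufshcd_rpm_get_noresume()) to keep the UFS device W-LU from being
 * runtime suspended while a system-wide PM transition is in progress.
 */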
void ufshcd_resume_complete(struct device *dev)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        if (hba->complete_put) {
                ufshcd_rpm_put(hba);
                hba->complete_put = false;
        }
}
EXPORT_SYMBOL_GPL(ufshcd_resume_complete);

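/*
 * Check whether the device W-LU is already runtime suspended in exactly the
 * device power mode and link state that the configured system suspend level
 * (spm_lvl) would select, in which case system suspend does not need to
 * resume it first.
 */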
static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba)
{
        struct device *dev = &hba->ufs_device_wlun->sdev_gendev;
        enum ufs_dev_pwr_mode dev_pwr_mode;
        enum uic_link_state link_state;
        unsigned long flags;
        bool res;

        spin_lock_irqsave(&dev->power.lock, flags);
        dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl);
        link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl);
        res = pm_runtime_suspended(dev) &&
              hba->curr_dev_pwr_mode == dev_pwr_mode &&
              hba->uic_link_state == link_state &&
              !hba->dev_info.b_rpm_dev_flush_capable;
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return res;
}

int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        int ret;

        /*
         * The SCSI core assumes that runtime PM and system PM are equivalent
         * for SCSI drivers and therefore does not wake up a runtime-suspended
         * device for system suspend. UFS does not follow that model; see
         * ufshcd_resume_complete().
         */
        if (hba->ufs_device_wlun) {
                /* Prevent runtime suspend */
                ufshcd_rpm_get_noresume(hba);
                /*
                 * Check if already runtime suspended in same state as system
                 * suspend would be.
                 */
                if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) {
                        /* RPM state is not ok for SPM, so runtime resume */
                        ret = ufshcd_rpm_resume(hba);
                        if (ret < 0 && ret != -EACCES) {
                                ufshcd_rpm_put(hba);
                                return ret;
                        }
                }
                hba->complete_put = true;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare);

int ufshcd_suspend_prepare(struct device *dev)
{
        return __ufshcd_suspend_prepare(dev, true);
}
EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);

#ifdef CONFIG_PM_SLEEP
static int ufshcd_wl_poweroff(struct device *dev)
{
        struct scsi_device *sdev = to_scsi_device(dev);
        struct ufs_hba *hba = shost_priv(sdev->host);

        __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
        return 0;
}
#endif

static int ufshcd_wl_probe(struct device *dev)
{
        struct scsi_device *sdev = to_scsi_device(dev);

        if (!is_device_wlun(sdev))
                return -ENODEV;

        blk_pm_runtime_init(sdev->request_queue, dev);
        pm_runtime_set_autosuspend_delay(dev, 0);
        pm_runtime_allow(dev);

        return 0;
}

static int ufshcd_wl_remove(struct device *dev)
{
        pm_runtime_forbid(dev);
        return 0;
}

static const struct dev_pm_ops ufshcd_wl_pm_ops = {
#ifdef CONFIG_PM_SLEEP
        .suspend = ufshcd_wl_suspend,
        .resume = ufshcd_wl_resume,
        .freeze = ufshcd_wl_suspend,
        .thaw = ufshcd_wl_resume,
        .poweroff = ufshcd_wl_poweroff,
        .restore = ufshcd_wl_resume,
#endif
        SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL)
};

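/*
 * Compile-time verification that the C bitfield declarations in
 * struct request_desc_header and struct utp_upiu_header produce exactly the
 * byte layout that the UFSHCI/UFS specifications define for these structures.
 */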
static void ufshcd_check_header_layout(void)
{
        /*
         * gcc compilers before version 10 cannot do constant-folding for
         * sub-byte bitfields. Hence skip the layout checks for gcc 9 and
         * before.
         */
        if (IS_ENABLED(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 100000)
                return;

        BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
                                .cci = 3})[0] != 3);

        BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
                                .ehs_length = 2})[1] != 2);

        BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
                                .enable_crypto = 1})[2]
                     != 0x80);

        BUILD_BUG_ON((((u8 *)&(struct request_desc_header){
                                .command_type = 5,
                                .data_direction = 3,
                                .interrupt = 1,
                        })[3]) != ((5 << 4) | (3 << 1) | 1));

        BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){
                                .dunl = cpu_to_le32(0xdeadbeef)})[1] !=
                     cpu_to_le32(0xdeadbeef));

        BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
                                .ocs = 4})[8] != 4);

        BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
                                .cds = 5})[9] != 5);

        BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){
                                .dunu = cpu_to_le32(0xbadcafe)})[3] !=
                     cpu_to_le32(0xbadcafe));

        BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){
                                .iid = 0xf })[4] != 0xf0);

        BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){
                                .command_set_type = 0xf })[4] != 0xf);
}

/*
 * ufs_dev_wlun_template - describes the UFS device W-LU
 *
 * The UFS device W-LU is used to send power management commands to the UFS
 * device, and all other LUs are PM consumers of it. Currently no sd driver
 * binds to W-LUs, so no LU-specific PM operations are performed for them.
 * Since the UFS design requires SSU (START STOP UNIT) to be sent to the UFS
 * device W-LU, a SCSI driver is registered for UFS W-LUs only.
 */
static struct scsi_driver ufs_dev_wlun_template = {
        .gendrv = {
                .name = "ufs_device_wlun",
                .probe = ufshcd_wl_probe,
                .remove = ufshcd_wl_remove,
                .pm = &ufshcd_wl_pm_ops,
                .shutdown = ufshcd_wl_shutdown,
        },
};

static int __init ufshcd_core_init(void)
{
        int ret;

        ufshcd_check_header_layout();

        ufs_debugfs_init();

        ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
        if (ret)
                ufs_debugfs_exit();
        return ret;
}

static void __exit ufshcd_core_exit(void)
{
        ufs_debugfs_exit();
        scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
}

module_init(ufshcd_core_init);
module_exit(ufshcd_core_exit);

MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_SOFTDEP("pre: governor_simpleondemand");
MODULE_LICENSE("GPL");