1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Driver for Realtek PCI-Express card reader
3  *
4  * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
5  *
6  * Author:
7  *   Wei WANG <wei_wang@realsil.com.cn>
8  */
9 
10 #include <linux/pci.h>
11 #include <linux/module.h>
12 #include <linux/slab.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/highmem.h>
15 #include <linux/interrupt.h>
16 #include <linux/delay.h>
17 #include <linux/idr.h>
18 #include <linux/platform_device.h>
19 #include <linux/mfd/core.h>
20 #include <linux/rtsx_pci.h>
21 #include <linux/mmc/card.h>
22 #include <linux/unaligned.h>
23 #include <linux/pm.h>
24 #include <linux/pm_runtime.h>
25 
26 #include "rtsx_pcr.h"
27 #include "rts5261.h"
28 #include "rts5228.h"
29 #include "rts5264.h"
30 
31 static bool msi_en = true;
32 module_param(msi_en, bool, S_IRUGO | S_IWUSR);
33 MODULE_PARM_DESC(msi_en, "Enable MSI");
34 
35 static DEFINE_IDR(rtsx_pci_idr);
36 static DEFINE_SPINLOCK(rtsx_pci_lock);
37 
38 static struct mfd_cell rtsx_pcr_cells[] = {
39 	[RTSX_SD_CARD] = {
40 		.name = DRV_NAME_RTSX_PCI_SDMMC,
41 	},
42 };
43 
44 static const struct pci_device_id rtsx_pci_ids[] = {
45 	{ PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
46 	{ PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
47 	{ PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
48 	{ PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
49 	{ PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
50 	{ PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
51 	{ PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
52 	{ PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
53 	{ PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
54 	{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
55 	{ PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
56 	{ PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
57 	{ PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 },
58 	{ PCI_DEVICE(0x10EC, 0x5264), PCI_CLASS_OTHERS << 16, 0xFF0000 },
59 	{ 0, }
60 };
61 
62 MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
63 
64 static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
65 {
66 	rtsx_pci_write_register(pcr, MSGTXDATA0,
67 				MASK_8_BIT_DEF, (u8) (latency & 0xFF));
68 	rtsx_pci_write_register(pcr, MSGTXDATA1,
69 				MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
70 	rtsx_pci_write_register(pcr, MSGTXDATA2,
71 				MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
72 	rtsx_pci_write_register(pcr, MSGTXDATA3,
73 				MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
74 	rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
75 		LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);
76 
77 	return 0;
78 }
79 
80 int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
81 {
82 	return rtsx_comm_set_ltr_latency(pcr, latency);
83 }
84 
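/*
 * Switch link ASPM on or off.  In ASPM_MODE_CFG the standard LNKCTL ASPM
 * control bits are updated through the PCIe capability; in ASPM_MODE_REG
 * the controller's ASPM_FORCE_CTL register is written instead.  When ASPM
 * L1 (aspm_en bit 1) is being disabled, a 10 ms delay follows the update.
 */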
85 static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
86 {
87 	if (pcr->aspm_enabled == enable)
88 		return;
89 
90 	if (pcr->aspm_mode == ASPM_MODE_CFG) {
91 		pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
92 						PCI_EXP_LNKCTL_ASPMC,
93 						enable ? pcr->aspm_en : 0);
94 	} else if (pcr->aspm_mode == ASPM_MODE_REG) {
95 		if (pcr->aspm_en & 0x02)
96 			rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
97 				FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
98 		else
99 			rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
100 				FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
101 	}
102 
103 	if (!enable && (pcr->aspm_en & 0x02))
104 		mdelay(10);
105 
106 	pcr->aspm_enabled = enable;
107 }
108 
109 static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
110 {
111 	if (pcr->ops->set_aspm)
112 		pcr->ops->set_aspm(pcr, false);
113 	else
114 		rtsx_comm_set_aspm(pcr, false);
115 }
116 
117 int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
118 {
119 	rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);
120 
121 	return 0;
122 }
123 
124 static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
125 {
126 	if (pcr->ops->set_l1off_cfg_sub_d0)
127 		pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
128 }
129 
130 static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
131 {
132 	struct rtsx_cr_option *option = &pcr->option;
133 
134 	rtsx_disable_aspm(pcr);
135 
136 	/* Fixes DMA transfer timeout issue after disabling ASPM on RTS5260 */
137 	msleep(1);
138 
139 	if (option->ltr_enabled)
140 		rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
141 
142 	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
143 		rtsx_set_l1off_sub_cfg_d0(pcr, 1);
144 }
145 
146 static void rtsx_pm_full_on(struct rtsx_pcr *pcr)
147 {
148 	rtsx_comm_pm_full_on(pcr);
149 }
150 
151 void rtsx_pci_start_run(struct rtsx_pcr *pcr)
152 {
153 	/* If the PCI device has been removed, don't queue idle work anymore */
154 	if (pcr->remove_pci)
155 		return;
156 
157 	if (pcr->state != PDEV_STAT_RUN) {
158 		pcr->state = PDEV_STAT_RUN;
159 		if (pcr->ops->enable_auto_blink)
160 			pcr->ops->enable_auto_blink(pcr);
161 		rtsx_pm_full_on(pcr);
162 	}
163 }
164 EXPORT_SYMBOL_GPL(rtsx_pci_start_run);
165 
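/*
 * Internal registers are accessed through the HAIMR window: address, mask
 * and data are packed into one 32-bit write, and HAIMR is then polled until
 * the TRANS_END bit clears.  For writes, the value read back is compared
 * against the data that was written and -EIO is returned on a mismatch.
 */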
166 int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
167 {
168 	int i;
169 	u32 val = HAIMR_WRITE_START;
170 
171 	val |= (u32)(addr & 0x3FFF) << 16;
172 	val |= (u32)mask << 8;
173 	val |= (u32)data;
174 
175 	rtsx_pci_writel(pcr, RTSX_HAIMR, val);
176 
177 	for (i = 0; i < MAX_RW_REG_CNT; i++) {
178 		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
179 		if ((val & HAIMR_TRANS_END) == 0) {
180 			if (data != (u8)val)
181 				return -EIO;
182 			return 0;
183 		}
184 	}
185 
186 	return -ETIMEDOUT;
187 }
188 EXPORT_SYMBOL_GPL(rtsx_pci_write_register);
189 
190 int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
191 {
192 	u32 val = HAIMR_READ_START;
193 	int i;
194 
195 	val |= (u32)(addr & 0x3FFF) << 16;
196 	rtsx_pci_writel(pcr, RTSX_HAIMR, val);
197 
198 	for (i = 0; i < MAX_RW_REG_CNT; i++) {
199 		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
200 		if ((val & HAIMR_TRANS_END) == 0)
201 			break;
202 	}
203 
204 	if (i >= MAX_RW_REG_CNT)
205 		return -ETIMEDOUT;
206 
207 	if (data)
208 		*data = (u8)(val & 0xFF);
209 
210 	return 0;
211 }
212 EXPORT_SYMBOL_GPL(rtsx_pci_read_register);
213 
214 int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
215 {
216 	int err, i, finished = 0;
217 	u8 tmp;
218 
219 	rtsx_pci_write_register(pcr, PHYDATA0, 0xFF, (u8)val);
220 	rtsx_pci_write_register(pcr, PHYDATA1, 0xFF, (u8)(val >> 8));
221 	rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
222 	rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x81);
223 
224 	for (i = 0; i < 100000; i++) {
225 		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
226 		if (err < 0)
227 			return err;
228 
229 		if (!(tmp & 0x80)) {
230 			finished = 1;
231 			break;
232 		}
233 	}
234 
235 	if (!finished)
236 		return -ETIMEDOUT;
237 
238 	return 0;
239 }
240 
241 int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
242 {
243 	if (pcr->ops->write_phy)
244 		return pcr->ops->write_phy(pcr, addr, val);
245 
246 	return __rtsx_pci_write_phy_register(pcr, addr, val);
247 }
248 EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);
249 
250 int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
251 {
252 	int err, i, finished = 0;
253 	u16 data;
254 	u8 tmp, val1, val2;
255 
256 	rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
257 	rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x80);
258 
259 	for (i = 0; i < 100000; i++) {
260 		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
261 		if (err < 0)
262 			return err;
263 
264 		if (!(tmp & 0x80)) {
265 			finished = 1;
266 			break;
267 		}
268 	}
269 
270 	if (!finished)
271 		return -ETIMEDOUT;
272 
273 	rtsx_pci_read_register(pcr, PHYDATA0, &val1);
274 	rtsx_pci_read_register(pcr, PHYDATA1, &val2);
275 	data = val1 | (val2 << 8);
276 
277 	if (val)
278 		*val = data;
279 
280 	return 0;
281 }
282 
283 int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
284 {
285 	if (pcr->ops->read_phy)
286 		return pcr->ops->read_phy(pcr, addr, val);
287 
288 	return __rtsx_pci_read_phy_register(pcr, addr, val);
289 }
290 EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
291 
292 void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
293 {
294 	if (pcr->ops->stop_cmd)
295 		return pcr->ops->stop_cmd(pcr);
296 
297 	rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
298 	rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
299 
300 	rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
301 	rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
302 }
303 EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);
304 
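/*
 * Queue one command in the host command buffer.  Each entry is a 32-bit
 * little-endian word: bits [31:30] command type, [29:16] register address,
 * [15:8] mask and [7:0] data.  The command index (pcr->ci) only advances
 * while the entry still fits within HOST_CMDS_BUF_LEN.
 */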
305 void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
306 		u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
307 {
308 	unsigned long flags;
309 	u32 val = 0;
310 	u32 *ptr = (u32 *)(pcr->host_cmds_ptr);
311 
312 	val |= (u32)(cmd_type & 0x03) << 30;
313 	val |= (u32)(reg_addr & 0x3FFF) << 16;
314 	val |= (u32)mask << 8;
315 	val |= (u32)data;
316 
317 	spin_lock_irqsave(&pcr->lock, flags);
318 	ptr += pcr->ci;
319 	if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
320 		put_unaligned_le32(val, ptr);
321 		ptr++;
322 		pcr->ci++;
323 	}
324 	spin_unlock_irqrestore(&pcr->lock, flags);
325 }
326 EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);
327 
328 void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
329 {
330 	u32 val = 1U << 31;
331 
332 	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
333 
334 	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
335 	/* Hardware Auto Response */
336 	val |= 0x40000000;
337 	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
338 }
339 EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);
340 
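/*
 * Start the queued host commands (via RTSX_HCBAR/RTSX_HCBCTLR) and wait for
 * the interrupt handler to signal TRANS_OK_INT or TRANS_FAIL_INT through
 * pcr->done.  On timeout or failure other than device removal, the command
 * and DMA engines are stopped with rtsx_pci_stop_cmd().
 */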
341 int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
342 {
343 	struct completion trans_done;
344 	u32 val = 1U << 31;
345 	long timeleft;
346 	unsigned long flags;
347 	int err = 0;
348 
349 	spin_lock_irqsave(&pcr->lock, flags);
350 
351 	/* set up data structures for the wakeup system */
352 	pcr->done = &trans_done;
353 	pcr->trans_result = TRANS_NOT_READY;
354 	init_completion(&trans_done);
355 
356 	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
357 
358 	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
359 	/* Hardware Auto Response */
360 	val |= 0x40000000;
361 	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
362 
363 	spin_unlock_irqrestore(&pcr->lock, flags);
364 
365 	/* Wait for TRANS_OK_INT */
366 	timeleft = wait_for_completion_interruptible_timeout(
367 			&trans_done, msecs_to_jiffies(timeout));
368 	if (timeleft <= 0) {
369 		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
370 		err = -ETIMEDOUT;
371 		goto finish_send_cmd;
372 	}
373 
374 	spin_lock_irqsave(&pcr->lock, flags);
375 	if (pcr->trans_result == TRANS_RESULT_FAIL)
376 		err = -EINVAL;
377 	else if (pcr->trans_result == TRANS_RESULT_OK)
378 		err = 0;
379 	else if (pcr->trans_result == TRANS_NO_DEVICE)
380 		err = -ENODEV;
381 	spin_unlock_irqrestore(&pcr->lock, flags);
382 
383 finish_send_cmd:
384 	spin_lock_irqsave(&pcr->lock, flags);
385 	pcr->done = NULL;
386 	spin_unlock_irqrestore(&pcr->lock, flags);
387 
388 	if ((err < 0) && (err != -ENODEV))
389 		rtsx_pci_stop_cmd(pcr);
390 
391 	if (pcr->finish_me)
392 		complete(pcr->finish_me);
393 
394 	return err;
395 }
396 EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);
397 
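/*
 * Append one entry to the ADMA scatter-gather table.  Each entry is a
 * little-endian 64-bit descriptor carrying the DMA address in the upper 32
 * bits plus the length and the VALID/END/TRANS_DATA option bits in the
 * lower half; RTS5261 and RTS5228 use a wider length encoding for segments
 * larger than 64 KB.
 */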
398 static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
399 		dma_addr_t addr, unsigned int len, int end)
400 {
401 	u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
402 	u64 val;
403 	u8 option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
404 
405 	pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);
406 
407 	if (end)
408 		option |= RTSX_SG_END;
409 
410 	if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5228)) {
411 		if (len > 0xFFFF)
412 			val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16)
413 				| (((u64)len >> 16) << 6) | option;
414 		else
415 			val = ((u64)addr << 32) | ((u64)len << 16) | option;
416 	} else {
417 		val = ((u64)addr << 32) | ((u64)len << 12) | option;
418 	}
419 	put_unaligned_le64(val, ptr);
420 	pcr->sgi++;
421 }
422 
423 int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
424 		int num_sg, bool read)
425 {
426 	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
427 
428 	if (pcr->remove_pci)
429 		return -EINVAL;
430 
431 	if ((sglist == NULL) || (num_sg <= 0))
432 		return -EINVAL;
433 
434 	return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
435 }
436 EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
437 
438 void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
439 		int num_sg, bool read)
440 {
441 	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
442 
443 	dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
444 }
445 EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
446 
447 int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
448 		int count, bool read, int timeout)
449 {
450 	struct completion trans_done;
451 	struct scatterlist *sg;
452 	dma_addr_t addr;
453 	long timeleft;
454 	unsigned long flags;
455 	unsigned int len;
456 	int i, err = 0;
457 	u32 val;
458 	u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
459 
460 	if (pcr->remove_pci)
461 		return -ENODEV;
462 
463 	if ((sglist == NULL) || (count < 1))
464 		return -EINVAL;
465 
466 	val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
467 	pcr->sgi = 0;
468 	for_each_sg(sglist, sg, count, i) {
469 		addr = sg_dma_address(sg);
470 		len = sg_dma_len(sg);
471 		rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
472 	}
473 
474 	spin_lock_irqsave(&pcr->lock, flags);
475 
476 	pcr->done = &trans_done;
477 	pcr->trans_result = TRANS_NOT_READY;
478 	init_completion(&trans_done);
479 	rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
480 	rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
481 
482 	spin_unlock_irqrestore(&pcr->lock, flags);
483 
484 	timeleft = wait_for_completion_interruptible_timeout(
485 			&trans_done, msecs_to_jiffies(timeout));
486 	if (timeleft <= 0) {
487 		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
488 		err = -ETIMEDOUT;
489 		goto out;
490 	}
491 
492 	spin_lock_irqsave(&pcr->lock, flags);
493 	if (pcr->trans_result == TRANS_RESULT_FAIL) {
494 		err = -EILSEQ;
495 		if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
496 			pcr->dma_error_count++;
497 	} else if (pcr->trans_result == TRANS_NO_DEVICE)
500 		err = -ENODEV;
501 	spin_unlock_irqrestore(&pcr->lock, flags);
502 
503 out:
504 	spin_lock_irqsave(&pcr->lock, flags);
505 	pcr->done = NULL;
506 	spin_unlock_irqrestore(&pcr->lock, flags);
507 
508 	if ((err < 0) && (err != -ENODEV))
509 		rtsx_pci_stop_cmd(pcr);
510 
511 	if (pcr->finish_me)
512 		complete(pcr->finish_me);
513 
514 	return err;
515 }
516 EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
517 
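/*
 * Read the internal ping-pong buffer (at most 512 bytes) through the
 * register interface, in batches of up to 256 registers starting at
 * PPBUF_BASE2.
 */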
518 int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
519 {
520 	int err;
521 	int i, j;
522 	u16 reg;
523 	u8 *ptr;
524 
525 	if (buf_len > 512)
526 		buf_len = 512;
527 
528 	ptr = buf;
529 	reg = PPBUF_BASE2;
530 	for (i = 0; i < buf_len / 256; i++) {
531 		rtsx_pci_init_cmd(pcr);
532 
533 		for (j = 0; j < 256; j++)
534 			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
535 
536 		err = rtsx_pci_send_cmd(pcr, 250);
537 		if (err < 0)
538 			return err;
539 
540 		memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
541 		ptr += 256;
542 	}
543 
544 	if (buf_len % 256) {
545 		rtsx_pci_init_cmd(pcr);
546 
547 		for (j = 0; j < buf_len % 256; j++)
548 			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
549 
550 		err = rtsx_pci_send_cmd(pcr, 250);
551 		if (err < 0)
552 			return err;
553 	}
554 
555 	memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);
556 
557 	return 0;
558 }
559 EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);
560 
561 int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
562 {
563 	int err;
564 	int i, j;
565 	u16 reg;
566 	u8 *ptr;
567 
568 	if (buf_len > 512)
569 		buf_len = 512;
570 
571 	ptr = buf;
572 	reg = PPBUF_BASE2;
573 	for (i = 0; i < buf_len / 256; i++) {
574 		rtsx_pci_init_cmd(pcr);
575 
576 		for (j = 0; j < 256; j++) {
577 			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
578 					reg++, 0xFF, *ptr);
579 			ptr++;
580 		}
581 
582 		err = rtsx_pci_send_cmd(pcr, 250);
583 		if (err < 0)
584 			return err;
585 	}
586 
587 	if (buf_len % 256) {
588 		rtsx_pci_init_cmd(pcr);
589 
590 		for (j = 0; j < buf_len % 256; j++) {
591 			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
592 					reg++, 0xFF, *ptr);
593 			ptr++;
594 		}
595 
596 		err = rtsx_pci_send_cmd(pcr, 250);
597 		if (err < 0)
598 			return err;
599 	}
600 
601 	return 0;
602 }
603 EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);
604 
605 static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
606 {
607 	rtsx_pci_init_cmd(pcr);
608 
609 	while (*tbl & 0xFFFF0000) {
610 		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
611 				(u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
612 		tbl++;
613 	}
614 
615 	return rtsx_pci_send_cmd(pcr, 100);
616 }
617 
618 int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
619 {
620 	const u32 *tbl;
621 
622 	if (card == RTSX_SD_CARD)
623 		tbl = pcr->sd_pull_ctl_enable_tbl;
624 	else if (card == RTSX_MS_CARD)
625 		tbl = pcr->ms_pull_ctl_enable_tbl;
626 	else
627 		return -EINVAL;
628 
629 	return rtsx_pci_set_pull_ctl(pcr, tbl);
630 }
631 EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);
632 
633 int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
634 {
635 	const u32 *tbl;
636 
637 	if (card == RTSX_SD_CARD)
638 		tbl = pcr->sd_pull_ctl_disable_tbl;
639 	else if (card == RTSX_MS_CARD)
640 		tbl = pcr->ms_pull_ctl_disable_tbl;
641 	else
642 		return -EINVAL;
643 
644 	return rtsx_pci_set_pull_ctl(pcr, tbl);
645 }
646 EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
647 
648 static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
649 {
650 	struct rtsx_hw_param *hw_param = &pcr->hw_param;
651 
652 	pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN
653 		| hw_param->interrupt_en;
654 
655 	if (pcr->num_slots > 1)
656 		pcr->bier |= MS_INT_EN;
657 
658 	/* Enable Bus Interrupt */
659 	rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);
660 
661 	pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
662 }
663 
664 static inline u8 double_ssc_depth(u8 depth)
665 {
666 	return ((depth > 1) ? (depth - 1) : depth);
667 }
668 
669 static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
670 {
671 	if (div > CLK_DIV_1) {
672 		if (ssc_depth > (div - 1))
673 			ssc_depth -= (div - 1);
674 		else
675 			ssc_depth = SSC_DEPTH_4M;
676 	}
677 
678 	return ssc_depth;
679 }
680 
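/*
 * Program the SSC clock for the requested card clock.  With the default
 * conversion the divider is n = clk(MHz) - 2; while n would fall below
 * MIN_DIV_N_PCR, the clock fed to the divider is doubled and the CLK_DIV
 * post-divider is raised to compensate, with the SSC depth revised to
 * match.
 */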
681 int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
682 		u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
683 {
684 	int err, clk;
685 	u8 n, clk_divider, mcu_cnt, div;
686 	static const u8 depth[] = {
687 		[RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
688 		[RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
689 		[RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
690 		[RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
691 		[RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
692 	};
693 
694 	if (PCI_PID(pcr) == PID_5261)
695 		return rts5261_pci_switch_clock(pcr, card_clock,
696 				ssc_depth, initial_mode, double_clk, vpclk);
697 	if (PCI_PID(pcr) == PID_5228)
698 		return rts5228_pci_switch_clock(pcr, card_clock,
699 				ssc_depth, initial_mode, double_clk, vpclk);
700 	if (PCI_PID(pcr) == PID_5264)
701 		return rts5264_pci_switch_clock(pcr, card_clock,
702 				ssc_depth, initial_mode, double_clk, vpclk);
703 
704 	if (initial_mode) {
705 		/* Use a clock of roughly 250 kHz in the initial stage */
706 		clk_divider = SD_CLK_DIVIDE_128;
707 		card_clock = 30000000;
708 	} else {
709 		clk_divider = SD_CLK_DIVIDE_0;
710 	}
711 	err = rtsx_pci_write_register(pcr, SD_CFG1,
712 			SD_CLK_DIVIDE_MASK, clk_divider);
713 	if (err < 0)
714 		return err;
715 
716 	/* Reduce card clock by 20MHz each time a DMA transfer error occurs */
717 	if (card_clock == UHS_SDR104_MAX_DTR &&
718 	    pcr->dma_error_count &&
719 	    PCI_PID(pcr) == RTS5227_DEVICE_ID)
720 		card_clock = UHS_SDR104_MAX_DTR -
721 			(pcr->dma_error_count * 20000000);
722 
723 	card_clock /= 1000000;
724 	pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
725 
726 	clk = card_clock;
727 	if (!initial_mode && double_clk)
728 		clk = card_clock * 2;
729 	pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
730 		clk, pcr->cur_clock);
731 
732 	if (clk == pcr->cur_clock)
733 		return 0;
734 
735 	if (pcr->ops->conv_clk_and_div_n)
736 		n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
737 	else
738 		n = (u8)(clk - 2);
739 	if ((clk <= 2) || (n > MAX_DIV_N_PCR))
740 		return -EINVAL;
741 
742 	mcu_cnt = (u8)(125/clk + 3);
743 	if (mcu_cnt > 15)
744 		mcu_cnt = 15;
745 
746 	/* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
747 	div = CLK_DIV_1;
748 	while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
749 		if (pcr->ops->conv_clk_and_div_n) {
750 			int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
751 					DIV_N_TO_CLK) * 2;
752 			n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
753 					CLK_TO_DIV_N);
754 		} else {
755 			n = (n + 2) * 2 - 2;
756 		}
757 		div++;
758 	}
759 	pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
760 
761 	ssc_depth = depth[ssc_depth];
762 	if (double_clk)
763 		ssc_depth = double_ssc_depth(ssc_depth);
764 
765 	ssc_depth = revise_ssc_depth(ssc_depth, div);
766 	pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
767 
768 	rtsx_pci_init_cmd(pcr);
769 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
770 			CLK_LOW_FREQ, CLK_LOW_FREQ);
771 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
772 			0xFF, (div << 4) | mcu_cnt);
773 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
774 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
775 			SSC_DEPTH_MASK, ssc_depth);
776 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
777 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
778 	if (vpclk) {
779 		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
780 				PHASE_NOT_RESET, 0);
781 		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
782 				PHASE_NOT_RESET, PHASE_NOT_RESET);
783 	}
784 
785 	err = rtsx_pci_send_cmd(pcr, 2000);
786 	if (err < 0)
787 		return err;
788 
789 	/* Wait for the SSC clock to become stable */
790 	udelay(SSC_CLOCK_STABLE_WAIT);
791 	err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
792 	if (err < 0)
793 		return err;
794 
795 	pcr->cur_clock = clk;
796 	return 0;
797 }
798 EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);
799 
800 int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
801 {
802 	if (pcr->ops->card_power_on)
803 		return pcr->ops->card_power_on(pcr, card);
804 
805 	return 0;
806 }
807 EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);
808 
809 int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
810 {
811 	if (pcr->ops->card_power_off)
812 		return pcr->ops->card_power_off(pcr, card);
813 
814 	return 0;
815 }
816 EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
817 
818 int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
819 {
820 	static const unsigned int cd_mask[] = {
821 		[RTSX_SD_CARD] = SD_EXIST,
822 		[RTSX_MS_CARD] = MS_EXIST
823 	};
824 
825 	if (!(pcr->flags & PCR_MS_PMOS)) {
826 		/* With a single PMOS, card access is not permitted if the
827 		 * card currently present is not the designated one.
828 		 */
829 		if (pcr->card_exist & (~cd_mask[card]))
830 			return -EIO;
831 	}
832 
833 	return 0;
834 }
835 EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);
836 
837 int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
838 {
839 	if (pcr->ops->switch_output_voltage)
840 		return pcr->ops->switch_output_voltage(pcr, voltage);
841 
842 	return 0;
843 }
844 EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
845 
846 unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
847 {
848 	unsigned int val;
849 
850 	val = rtsx_pci_readl(pcr, RTSX_BIPR);
851 	if (pcr->ops->cd_deglitch)
852 		val = pcr->ops->cd_deglitch(pcr);
853 
854 	return val;
855 }
856 EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);
857 
858 void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
859 {
860 	struct completion finish;
861 
862 	pcr->finish_me = &finish;
863 	init_completion(&finish);
864 
865 	if (pcr->done)
866 		complete(pcr->done);
867 
868 	if (!pcr->remove_pci)
869 		rtsx_pci_stop_cmd(pcr);
870 
871 	wait_for_completion_interruptible_timeout(&finish,
872 			msecs_to_jiffies(2));
873 	pcr->finish_me = NULL;
874 }
875 EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);
876 
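/*
 * Delayed work scheduled from the interrupt handler: latch the pending card
 * insert/remove bits under pcr->lock, update pcr->card_exist and notify the
 * SD/MS slot drivers through their card_event callbacks.
 */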
877 static void rtsx_pci_card_detect(struct work_struct *work)
878 {
879 	struct delayed_work *dwork;
880 	struct rtsx_pcr *pcr;
881 	unsigned long flags;
882 	unsigned int card_detect = 0, card_inserted, card_removed;
883 	u32 irq_status;
884 
885 	dwork = to_delayed_work(work);
886 	pcr = container_of(dwork, struct rtsx_pcr, carddet_work);
887 
888 	pcr_dbg(pcr, "--> %s\n", __func__);
889 
890 	mutex_lock(&pcr->pcr_mutex);
891 	spin_lock_irqsave(&pcr->lock, flags);
892 
893 	irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
894 	pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);
895 
896 	irq_status &= CARD_EXIST;
897 	card_inserted = pcr->card_inserted & irq_status;
898 	card_removed = pcr->card_removed;
899 	pcr->card_inserted = 0;
900 	pcr->card_removed = 0;
901 
902 	spin_unlock_irqrestore(&pcr->lock, flags);
903 
904 	if (card_inserted || card_removed) {
905 		pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
906 			card_inserted, card_removed);
907 
908 		if (pcr->ops->cd_deglitch)
909 			card_inserted = pcr->ops->cd_deglitch(pcr);
910 
911 		card_detect = card_inserted | card_removed;
912 
913 		pcr->card_exist |= card_inserted;
914 		pcr->card_exist &= ~card_removed;
915 	}
916 
917 	mutex_unlock(&pcr->pcr_mutex);
918 
919 	if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
920 		pcr->slots[RTSX_SD_CARD].card_event(
921 				pcr->slots[RTSX_SD_CARD].p_dev);
922 	if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
923 		pcr->slots[RTSX_MS_CARD].card_event(
924 				pcr->slots[RTSX_MS_CARD].p_dev);
925 }
926 
927 static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
928 {
929 	if (pcr->ops->process_ocp) {
930 		pcr->ops->process_ocp(pcr);
931 	} else {
932 		if (!pcr->option.ocp_en)
933 			return;
934 		rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
935 		if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
936 			rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
937 			rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
938 			rtsx_pci_clear_ocpstat(pcr);
939 			pcr->ocp_stat = 0;
940 		}
941 	}
942 }
943 
944 static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
945 {
946 	if (pcr->option.ocp_en)
947 		rtsx_pci_process_ocp(pcr);
948 
949 	return 0;
950 }
951 
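/*
 * Interrupt handler: read and clear RTSX_BIPR, record SD/MS card insertion
 * and removal, complete pcr->done when a transfer finishes or fails, and
 * schedule the card-detect work for any card change that is not an
 * over-current event.
 */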
952 static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
953 {
954 	struct rtsx_pcr *pcr = dev_id;
955 	u32 int_reg;
956 
957 	if (!pcr)
958 		return IRQ_NONE;
959 
960 	spin_lock(&pcr->lock);
961 
962 	int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
963 	/* Clear interrupt flag */
964 	rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
965 	if ((int_reg & pcr->bier) == 0) {
966 		spin_unlock(&pcr->lock);
967 		return IRQ_NONE;
968 	}
969 	if (int_reg == 0xFFFFFFFF) {
970 		spin_unlock(&pcr->lock);
971 		return IRQ_HANDLED;
972 	}
973 
974 	int_reg &= (pcr->bier | 0x7FFFFF);
975 
976 	if ((int_reg & SD_OC_INT) ||
977 			((int_reg & SD_OVP_INT) && (PCI_PID(pcr) == PID_5264)))
978 		rtsx_pci_process_ocp_interrupt(pcr);
979 
980 	if (int_reg & SD_INT) {
981 		if (int_reg & SD_EXIST) {
982 			pcr->card_inserted |= SD_EXIST;
983 		} else {
984 			pcr->card_removed |= SD_EXIST;
985 			pcr->card_inserted &= ~SD_EXIST;
986 		}
987 
988 		if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) {
989 			rtsx_pci_write_register(pcr, RTS5261_FW_STATUS,
990 				RTS5261_EXPRESS_LINK_FAIL_MASK, 0);
991 			pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;
992 		}
993 
994 		pcr->dma_error_count = 0;
995 	}
996 
997 	if (int_reg & MS_INT) {
998 		if (int_reg & MS_EXIST) {
999 			pcr->card_inserted |= MS_EXIST;
1000 		} else {
1001 			pcr->card_removed |= MS_EXIST;
1002 			pcr->card_inserted &= ~MS_EXIST;
1003 		}
1004 	}
1005 
1006 	if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
1007 		if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
1008 			pcr->trans_result = TRANS_RESULT_FAIL;
1009 			if (pcr->done)
1010 				complete(pcr->done);
1011 		} else if (int_reg & TRANS_OK_INT) {
1012 			pcr->trans_result = TRANS_RESULT_OK;
1013 			if (pcr->done)
1014 				complete(pcr->done);
1015 		}
1016 	}
1017 
1018 	if ((pcr->card_inserted || pcr->card_removed) && !(int_reg & SD_OC_INT))
1019 		schedule_delayed_work(&pcr->carddet_work,
1020 				msecs_to_jiffies(200));
1021 
1022 	spin_unlock(&pcr->lock);
1023 	return IRQ_HANDLED;
1024 }
1025 
1026 static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
1027 {
1028 	pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n",
1029 			__func__, pcr->msi_en, pcr->pci->irq);
1030 
1031 	if (request_irq(pcr->pci->irq, rtsx_pci_isr,
1032 			pcr->msi_en ? 0 : IRQF_SHARED,
1033 			DRV_NAME_RTSX_PCI, pcr)) {
1034 		dev_err(&(pcr->pci->dev),
1035 			"rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
1036 			pcr->pci->irq);
1037 		return -1;
1038 	}
1039 
1040 	pcr->irq = pcr->pci->irq;
1041 	pci_intx(pcr->pci, !pcr->msi_en);
1042 
1043 	return 0;
1044 }
1045 
1046 static void rtsx_base_force_power_down(struct rtsx_pcr *pcr)
1047 {
1048 	/* Set relink_time to 0 */
1049 	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
1050 	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
1051 	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
1052 			RELINK_TIME_MASK, 0);
1053 
1054 	rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
1055 			D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
1056 
1057 	rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
1058 }
1059 
1060 static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
1061 {
1062 	if (pcr->ops->turn_off_led)
1063 		pcr->ops->turn_off_led(pcr);
1064 
1065 	rtsx_pci_writel(pcr, RTSX_BIER, 0);
1066 	pcr->bier = 0;
1067 
1068 	rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
1069 	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);
1070 
1071 	if (pcr->ops->force_power_down)
1072 		pcr->ops->force_power_down(pcr, pm_state, runtime);
1073 	else
1074 		rtsx_base_force_power_down(pcr);
1075 }
1076 
1077 void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
1078 {
1079 	u8 val = SD_OCP_INT_EN | SD_DETECT_EN;
1080 
1081 	if (pcr->ops->enable_ocp) {
1082 		pcr->ops->enable_ocp(pcr);
1083 	} else {
1084 		rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1085 		rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
1086 	}
1088 }
1089 
1090 void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
1091 {
1092 	u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;
1093 
1094 	if (pcr->ops->disable_ocp) {
1095 		pcr->ops->disable_ocp(pcr);
1096 	} else {
1097 		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1098 		rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
1099 				OC_POWER_DOWN);
1100 	}
1101 }
1102 
1103 void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
1104 {
1105 	if (pcr->ops->init_ocp) {
1106 		pcr->ops->init_ocp(pcr);
1107 	} else {
1108 		struct rtsx_cr_option *option = &(pcr->option);
1109 
1110 		if (option->ocp_en) {
1111 			u8 val = option->sd_800mA_ocp_thd;
1112 
1113 			rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1114 			rtsx_pci_write_register(pcr, REG_OCPPARA1,
1115 				SD_OCP_TIME_MASK, SD_OCP_TIME_800);
1116 			rtsx_pci_write_register(pcr, REG_OCPPARA2,
1117 				SD_OCP_THD_MASK, val);
1118 			rtsx_pci_write_register(pcr, REG_OCPGLITCH,
1119 				SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
1120 			rtsx_pci_enable_ocp(pcr);
1121 		}
1122 	}
1123 }
1124 
1125 int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
1126 {
1127 	if (pcr->ops->get_ocpstat)
1128 		return pcr->ops->get_ocpstat(pcr, val);
1129 	else
1130 		return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
1131 }
1132 
1133 void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
1134 {
1135 	if (pcr->ops->clear_ocpstat) {
1136 		pcr->ops->clear_ocpstat(pcr);
1137 	} else {
1138 		u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
1139 		u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
1140 
1141 		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
1142 		udelay(100);
1143 		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1144 	}
1145 }
1146 
1147 void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr)
1148 {
1149 	u16 val;
1150 
1151 	if ((PCI_PID(pcr) != PID_525A) &&
1152 		(PCI_PID(pcr) != PID_5260) &&
1153 		(PCI_PID(pcr) != PID_5264)) {
1154 		rtsx_pci_read_phy_register(pcr, 0x01, &val);
1155 		val |= 1<<9;
1156 		rtsx_pci_write_phy_register(pcr, 0x01, val);
1157 	}
1158 	rtsx_pci_write_register(pcr, REG_CFG_OOBS_OFF_TIMER, 0xFF, 0x32);
1159 	rtsx_pci_write_register(pcr, REG_CFG_OOBS_ON_TIMER, 0xFF, 0x05);
1160 	rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x83);
1161 	rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0xDE);
1163 }
1164 
1165 void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr)
1166 {
1167 	u16 val;
1168 
1169 	if ((PCI_PID(pcr) != PID_525A) &&
1170 		(PCI_PID(pcr) != PID_5260) &&
1171 		(PCI_PID(pcr) != PID_5264)) {
1172 		rtsx_pci_read_phy_register(pcr, 0x01, &val);
1173 		val &= ~(1<<9);
1174 		rtsx_pci_write_phy_register(pcr, 0x01, val);
1175 	}
1176 	rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x03);
1177 	rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0x00);
1179 }
1180 
1181 static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
1182 {
1183 	struct pci_dev *pdev = pcr->pci;
1184 	int err;
1185 
1186 	if (PCI_PID(pcr) == PID_5228)
1187 		rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1, RTS5228_LDO1_SR_TIME_MASK,
1188 				RTS5228_LDO1_SR_0_5);
1189 
1190 	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
1191 
1192 	rtsx_pci_enable_bus_int(pcr);
1193 
1194 	/* Power on SSC */
1195 	if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) {
1196 		/* Gating real mcu clock */
1197 		err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1,
1198 			RTS5261_MCU_CLOCK_GATING, 0);
1199 		err = rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
1200 			SSC_POWER_DOWN, 0);
1201 	} else {
1202 		err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
1203 	}
1204 	if (err < 0)
1205 		return err;
1206 
1207 	/* Wait for SSC power to become stable */
1208 	udelay(200);
1209 
1210 	rtsx_disable_aspm(pcr);
1211 	if (pcr->ops->optimize_phy) {
1212 		err = pcr->ops->optimize_phy(pcr);
1213 		if (err < 0)
1214 			return err;
1215 	}
1216 
1217 	rtsx_pci_init_cmd(pcr);
1218 
1219 	/* Set mcu_cnt to 7 to ensure data can be sampled properly */
1220 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);
1221 
1222 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
1223 	/* Disable card clock */
1224 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
1225 	/* Reset delink mode */
1226 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
1227 	/* Card driving select */
1228 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
1229 			0xFF, pcr->card_drive_sel);
1230 	/* Enable SSC Clock */
1231 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
1232 			0xFF, SSC_8X_EN | SSC_SEL_4M);
1233 	if (PCI_PID(pcr) == PID_5261)
1234 		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1235 			RTS5261_SSC_DEPTH_2M);
1236 	else if (PCI_PID(pcr) == PID_5228)
1237 		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1238 			RTS5228_SSC_DEPTH_2M);
1239 	else if (is_version(pcr, 0x5264, IC_VER_A))
1240 		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
1241 	else if (PCI_PID(pcr) == PID_5264)
1242 		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1243 			RTS5264_SSC_DEPTH_2M);
1244 	else
1245 		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
1246 
1247 	/* Disable cd_pwr_save */
1248 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
1249 	/* Clear Link Ready Interrupt */
1250 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
1251 			LINK_RDY_INT, LINK_RDY_INT);
1252 	/* Enlarge the estimation window of the PERST# glitch
1253 	 * to reduce the chance of spurious card interrupts
1254 	 */
1255 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
1256 	/* Update RC oscillator to 400k
1257 	 * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
1258 	 *                1: 2M  0: 400k
1259 	 */
1260 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
1261 	/* Set interrupt write clear
1262 	 * bit 1: U_elbi_if_rd_clr_en
1263 	 *	1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
1264 	 *	0: ELBI interrupt flag[31:22] & [7:0] can only be cleared by writing
1265 	 */
1266 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);
1267 
1268 	err = rtsx_pci_send_cmd(pcr, 100);
1269 	if (err < 0)
1270 		return err;
1271 
1272 	switch (PCI_PID(pcr)) {
1273 	case PID_5250:
1274 	case PID_524A:
1275 	case PID_525A:
1276 	case PID_5260:
1277 	case PID_5261:
1278 	case PID_5228:
1279 	case PID_5264:
1280 		rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
1281 		break;
1282 	default:
1283 		break;
1284 	}
1285 
1286 	/* Initialize over-current protection (OCP) */
1287 	rtsx_pci_init_ocp(pcr);
1288 
1289 	/* Enable clk_request_n to enable clock power management */
1290 	pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
1291 					0, PCI_EXP_LNKCTL_CLKREQ_EN);
1292 	/* Enter L1 when host tx idle */
1293 	pci_write_config_byte(pdev, 0x70F, 0x5B);
1294 
1295 	if (pcr->ops->extra_init_hw) {
1296 		err = pcr->ops->extra_init_hw(pcr);
1297 		if (err < 0)
1298 			return err;
1299 	}
1300 
1301 	if (pcr->aspm_mode == ASPM_MODE_REG)
1302 		rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
1303 
1304 	/* No card-detect interrupt fires if the driver is probed with a
1305 	 * card already inserted, so initialize pcr->card_exist here.
1306 	 */
1307 	if (pcr->ops->cd_deglitch)
1308 		pcr->card_exist = pcr->ops->cd_deglitch(pcr);
1309 	else
1310 		pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;
1311 
1312 	return 0;
1313 }
1314 
1315 static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
1316 {
1317 	struct rtsx_cr_option *option = &(pcr->option);
1318 	int err, l1ss;
1319 	u32 lval;
1320 	u16 cfg_val;
1321 	u8 val;
1322 
1323 	spin_lock_init(&pcr->lock);
1324 	mutex_init(&pcr->pcr_mutex);
1325 
1326 	switch (PCI_PID(pcr)) {
1327 	default:
1328 	case 0x5209:
1329 		rts5209_init_params(pcr);
1330 		break;
1331 
1332 	case 0x5229:
1333 		rts5229_init_params(pcr);
1334 		break;
1335 
1336 	case 0x5289:
1337 		rtl8411_init_params(pcr);
1338 		break;
1339 
1340 	case 0x5227:
1341 		rts5227_init_params(pcr);
1342 		break;
1343 
1344 	case 0x522A:
1345 		rts522a_init_params(pcr);
1346 		break;
1347 
1348 	case 0x5249:
1349 		rts5249_init_params(pcr);
1350 		break;
1351 
1352 	case 0x524A:
1353 		rts524a_init_params(pcr);
1354 		break;
1355 
1356 	case 0x525A:
1357 		rts525a_init_params(pcr);
1358 		break;
1359 
1360 	case 0x5287:
1361 		rtl8411b_init_params(pcr);
1362 		break;
1363 
1364 	case 0x5286:
1365 		rtl8402_init_params(pcr);
1366 		break;
1367 
1368 	case 0x5260:
1369 		rts5260_init_params(pcr);
1370 		break;
1371 
1372 	case 0x5261:
1373 		rts5261_init_params(pcr);
1374 		break;
1375 
1376 	case 0x5228:
1377 		rts5228_init_params(pcr);
1378 		break;
1379 
1380 	case 0x5264:
1381 		rts5264_init_params(pcr);
1382 		break;
1383 	}
1384 
1385 	pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
1386 			PCI_PID(pcr), pcr->ic_version);
1387 
1388 	pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
1389 			GFP_KERNEL);
1390 	if (!pcr->slots)
1391 		return -ENOMEM;
1392 
1393 	if (pcr->aspm_mode == ASPM_MODE_CFG) {
1394 		pcie_capability_read_word(pcr->pci, PCI_EXP_LNKCTL, &cfg_val);
1395 		if (cfg_val & PCI_EXP_LNKCTL_ASPM_L1)
1396 			pcr->aspm_enabled = true;
1397 		else
1398 			pcr->aspm_enabled = false;
1399 
1400 	} else if (pcr->aspm_mode == ASPM_MODE_REG) {
1401 		rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
1402 		if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
1403 			pcr->aspm_enabled = false;
1404 		else
1405 			pcr->aspm_enabled = true;
1406 	}
1407 
1408 	l1ss = pci_find_ext_capability(pcr->pci, PCI_EXT_CAP_ID_L1SS);
1409 	if (l1ss) {
1410 		pci_read_config_dword(pcr->pci, l1ss + PCI_L1SS_CTL1, &lval);
1411 
1412 		if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
1413 			rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
1414 		else
1415 			rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
1416 
1417 		if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
1418 			rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
1419 		else
1420 			rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
1421 
1422 		if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
1423 			rtsx_set_dev_flag(pcr, PM_L1_1_EN);
1424 		else
1425 			rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
1426 
1427 		if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
1428 			rtsx_set_dev_flag(pcr, PM_L1_2_EN);
1429 		else
1430 			rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
1431 
1432 		pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &cfg_val);
1433 		if (cfg_val & PCI_EXP_DEVCTL2_LTR_EN) {
1434 			option->ltr_enabled = true;
1435 			option->ltr_active = true;
1436 		} else {
1437 			option->ltr_enabled = false;
1438 		}
1439 
1440 		if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
1441 				| PM_L1_1_EN | PM_L1_2_EN))
1442 			option->force_clkreq_0 = false;
1443 		else
1444 			option->force_clkreq_0 = true;
1445 	} else {
1446 		option->ltr_enabled = false;
1447 		option->force_clkreq_0 = true;
1448 	}
1449 
1450 	if (pcr->ops->fetch_vendor_settings)
1451 		pcr->ops->fetch_vendor_settings(pcr);
1452 
1453 	pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
1454 	pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
1455 			pcr->sd30_drive_sel_1v8);
1456 	pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
1457 			pcr->sd30_drive_sel_3v3);
1458 	pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
1459 			pcr->card_drive_sel);
1460 	pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);
1461 
1462 	pcr->state = PDEV_STAT_IDLE;
1463 	err = rtsx_pci_init_hw(pcr);
1464 	if (err < 0) {
1465 		kfree(pcr->slots);
1466 		return err;
1467 	}
1468 
1469 	return 0;
1470 }
1471 
1472 static int rtsx_pci_probe(struct pci_dev *pcidev,
1473 			  const struct pci_device_id *id)
1474 {
1475 	struct rtsx_pcr *pcr;
1476 	struct pcr_handle *handle;
1477 	u32 base, len;
1478 	int ret, i, bar = 0;
1479 
1480 	dev_dbg(&(pcidev->dev),
1481 		": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
1482 		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
1483 		(int)pcidev->revision);
1484 
1485 	ret = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32));
1486 	if (ret < 0)
1487 		return ret;
1488 
1489 	ret = pci_enable_device(pcidev);
1490 	if (ret)
1491 		return ret;
1492 
1493 	ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
1494 	if (ret)
1495 		goto disable;
1496 
1497 	pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
1498 	if (!pcr) {
1499 		ret = -ENOMEM;
1500 		goto release_pci;
1501 	}
1502 
1503 	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1504 	if (!handle) {
1505 		ret = -ENOMEM;
1506 		goto free_pcr;
1507 	}
1508 	handle->pcr = pcr;
1509 
1510 	idr_preload(GFP_KERNEL);
1511 	spin_lock(&rtsx_pci_lock);
1512 	ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
1513 	if (ret >= 0)
1514 		pcr->id = ret;
1515 	spin_unlock(&rtsx_pci_lock);
1516 	idr_preload_end();
1517 	if (ret < 0)
1518 		goto free_handle;
1519 
1520 	pcr->pci = pcidev;
1521 	dev_set_drvdata(&pcidev->dev, handle);
1522 
1523 	if ((CHK_PCI_PID(pcr, 0x525A)) || (CHK_PCI_PID(pcr, 0x5264)))
1524 		bar = 1;
1525 	len = pci_resource_len(pcidev, bar);
1526 	base = pci_resource_start(pcidev, bar);
1527 	pcr->remap_addr = ioremap(base, len);
1528 	if (!pcr->remap_addr) {
1529 		ret = -ENOMEM;
1530 		goto free_idr;
1531 	}
1532 
1533 	pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
1534 			RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
1535 			GFP_KERNEL);
1536 	if (pcr->rtsx_resv_buf == NULL) {
1537 		ret = -ENXIO;
1538 		goto unmap;
1539 	}
1540 	pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
1541 	pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
1542 	pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
1543 	pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
1544 	pcr->card_inserted = 0;
1545 	pcr->card_removed = 0;
1546 	INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
1547 
1548 	pcr->msi_en = msi_en;
1549 	if (pcr->msi_en) {
1550 		ret = pci_enable_msi(pcidev);
1551 		if (ret)
1552 			pcr->msi_en = false;
1553 	}
1554 
1555 	ret = rtsx_pci_acquire_irq(pcr);
1556 	if (ret < 0)
1557 		goto disable_msi;
1558 
1559 	pci_set_master(pcidev);
1560 	synchronize_irq(pcr->irq);
1561 
1562 	ret = rtsx_pci_init_chip(pcr);
1563 	if (ret < 0)
1564 		goto disable_irq;
1565 
1566 	for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
1567 		rtsx_pcr_cells[i].platform_data = handle;
1568 		rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
1569 	}
1570 
1572 	ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
1573 			ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
1574 	if (ret < 0)
1575 		goto free_slots;
1576 
1577 	pm_runtime_allow(&pcidev->dev);
1578 	pm_runtime_put(&pcidev->dev);
1579 
1580 	return 0;
1581 
1582 free_slots:
1583 	kfree(pcr->slots);
1584 disable_irq:
1585 	free_irq(pcr->irq, (void *)pcr);
1586 disable_msi:
1587 	if (pcr->msi_en)
1588 		pci_disable_msi(pcr->pci);
1589 	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1590 			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1591 unmap:
1592 	iounmap(pcr->remap_addr);
1593 free_idr:
1594 	spin_lock(&rtsx_pci_lock);
1595 	idr_remove(&rtsx_pci_idr, pcr->id);
1596 	spin_unlock(&rtsx_pci_lock);
1597 free_handle:
1598 	kfree(handle);
1599 free_pcr:
1600 	kfree(pcr);
1601 release_pci:
1602 	pci_release_regions(pcidev);
1603 disable:
1604 	pci_disable_device(pcidev);
1605 
1606 	return ret;
1607 }
1608 
1609 static void rtsx_pci_remove(struct pci_dev *pcidev)
1610 {
1611 	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1612 	struct rtsx_pcr *pcr = handle->pcr;
1613 
1614 	pcr->remove_pci = true;
1615 
1616 	pm_runtime_get_sync(&pcidev->dev);
1617 	pm_runtime_forbid(&pcidev->dev);
1618 
1619 	/* Disable interrupts at the pcr level */
1620 	spin_lock_irq(&pcr->lock);
1621 	rtsx_pci_writel(pcr, RTSX_BIER, 0);
1622 	pcr->bier = 0;
1623 	spin_unlock_irq(&pcr->lock);
1624 
1625 	cancel_delayed_work_sync(&pcr->carddet_work);
1626 
1627 	mfd_remove_devices(&pcidev->dev);
1628 
1629 	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1630 			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1631 	free_irq(pcr->irq, (void *)pcr);
1632 	if (pcr->msi_en)
1633 		pci_disable_msi(pcr->pci);
1634 	iounmap(pcr->remap_addr);
1635 
1636 	pci_release_regions(pcidev);
1637 	pci_disable_device(pcidev);
1638 
1639 	spin_lock(&rtsx_pci_lock);
1640 	idr_remove(&rtsx_pci_idr, pcr->id);
1641 	spin_unlock(&rtsx_pci_lock);
1642 
1643 	kfree(pcr->slots);
1644 	kfree(pcr);
1645 	kfree(handle);
1646 
1647 	dev_dbg(&(pcidev->dev),
1648 		": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
1649 		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
1650 }
1651 
1652 static int __maybe_unused rtsx_pci_suspend(struct device *dev_d)
1653 {
1654 	struct pci_dev *pcidev = to_pci_dev(dev_d);
1655 	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1656 	struct rtsx_pcr *pcr = handle->pcr;
1657 
1658 	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1659 
1660 	cancel_delayed_work_sync(&pcr->carddet_work);
1661 
1662 	mutex_lock(&pcr->pcr_mutex);
1663 
1664 	rtsx_pci_power_off(pcr, HOST_ENTER_S3, false);
1665 
1666 	mutex_unlock(&pcr->pcr_mutex);
1667 	return 0;
1668 }
1669 
1670 static int __maybe_unused rtsx_pci_resume(struct device *dev_d)
1671 {
1672 	struct pci_dev *pcidev = to_pci_dev(dev_d);
1673 	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1674 	struct rtsx_pcr *pcr = handle->pcr;
1675 	int ret = 0;
1676 
1677 	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1678 
1679 	mutex_lock(&pcr->pcr_mutex);
1680 
1681 	ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1682 	if (ret)
1683 		goto out;
1684 
1685 	ret = rtsx_pci_init_hw(pcr);
1686 	if (ret)
1687 		goto out;
1688 
1689 out:
1690 	mutex_unlock(&pcr->pcr_mutex);
1691 	return ret;
1692 }
1693 
1694 #ifdef CONFIG_PM
1695 
1696 static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
1697 {
1698 	if (pcr->ops->set_aspm)
1699 		pcr->ops->set_aspm(pcr, true);
1700 	else
1701 		rtsx_comm_set_aspm(pcr, true);
1702 }
1703 
1704 static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
1705 {
1706 	struct rtsx_cr_option *option = &pcr->option;
1707 
1708 	if (option->ltr_enabled) {
1709 		u32 latency = option->ltr_l1off_latency;
1710 
1711 		if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
1712 			mdelay(option->l1_snooze_delay);
1713 
1714 		rtsx_set_ltr_latency(pcr, latency);
1715 	}
1716 
1717 	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
1718 		rtsx_set_l1off_sub_cfg_d0(pcr, 0);
1719 
1720 	rtsx_enable_aspm(pcr);
1721 }
1722 
1723 static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
1724 {
1725 	rtsx_comm_pm_power_saving(pcr);
1726 }
1727 
1728 static void rtsx_pci_shutdown(struct pci_dev *pcidev)
1729 {
1730 	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1731 	struct rtsx_pcr *pcr = handle->pcr;
1732 
1733 	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1734 
1735 	rtsx_pci_power_off(pcr, HOST_ENTER_S1, false);
1736 
1737 	pci_disable_device(pcidev);
1738 	free_irq(pcr->irq, (void *)pcr);
1739 	if (pcr->msi_en)
1740 		pci_disable_msi(pcr->pci);
1741 }
1742 
1743 static int rtsx_pci_runtime_idle(struct device *device)
1744 {
1745 	struct pci_dev *pcidev = to_pci_dev(device);
1746 	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1747 	struct rtsx_pcr *pcr = handle->pcr;
1748 
1749 	dev_dbg(device, "--> %s\n", __func__);
1750 
1751 	mutex_lock(&pcr->pcr_mutex);
1752 
1753 	pcr->state = PDEV_STAT_IDLE;
1754 
1755 	if (pcr->ops->disable_auto_blink)
1756 		pcr->ops->disable_auto_blink(pcr);
1757 	if (pcr->ops->turn_off_led)
1758 		pcr->ops->turn_off_led(pcr);
1759 
1760 	rtsx_pm_power_saving(pcr);
1761 
1762 	mutex_unlock(&pcr->pcr_mutex);
1763 
1764 	if (pcr->rtd3_en)
1765 		pm_schedule_suspend(device, 10000);
1766 
1767 	return -EBUSY;
1768 }
1769 
1770 static int rtsx_pci_runtime_suspend(struct device *device)
1771 {
1772 	struct pci_dev *pcidev = to_pci_dev(device);
1773 	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1774 	struct rtsx_pcr *pcr = handle->pcr;
1775 
1776 	dev_dbg(device, "--> %s\n", __func__);
1777 
1778 	cancel_delayed_work_sync(&pcr->carddet_work);
1779 
1780 	mutex_lock(&pcr->pcr_mutex);
1781 	rtsx_pci_power_off(pcr, HOST_ENTER_S3, true);
1782 
1783 	mutex_unlock(&pcr->pcr_mutex);
1784 
1785 	return 0;
1786 }
1787 
1788 static int rtsx_pci_runtime_resume(struct device *device)
1789 {
1790 	struct pci_dev *pcidev = to_pci_dev(device);
1791 	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1792 	struct rtsx_pcr *pcr = handle->pcr;
1793 
1794 	dev_dbg(device, "--> %s\n", __func__);
1795 
1796 	mutex_lock(&pcr->pcr_mutex);
1797 
1798 	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1799 
1800 	rtsx_pci_init_hw(pcr);
1801 
1802 	if (pcr->slots[RTSX_SD_CARD].p_dev != NULL) {
1803 		pcr->slots[RTSX_SD_CARD].card_event(
1804 				pcr->slots[RTSX_SD_CARD].p_dev);
1805 	}
1806 
1807 	mutex_unlock(&pcr->pcr_mutex);
1808 	return 0;
1809 }
1810 
1811 #else /* CONFIG_PM */
1812 
1813 #define rtsx_pci_shutdown NULL
1814 #define rtsx_pci_runtime_suspend NULL
1815 #define rtsx_pci_runtime_resume NULL
1816 
1817 #endif /* CONFIG_PM */
1818 
1819 static const struct dev_pm_ops rtsx_pci_pm_ops = {
1820 	SET_SYSTEM_SLEEP_PM_OPS(rtsx_pci_suspend, rtsx_pci_resume)
1821 	SET_RUNTIME_PM_OPS(rtsx_pci_runtime_suspend, rtsx_pci_runtime_resume, rtsx_pci_runtime_idle)
1822 };
1823 
1824 static struct pci_driver rtsx_pci_driver = {
1825 	.name = DRV_NAME_RTSX_PCI,
1826 	.id_table = rtsx_pci_ids,
1827 	.probe = rtsx_pci_probe,
1828 	.remove = rtsx_pci_remove,
1829 	.driver.pm = &rtsx_pci_pm_ops,
1830 	.shutdown = rtsx_pci_shutdown,
1831 };
1832 module_pci_driver(rtsx_pci_driver);
1833 
1834 MODULE_LICENSE("GPL");
1835 MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
1836 MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");
1837