1 /*
2  * Block driver for media (i.e., flash cards)
3  *
4  * Copyright 2002 Hewlett-Packard Company
5  * Copyright 2005-2008 Pierre Ossman
6  *
7  * Use consistent with the GNU GPL is permitted,
8  * provided that this copyright notice is
9  * preserved in its entirety in all copies and derived works.
10  *
11  * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
12  * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
13  * FITNESS FOR ANY PARTICULAR PURPOSE.
14  *
15  * Many thanks to Alessandro Rubini and Jonathan Corbet!
16  *
17  * Author:  Andrew Christian
18  *          28 May 2002
19  */
20 #include <linux/moduleparam.h>
21 #include <linux/module.h>
22 #include <linux/init.h>
23 
24 #include <linux/kernel.h>
25 #include <linux/fs.h>
26 #include <linux/slab.h>
27 #include <linux/errno.h>
28 #include <linux/hdreg.h>
29 #include <linux/kdev_t.h>
30 #include <linux/blkdev.h>
31 #include <linux/mutex.h>
32 #include <linux/scatterlist.h>
33 #include <linux/string_helpers.h>
34 #include <linux/delay.h>
35 #include <linux/capability.h>
36 #include <linux/compat.h>
37 
38 #include <linux/mmc/ioctl.h>
39 #include <linux/mmc/card.h>
40 #include <linux/mmc/host.h>
41 #include <linux/mmc/mmc.h>
42 #include <linux/mmc/sd.h>
43 
44 #include <asm/system.h>
45 #include <asm/uaccess.h>
46 
47 #include "queue.h"
48 
49 MODULE_ALIAS("mmc:block");
50 #ifdef MODULE_PARAM_PREFIX
51 #undef MODULE_PARAM_PREFIX
52 #endif
53 #define MODULE_PARAM_PREFIX "mmcblk."
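/* Forcing the prefix ties the option name to the block devices it controls,
 * e.g. perdev_minors is given as mmcblk.perdev_minors= on the kernel command
 * line rather than under this file's build name. */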
54 
55 #define INAND_CMD38_ARG_EXT_CSD  113
56 #define INAND_CMD38_ARG_ERASE    0x00
57 #define INAND_CMD38_ARG_TRIM     0x01
58 #define INAND_CMD38_ARG_SECERASE 0x80
59 #define INAND_CMD38_ARG_SECTRIM1 0x81
60 #define INAND_CMD38_ARG_SECTRIM2 0x88
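/* INAND_CMD38_ARG_EXT_CSD is the EXT_CSD byte (113) that the remaining values
 * are written to before issuing CMD38 on iNAND devices flagged with
 * MMC_QUIRK_INAND_CMD38; see the discard/secdiscard paths below. */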
61 
62 static DEFINE_MUTEX(block_mutex);
63 
64 /*
65  * The defaults come from config options but can be overridden by module
66  * or bootarg options.
67  */
68 static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
69 
70 /*
71  * We've only got one major, so number of mmcblk devices is
72  * limited to 256 / number of minors per device.
73  */
74 static int max_devices;
75 
76 /* 256 minors, so at most 256 separate devices */
77 static DECLARE_BITMAP(dev_use, 256);
78 static DECLARE_BITMAP(name_use, 256);
79 
80 /*
81  * There is one mmc_blk_data per slot.
82  */
83 struct mmc_blk_data {
84 	spinlock_t	lock;
85 	struct gendisk	*disk;
86 	struct mmc_queue queue;
87 	struct list_head part;
88 
89 	unsigned int	flags;
90 #define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
91 #define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
92 
93 	unsigned int	usage;
94 	unsigned int	read_only;
95 	unsigned int	part_type;
96 	unsigned int	name_idx;
97 	unsigned int	reset_done;
98 #define MMC_BLK_READ		BIT(0)
99 #define MMC_BLK_WRITE		BIT(1)
100 #define MMC_BLK_DISCARD		BIT(2)
101 #define MMC_BLK_SECDISCARD	BIT(3)
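	/* reset_done bits: remember which request types have already triggered
	 * a host reset; cleared again by mmc_blk_reset_success(). */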
102 
103 	/*
104 	 * Only set in main mmc_blk_data associated
105 	 * with mmc_card with mmc_set_drvdata, and keeps
106 	 * track of the currently selected device partition.
107 	 */
108 	unsigned int	part_curr;
109 	struct device_attribute force_ro;
110 	struct device_attribute power_ro_lock;
111 	int	area_type;
112 };
113 
114 static DEFINE_MUTEX(open_lock);
115 
116 enum mmc_blk_status {
117 	MMC_BLK_SUCCESS = 0,
118 	MMC_BLK_PARTIAL,
119 	MMC_BLK_CMD_ERR,
120 	MMC_BLK_RETRY,
121 	MMC_BLK_ABORT,
122 	MMC_BLK_DATA_ERR,
123 	MMC_BLK_ECC_ERR,
124 	MMC_BLK_NOMEDIUM,
125 };
126 
127 module_param(perdev_minors, int, 0444);
128 MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device");
129 
130 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
131 {
132 	struct mmc_blk_data *md;
133 
134 	mutex_lock(&open_lock);
135 	md = disk->private_data;
136 	if (md && md->usage == 0)
137 		md = NULL;
138 	if (md)
139 		md->usage++;
140 	mutex_unlock(&open_lock);
141 
142 	return md;
143 }
144 
145 static inline int mmc_get_devidx(struct gendisk *disk)
146 {
147 	int devmaj = MAJOR(disk_devt(disk));
148 	int devidx = MINOR(disk_devt(disk)) / perdev_minors;
149 
150 	if (!devmaj)
151 		devidx = disk->first_minor / perdev_minors;
152 	return devidx;
153 }
154 
155 static void mmc_blk_put(struct mmc_blk_data *md)
156 {
157 	mutex_lock(&open_lock);
158 	md->usage--;
159 	if (md->usage == 0) {
160 		int devidx = mmc_get_devidx(md->disk);
161 		blk_cleanup_queue(md->queue.queue);
162 
163 		__clear_bit(devidx, dev_use);
164 
165 		put_disk(md->disk);
166 		kfree(md);
167 	}
168 	mutex_unlock(&open_lock);
169 }
170 
171 static ssize_t power_ro_lock_show(struct device *dev,
172 		struct device_attribute *attr, char *buf)
173 {
174 	int ret;
175 	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
176 	struct mmc_card *card = md->queue.card;
177 	int locked = 0;
178 
179 	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
180 		locked = 2;
181 	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
182 		locked = 1;
183 
184 	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
185 	mmc_blk_put(md);
186 	return ret;
187 }
188 
189 static ssize_t power_ro_lock_store(struct device *dev,
190 		struct device_attribute *attr, const char *buf, size_t count)
191 {
192 	int ret;
193 	struct mmc_blk_data *md, *part_md;
194 	struct mmc_card *card;
195 	unsigned long set;
196 
197 	if (kstrtoul(buf, 0, &set))
198 		return -EINVAL;
199 
200 	if (set != 1)
201 		return count;
202 
203 	md = mmc_blk_get(dev_to_disk(dev));
204 	card = md->queue.card;
205 
206 	mmc_claim_host(card->host);
207 
208 	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
209 				card->ext_csd.boot_ro_lock |
210 				EXT_CSD_BOOT_WP_B_PWR_WP_EN,
211 				card->ext_csd.part_time);
212 	if (ret)
213 		pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
214 	else
215 		card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
216 
217 	mmc_release_host(card->host);
218 
219 	if (!ret) {
220 		pr_info("%s: Locking boot partition ro until next power on\n",
221 			md->disk->disk_name);
222 		set_disk_ro(md->disk, 1);
223 
224 		list_for_each_entry(part_md, &md->part, part)
225 			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
226 				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
227 				set_disk_ro(part_md->disk, 1);
228 			}
229 	}
230 
231 	mmc_blk_put(md);
232 	return count;
233 }
234 
235 static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
236 			     char *buf)
237 {
238 	int ret;
239 	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
240 
241 	ret = snprintf(buf, PAGE_SIZE, "%d",
242 		       get_disk_ro(dev_to_disk(dev)) ^
243 		       md->read_only);
244 	mmc_blk_put(md);
245 	return ret;
246 }
247 
248 static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
249 			      const char *buf, size_t count)
250 {
251 	int ret;
252 	char *end;
253 	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
254 	unsigned long set = simple_strtoul(buf, &end, 0);
255 	if (end == buf) {
256 		ret = -EINVAL;
257 		goto out;
258 	}
259 
260 	set_disk_ro(dev_to_disk(dev), set || md->read_only);
261 	ret = count;
262 out:
263 	mmc_blk_put(md);
264 	return ret;
265 }
266 
267 static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
268 {
269 	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
270 	int ret = -ENXIO;
271 
272 	mutex_lock(&block_mutex);
273 	if (md) {
274 		if (md->usage == 2)
275 			check_disk_change(bdev);
276 		ret = 0;
277 
278 		if ((mode & FMODE_WRITE) && md->read_only) {
279 			mmc_blk_put(md);
280 			ret = -EROFS;
281 		}
282 	}
283 	mutex_unlock(&block_mutex);
284 
285 	return ret;
286 }
287 
288 static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
289 {
290 	struct mmc_blk_data *md = disk->private_data;
291 
292 	mutex_lock(&block_mutex);
293 	mmc_blk_put(md);
294 	mutex_unlock(&block_mutex);
295 	return 0;
296 }
297 
298 static int
299 mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
300 {
301 	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
302 	geo->heads = 4;
303 	geo->sectors = 16;
304 	return 0;
305 }
306 
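/*
 * Kernel-side copy of an MMC_IOC_CMD request: the user's mmc_ioc_cmd plus
 * a bounce buffer for its data payload.
 */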
307 struct mmc_blk_ioc_data {
308 	struct mmc_ioc_cmd ic;
309 	unsigned char *buf;
310 	u64 buf_bytes;
311 };
312 
313 static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
314 	struct mmc_ioc_cmd __user *user)
315 {
316 	struct mmc_blk_ioc_data *idata;
317 	int err;
318 
319 	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
320 	if (!idata) {
321 		err = -ENOMEM;
322 		goto out;
323 	}
324 
325 	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
326 		err = -EFAULT;
327 		goto idata_err;
328 	}
329 
330 	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
331 	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
332 		err = -EOVERFLOW;
333 		goto idata_err;
334 	}
335 
336 	if (!idata->buf_bytes)
337 		return idata;
338 
339 	idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
340 	if (!idata->buf) {
341 		err = -ENOMEM;
342 		goto idata_err;
343 	}
344 
345 	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
346 					idata->ic.data_ptr, idata->buf_bytes)) {
347 		err = -EFAULT;
348 		goto copy_err;
349 	}
350 
351 	return idata;
352 
353 copy_err:
354 	kfree(idata->buf);
355 idata_err:
356 	kfree(idata);
357 out:
358 	return ERR_PTR(err);
359 }
360 
361 static int mmc_blk_ioctl_cmd(struct block_device *bdev,
362 	struct mmc_ioc_cmd __user *ic_ptr)
363 {
364 	struct mmc_blk_ioc_data *idata;
365 	struct mmc_blk_data *md;
366 	struct mmc_card *card;
367 	struct mmc_command cmd = {0};
368 	struct mmc_data data = {0};
369 	struct mmc_request mrq = {NULL};
370 	struct scatterlist sg;
371 	int err = 0;
372 
373 	/*
374 	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
375 	 * whole block device, not on a partition.  This prevents overspray
376 	 * between sibling partitions.
377 	 */
378 	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
379 		return -EPERM;
380 
381 	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
382 	if (IS_ERR(idata))
383 		return PTR_ERR(idata);
384 
385 	md = mmc_blk_get(bdev->bd_disk);
386 	if (!md) {
387 		kfree(idata->buf);
388 		kfree(idata);
389 		return -EINVAL;
390 	}
391 	card = md->queue.card;
392 	if (IS_ERR(card)) {
393 		err = PTR_ERR(card);
394 		goto cmd_done;
395 	}
396 
397 	cmd.opcode = idata->ic.opcode;
398 	cmd.arg = idata->ic.arg;
399 	cmd.flags = idata->ic.flags;
400 
401 	if (idata->buf_bytes) {
402 		data.sg = &sg;
403 		data.sg_len = 1;
404 		data.blksz = idata->ic.blksz;
405 		data.blocks = idata->ic.blocks;
406 
407 		sg_init_one(data.sg, idata->buf, idata->buf_bytes);
408 
409 		if (idata->ic.write_flag)
410 			data.flags = MMC_DATA_WRITE;
411 		else
412 			data.flags = MMC_DATA_READ;
413 
414 		/* data.flags must already be set before doing this. */
415 		mmc_set_data_timeout(&data, card);
416 
417 		/* Allow overriding the timeout_ns for empirical tuning. */
418 		if (idata->ic.data_timeout_ns)
419 			data.timeout_ns = idata->ic.data_timeout_ns;
420 
421 		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
422 			/*
423 			 * Pretend this is a data transfer and rely on the
424 			 * host driver to compute timeout.  When all host
425 			 * drivers support cmd.cmd_timeout for R1B, this
426 			 * can be changed to:
427 			 *
428 			 *     mrq.data = NULL;
429 			 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
430 			 */
431 			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
432 		}
433 
434 		mrq.data = &data;
435 	}
436 
437 	mrq.cmd = &cmd;
438 
439 	mmc_claim_host(card->host);
440 
441 	if (idata->ic.is_acmd) {
442 		err = mmc_app_cmd(card->host, card);
443 		if (err)
444 			goto cmd_rel_host;
445 	}
446 
447 	mmc_wait_for_req(card->host, &mrq);
448 
449 	if (cmd.error) {
450 		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
451 						__func__, cmd.error);
452 		err = cmd.error;
453 		goto cmd_rel_host;
454 	}
455 	if (data.error) {
456 		dev_err(mmc_dev(card->host), "%s: data error %d\n",
457 						__func__, data.error);
458 		err = data.error;
459 		goto cmd_rel_host;
460 	}
461 
462 	/*
463 	 * According to the SD specs, some commands require a delay after
464 	 * issuing the command.
465 	 */
466 	if (idata->ic.postsleep_min_us)
467 		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
468 
469 	if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
470 		err = -EFAULT;
471 		goto cmd_rel_host;
472 	}
473 
474 	if (!idata->ic.write_flag) {
475 		if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
476 						idata->buf, idata->buf_bytes)) {
477 			err = -EFAULT;
478 			goto cmd_rel_host;
479 		}
480 	}
481 
482 cmd_rel_host:
483 	mmc_release_host(card->host);
484 
485 cmd_done:
486 	mmc_blk_put(md);
487 	kfree(idata->buf);
488 	kfree(idata);
489 	return err;
490 }
491 
492 static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
493 	unsigned int cmd, unsigned long arg)
494 {
495 	int ret = -EINVAL;
496 	if (cmd == MMC_IOC_CMD)
497 		ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
498 	return ret;
499 }
500 
501 #ifdef CONFIG_COMPAT
502 static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
503 	unsigned int cmd, unsigned long arg)
504 {
505 	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
506 }
507 #endif
508 
509 static const struct block_device_operations mmc_bdops = {
510 	.open			= mmc_blk_open,
511 	.release		= mmc_blk_release,
512 	.getgeo			= mmc_blk_getgeo,
513 	.owner			= THIS_MODULE,
514 	.ioctl			= mmc_blk_ioctl,
515 #ifdef CONFIG_COMPAT
516 	.compat_ioctl		= mmc_blk_compat_ioctl,
517 #endif
518 };
519 
520 static inline int mmc_blk_part_switch(struct mmc_card *card,
521 				      struct mmc_blk_data *md)
522 {
523 	int ret;
524 	struct mmc_blk_data *main_md = mmc_get_drvdata(card);
525 
526 	if (main_md->part_curr == md->part_type)
527 		return 0;
528 
529 	if (mmc_card_mmc(card)) {
530 		u8 part_config = card->ext_csd.part_config;
531 
532 		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
533 		part_config |= md->part_type;
534 
535 		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
536 				 EXT_CSD_PART_CONFIG, part_config,
537 				 card->ext_csd.part_time);
538 		if (ret)
539 			return ret;
540 
541 		card->ext_csd.part_config = part_config;
542 	}
543 
544 	main_md->part_curr = md->part_type;
545 	return 0;
546 }
547 
548 static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
549 {
550 	int err;
551 	u32 result;
552 	__be32 *blocks;
553 
554 	struct mmc_request mrq = {NULL};
555 	struct mmc_command cmd = {0};
556 	struct mmc_data data = {0};
557 	unsigned int timeout_us;
558 
559 	struct scatterlist sg;
560 
561 	cmd.opcode = MMC_APP_CMD;
562 	cmd.arg = card->rca << 16;
563 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
564 
565 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
566 	if (err)
567 		return (u32)-1;
568 	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
569 		return (u32)-1;
570 
571 	memset(&cmd, 0, sizeof(struct mmc_command));
572 
573 	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
574 	cmd.arg = 0;
575 	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
576 
577 	data.timeout_ns = card->csd.tacc_ns * 100;
578 	data.timeout_clks = card->csd.tacc_clks * 100;
579 
580 	timeout_us = data.timeout_ns / 1000;
581 	timeout_us += data.timeout_clks * 1000 /
582 		(card->host->ios.clock / 1000);
583 
584 	if (timeout_us > 100000) {
585 		data.timeout_ns = 100000000;
586 		data.timeout_clks = 0;
587 	}
588 
589 	data.blksz = 4;
590 	data.blocks = 1;
591 	data.flags = MMC_DATA_READ;
592 	data.sg = &sg;
593 	data.sg_len = 1;
594 
595 	mrq.cmd = &cmd;
596 	mrq.data = &data;
597 
598 	blocks = kmalloc(4, GFP_KERNEL);
599 	if (!blocks)
600 		return (u32)-1;
601 
602 	sg_init_one(&sg, blocks, 4);
603 
604 	mmc_wait_for_req(card->host, &mrq);
605 
606 	result = ntohl(*blocks);
607 	kfree(blocks);
608 
609 	if (cmd.error || data.error)
610 		result = (u32)-1;
611 
612 	return result;
613 }
614 
615 static int send_stop(struct mmc_card *card, u32 *status)
616 {
617 	struct mmc_command cmd = {0};
618 	int err;
619 
620 	cmd.opcode = MMC_STOP_TRANSMISSION;
621 	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
622 	err = mmc_wait_for_cmd(card->host, &cmd, 5);
623 	if (err == 0)
624 		*status = cmd.resp[0];
625 	return err;
626 }
627 
628 static int get_card_status(struct mmc_card *card, u32 *status, int retries)
629 {
630 	struct mmc_command cmd = {0};
631 	int err;
632 
633 	cmd.opcode = MMC_SEND_STATUS;
634 	if (!mmc_host_is_spi(card->host))
635 		cmd.arg = card->rca << 16;
636 	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
637 	err = mmc_wait_for_cmd(card->host, &cmd, retries);
638 	if (err == 0)
639 		*status = cmd.resp[0];
640 	return err;
641 }
642 
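/* Verdicts returned by the command error/recovery helpers below. */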
643 #define ERR_NOMEDIUM	3
644 #define ERR_RETRY	2
645 #define ERR_ABORT	1
646 #define ERR_CONTINUE	0
647 
648 static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
649 	bool status_valid, u32 status)
650 {
651 	switch (error) {
652 	case -EILSEQ:
653 		/* response crc error, retry the r/w cmd */
654 		pr_err("%s: %s sending %s command, card status %#x\n",
655 			req->rq_disk->disk_name, "response CRC error",
656 			name, status);
657 		return ERR_RETRY;
658 
659 	case -ETIMEDOUT:
660 		pr_err("%s: %s sending %s command, card status %#x\n",
661 			req->rq_disk->disk_name, "timed out", name, status);
662 
663 		/* If the status cmd initially failed, retry the r/w cmd */
664 		if (!status_valid)
665 			return ERR_RETRY;
666 
667 		/*
668 		 * If it was a r/w cmd crc error, or illegal command
669 		 * (eg, issued in wrong state) then retry - we should
670 		 * have corrected the state problem above.
671 		 */
672 		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
673 			return ERR_RETRY;
674 
675 		/* Otherwise abort the command */
676 		return ERR_ABORT;
677 
678 	default:
679 		/* We don't understand the error code the driver gave us */
680 		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
681 		       req->rq_disk->disk_name, error, status);
682 		return ERR_ABORT;
683 	}
684 }
685 
686 /*
687  * Initial r/w and stop cmd error recovery.
688  * We don't know whether the card received the r/w cmd or not, so try to
689  * restore things back to a sane state.  Essentially, we do this as follows:
690  * - Obtain card status.  If the first attempt to obtain card status fails,
691  *   the status word will reflect the failed status cmd, not the failed
692  *   r/w cmd.  If we fail to obtain card status, it suggests we can no
693  *   longer communicate with the card.
694  * - Check the card state.  If the card received the cmd but there was a
695  *   transient problem with the response, it might still be in a data transfer
696  *   mode.  Try to send it a stop command.  If this fails, we can't recover.
697  * - If the r/w cmd failed due to a response CRC error, it was probably
698  *   transient, so retry the cmd.
699  * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
700  * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
701  *   illegal cmd, retry.
702  * Otherwise we don't understand what happened, so abort.
703  */
704 static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
705 	struct mmc_blk_request *brq, int *ecc_err)
706 {
707 	bool prev_cmd_status_valid = true;
708 	u32 status, stop_status = 0;
709 	int err, retry;
710 
711 	if (mmc_card_removed(card))
712 		return ERR_NOMEDIUM;
713 
714 	/*
715 	 * Try to get card status which indicates both the card state
716 	 * and why there was no response.  If the first attempt fails,
717 	 * we can't be sure the returned status is for the r/w command.
718 	 */
719 	for (retry = 2; retry >= 0; retry--) {
720 		err = get_card_status(card, &status, 0);
721 		if (!err)
722 			break;
723 
724 		prev_cmd_status_valid = false;
725 		pr_err("%s: error %d sending status command, %sing\n",
726 		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
727 	}
728 
729 	/* We couldn't get a response from the card.  Give up. */
730 	if (err) {
731 		/* Check if the card is removed */
732 		if (mmc_detect_card_removed(card->host))
733 			return ERR_NOMEDIUM;
734 		return ERR_ABORT;
735 	}
736 
737 	/* Flag ECC errors */
738 	if ((status & R1_CARD_ECC_FAILED) ||
739 	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
740 	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
741 		*ecc_err = 1;
742 
743 	/*
744 	 * Check the current card state.  If it is in some data transfer
745 	 * mode, tell it to stop (and hopefully transition back to TRAN.)
746 	 */
747 	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
748 	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
749 		err = send_stop(card, &stop_status);
750 		if (err)
751 			pr_err("%s: error %d sending stop command\n",
752 			       req->rq_disk->disk_name, err);
753 
754 		/*
755 		 * If the stop cmd also timed out, the card is probably
756 		 * not present, so abort.  Other errors are bad news too.
757 		 */
758 		if (err)
759 			return ERR_ABORT;
760 		if (stop_status & R1_CARD_ECC_FAILED)
761 			*ecc_err = 1;
762 	}
763 
764 	/* Check for set block count errors */
765 	if (brq->sbc.error)
766 		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
767 				prev_cmd_status_valid, status);
768 
769 	/* Check for r/w command errors */
770 	if (brq->cmd.error)
771 		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
772 				prev_cmd_status_valid, status);
773 
774 	/* Data errors */
775 	if (!brq->stop.error)
776 		return ERR_CONTINUE;
777 
778 	/* Now for stop errors.  These aren't fatal to the transfer. */
779 	pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
780 	       req->rq_disk->disk_name, brq->stop.error,
781 	       brq->cmd.resp[0], status);
782 
783 	/*
784 	 * Substitute in our own stop status as this will give the error
785 	 * state which happened during the execution of the r/w command.
786 	 */
787 	if (stop_status) {
788 		brq->stop.resp[0] = stop_status;
789 		brq->stop.error = 0;
790 	}
791 	return ERR_CONTINUE;
792 }
793 
794 static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
795 			 int type)
796 {
797 	int err;
798 
799 	if (md->reset_done & type)
800 		return -EEXIST;
801 
802 	md->reset_done |= type;
803 	err = mmc_hw_reset(host);
804 	/* Ensure we switch back to the correct partition */
805 	if (err != -EOPNOTSUPP) {
806 		struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
807 		int part_err;
808 
809 		main_md->part_curr = main_md->part_type;
810 		part_err = mmc_blk_part_switch(host->card, md);
811 		if (part_err) {
812 			/*
813 			 * We have failed to get back into the correct
814 			 * partition, so we need to abort the whole request.
815 			 */
816 			return -ENODEV;
817 		}
818 	}
819 	return err;
820 }
821 
822 static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
823 {
824 	md->reset_done &= ~type;
825 }
826 
827 static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
828 {
829 	struct mmc_blk_data *md = mq->data;
830 	struct mmc_card *card = md->queue.card;
831 	unsigned int from, nr, arg;
832 	int err = 0, type = MMC_BLK_DISCARD;
833 
834 	if (!mmc_can_erase(card)) {
835 		err = -EOPNOTSUPP;
836 		goto out;
837 	}
838 
839 	from = blk_rq_pos(req);
840 	nr = blk_rq_sectors(req);
841 
842 	if (mmc_can_discard(card))
843 		arg = MMC_DISCARD_ARG;
844 	else if (mmc_can_trim(card))
845 		arg = MMC_TRIM_ARG;
846 	else
847 		arg = MMC_ERASE_ARG;
848 retry:
849 	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
850 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
851 				 INAND_CMD38_ARG_EXT_CSD,
852 				 arg == MMC_TRIM_ARG ?
853 				 INAND_CMD38_ARG_TRIM :
854 				 INAND_CMD38_ARG_ERASE,
855 				 0);
856 		if (err)
857 			goto out;
858 	}
859 	err = mmc_erase(card, from, nr, arg);
860 out:
861 	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
862 		goto retry;
863 	if (!err)
864 		mmc_blk_reset_success(md, type);
865 	spin_lock_irq(&md->lock);
866 	__blk_end_request(req, err, blk_rq_bytes(req));
867 	spin_unlock_irq(&md->lock);
868 
869 	return err ? 0 : 1;
870 }
871 
872 static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
873 				       struct request *req)
874 {
875 	struct mmc_blk_data *md = mq->data;
876 	struct mmc_card *card = md->queue.card;
877 	unsigned int from, nr, arg;
878 	int err = 0, type = MMC_BLK_SECDISCARD;
879 
880 	if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
881 		err = -EOPNOTSUPP;
882 		goto out;
883 	}
884 
885 	/* The sanitize operation is supported at v4.5 only */
886 	if (mmc_can_sanitize(card)) {
887 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
888 				EXT_CSD_SANITIZE_START, 1, 0);
889 		goto out;
890 	}
891 
892 	from = blk_rq_pos(req);
893 	nr = blk_rq_sectors(req);
894 
895 	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
896 		arg = MMC_SECURE_TRIM1_ARG;
897 	else
898 		arg = MMC_SECURE_ERASE_ARG;
899 retry:
900 	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
901 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
902 				 INAND_CMD38_ARG_EXT_CSD,
903 				 arg == MMC_SECURE_TRIM1_ARG ?
904 				 INAND_CMD38_ARG_SECTRIM1 :
905 				 INAND_CMD38_ARG_SECERASE,
906 				 0);
907 		if (err)
908 			goto out;
909 	}
910 	err = mmc_erase(card, from, nr, arg);
911 	if (!err && arg == MMC_SECURE_TRIM1_ARG) {
912 		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
913 			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
914 					 INAND_CMD38_ARG_EXT_CSD,
915 					 INAND_CMD38_ARG_SECTRIM2,
916 					 0);
917 			if (err)
918 				goto out;
919 		}
920 		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
921 	}
922 out:
923 	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
924 		goto retry;
925 	if (!err)
926 		mmc_blk_reset_success(md, type);
927 	spin_lock_irq(&md->lock);
928 	__blk_end_request(req, err, blk_rq_bytes(req));
929 	spin_unlock_irq(&md->lock);
930 
931 	return err ? 0 : 1;
932 }
933 
934 static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
935 {
936 	struct mmc_blk_data *md = mq->data;
937 	struct mmc_card *card = md->queue.card;
938 	int ret = 0;
939 
940 	ret = mmc_flush_cache(card);
941 	if (ret)
942 		ret = -EIO;
943 
944 	spin_lock_irq(&md->lock);
945 	__blk_end_request_all(req, ret);
946 	spin_unlock_irq(&md->lock);
947 
948 	return ret ? 0 : 1;
949 }
950 
951 /*
952  * Reformat current write as a reliable write, supporting
953  * both legacy and the enhanced reliable write MMC cards.
954  * In each transfer we'll handle only as much as a single
955  * reliable write can handle, thus finish the request in
956  * partial completions.
957  */
958 static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
959 				    struct mmc_card *card,
960 				    struct request *req)
961 {
962 	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
963 		/* Legacy mode imposes restrictions on transfers. */
964 		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
965 			brq->data.blocks = 1;
966 
967 		if (brq->data.blocks > card->ext_csd.rel_sectors)
968 			brq->data.blocks = card->ext_csd.rel_sectors;
969 		else if (brq->data.blocks < card->ext_csd.rel_sectors)
970 			brq->data.blocks = 1;
971 	}
972 }
973 
974 #define CMD_ERRORS							\
975 	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
976 	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
977 	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
978 	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
979 	 R1_CC_ERROR |		/* Card controller error */		\
980 	 R1_ERROR)		/* General/unknown error */
981 
982 static int mmc_blk_err_check(struct mmc_card *card,
983 			     struct mmc_async_req *areq)
984 {
985 	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
986 						    mmc_active);
987 	struct mmc_blk_request *brq = &mq_mrq->brq;
988 	struct request *req = mq_mrq->req;
989 	int ecc_err = 0;
990 
991 	/*
992 	 * sbc.error indicates a problem with the set block count
993 	 * command.  No data will have been transferred.
994 	 *
995 	 * cmd.error indicates a problem with the r/w command.  No
996 	 * data will have been transferred.
997 	 *
998 	 * stop.error indicates a problem with the stop command.  Data
999 	 * may have been transferred, or may still be transferring.
1000 	 */
1001 	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
1002 	    brq->data.error) {
1003 		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
1004 		case ERR_RETRY:
1005 			return MMC_BLK_RETRY;
1006 		case ERR_ABORT:
1007 			return MMC_BLK_ABORT;
1008 		case ERR_NOMEDIUM:
1009 			return MMC_BLK_NOMEDIUM;
1010 		case ERR_CONTINUE:
1011 			break;
1012 		}
1013 	}
1014 
1015 	/*
1016 	 * Check for errors relating to the execution of the
1017 	 * initial command - such as address errors.  No data
1018 	 * has been transferred.
1019 	 */
1020 	if (brq->cmd.resp[0] & CMD_ERRORS) {
1021 		pr_err("%s: r/w command failed, status = %#x\n",
1022 		       req->rq_disk->disk_name, brq->cmd.resp[0]);
1023 		return MMC_BLK_ABORT;
1024 	}
1025 
1026 	/*
1027 	 * Everything else is either success, or a data error of some
1028 	 * kind.  If it was a write, we may have transitioned to
1029 	 * program mode, which we have to wait for to complete.
1030 	 */
1031 	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
1032 		u32 status;
1033 		do {
1034 			int err = get_card_status(card, &status, 5);
1035 			if (err) {
1036 				pr_err("%s: error %d requesting status\n",
1037 				       req->rq_disk->disk_name, err);
1038 				return MMC_BLK_CMD_ERR;
1039 			}
1040 			/*
1041 			 * Some cards mishandle the status bits,
1042 			 * so make sure to check both the busy
1043 			 * indication and the card state.
1044 			 */
1045 		} while (!(status & R1_READY_FOR_DATA) ||
1046 			 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
1047 	}
1048 
1049 	if (brq->data.error) {
1050 		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
1051 		       req->rq_disk->disk_name, brq->data.error,
1052 		       (unsigned)blk_rq_pos(req),
1053 		       (unsigned)blk_rq_sectors(req),
1054 		       brq->cmd.resp[0], brq->stop.resp[0]);
1055 
1056 		if (rq_data_dir(req) == READ) {
1057 			if (ecc_err)
1058 				return MMC_BLK_ECC_ERR;
1059 			return MMC_BLK_DATA_ERR;
1060 		} else {
1061 			return MMC_BLK_CMD_ERR;
1062 		}
1063 	}
1064 
1065 	if (!brq->data.bytes_xfered)
1066 		return MMC_BLK_RETRY;
1067 
1068 	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
1069 		return MMC_BLK_PARTIAL;
1070 
1071 	return MMC_BLK_SUCCESS;
1072 }
1073 
1074 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1075 			       struct mmc_card *card,
1076 			       int disable_multi,
1077 			       struct mmc_queue *mq)
1078 {
1079 	u32 readcmd, writecmd;
1080 	struct mmc_blk_request *brq = &mqrq->brq;
1081 	struct request *req = mqrq->req;
1082 	struct mmc_blk_data *md = mq->data;
1083 
1084 	/*
1085 	 * Reliable writes are used to implement Forced Unit Access and
1086 	 * REQ_META accesses, and are supported only on MMCs.
1087 	 *
1088 	 * XXX: this really needs a good explanation of why REQ_META
1089 	 * is treated special.
1090 	 */
1091 	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
1092 			  (req->cmd_flags & REQ_META)) &&
1093 		(rq_data_dir(req) == WRITE) &&
1094 		(md->flags & MMC_BLK_REL_WR);
1095 
1096 	memset(brq, 0, sizeof(struct mmc_blk_request));
1097 	brq->mrq.cmd = &brq->cmd;
1098 	brq->mrq.data = &brq->data;
1099 
1100 	brq->cmd.arg = blk_rq_pos(req);
1101 	if (!mmc_card_blockaddr(card))
1102 		brq->cmd.arg <<= 9;
1103 	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
1104 	brq->data.blksz = 512;
1105 	brq->stop.opcode = MMC_STOP_TRANSMISSION;
1106 	brq->stop.arg = 0;
1107 	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1108 	brq->data.blocks = blk_rq_sectors(req);
1109 
1110 	/*
1111 	 * The block layer doesn't support all sector count
1112 	 * restrictions, so we need to be prepared for too big
1113 	 * requests.
1114 	 */
1115 	if (brq->data.blocks > card->host->max_blk_count)
1116 		brq->data.blocks = card->host->max_blk_count;
1117 
1118 	if (brq->data.blocks > 1) {
1119 		/*
1120 		 * After a read error, we redo the request one sector
1121 		 * at a time in order to accurately determine which
1122 		 * sectors can be read successfully.
1123 		 */
1124 		if (disable_multi)
1125 			brq->data.blocks = 1;
1126 
1127 		/* Some controllers can't do multiblock reads due to hw bugs */
1128 		if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
1129 		    rq_data_dir(req) == READ)
1130 			brq->data.blocks = 1;
1131 	}
1132 
1133 	if (brq->data.blocks > 1 || do_rel_wr) {
1134 		/* SPI multiblock writes terminate using a special
1135 		 * token, not a STOP_TRANSMISSION request.
1136 		 */
1137 		if (!mmc_host_is_spi(card->host) ||
1138 		    rq_data_dir(req) == READ)
1139 			brq->mrq.stop = &brq->stop;
1140 		readcmd = MMC_READ_MULTIPLE_BLOCK;
1141 		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
1142 	} else {
1143 		brq->mrq.stop = NULL;
1144 		readcmd = MMC_READ_SINGLE_BLOCK;
1145 		writecmd = MMC_WRITE_BLOCK;
1146 	}
1147 	if (rq_data_dir(req) == READ) {
1148 		brq->cmd.opcode = readcmd;
1149 		brq->data.flags |= MMC_DATA_READ;
1150 	} else {
1151 		brq->cmd.opcode = writecmd;
1152 		brq->data.flags |= MMC_DATA_WRITE;
1153 	}
1154 
1155 	if (do_rel_wr)
1156 		mmc_apply_rel_rw(brq, card, req);
1157 
1158 	/*
1159 	 * Pre-defined multi-block transfers are preferable to
1160 	 * open-ended ones (and necessary for reliable writes).
1161 	 * However, it is not sufficient to just send CMD23,
1162 	 * and avoid the final CMD12, as on an error condition
1163 	 * CMD12 (stop) needs to be sent anyway. This, coupled
1164 	 * with Auto-CMD23 enhancements provided by some
1165 	 * hosts, means that the complexity of dealing
1166 	 * with this is best left to the host. If CMD23 is
1167 	 * supported by card and host, we'll fill sbc in and let
1168 	 * the host deal with handling it correctly. This means
1169 	 * that for hosts that don't expose MMC_CAP_CMD23, no
1170 	 * change of behavior will be observed.
1171 	 *
1172 	 * N.B.: Some MMC cards experience performance degradation.
1173 	 * We'll avoid using CMD23-bounded multiblock writes for
1174 	 * these, while retaining features like reliable writes.
1175 	 */
1176 
1177 	if ((md->flags & MMC_BLK_CMD23) &&
1178 	    mmc_op_multi(brq->cmd.opcode) &&
1179 	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
1180 		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
1181 		brq->sbc.arg = brq->data.blocks |
1182 			(do_rel_wr ? (1 << 31) : 0);
1183 		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
1184 		brq->mrq.sbc = &brq->sbc;
1185 	}
1186 
1187 	mmc_set_data_timeout(&brq->data, card);
1188 
1189 	brq->data.sg = mqrq->sg;
1190 	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1191 
1192 	/*
1193 	 * Adjust the sg list so it is the same size as the
1194 	 * request.
1195 	 */
1196 	if (brq->data.blocks != blk_rq_sectors(req)) {
1197 		int i, data_size = brq->data.blocks << 9;
1198 		struct scatterlist *sg;
1199 
1200 		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
1201 			data_size -= sg->length;
1202 			if (data_size <= 0) {
1203 				sg->length += data_size;
1204 				i++;
1205 				break;
1206 			}
1207 		}
1208 		brq->data.sg_len = i;
1209 	}
1210 
1211 	mqrq->mmc_active.mrq = &brq->mrq;
1212 	mqrq->mmc_active.err_check = mmc_blk_err_check;
1213 
1214 	mmc_queue_bounce_pre(mqrq);
1215 }
1216 
1217 static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
1218 			   struct mmc_blk_request *brq, struct request *req,
1219 			   int ret)
1220 {
1221 	/*
1222 	 * If this is an SD card and we're writing, we can first
1223 	 * mark the known good sectors as ok.
1224 	 *
1225 	 * If the card is not SD, we can still ok written sectors
1226 	 * as reported by the controller (which might be less than
1227 	 * the real number of written sectors, but never more).
1228 	 */
1229 	if (mmc_card_sd(card)) {
1230 		u32 blocks;
1231 
1232 		blocks = mmc_sd_num_wr_blocks(card);
1233 		if (blocks != (u32)-1) {
1234 			spin_lock_irq(&md->lock);
1235 			ret = __blk_end_request(req, 0, blocks << 9);
1236 			spin_unlock_irq(&md->lock);
1237 		}
1238 	} else {
1239 		spin_lock_irq(&md->lock);
1240 		ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
1241 		spin_unlock_irq(&md->lock);
1242 	}
1243 	return ret;
1244 }
1245 
1246 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
1247 {
1248 	struct mmc_blk_data *md = mq->data;
1249 	struct mmc_card *card = md->queue.card;
1250 	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
1251 	int ret = 1, disable_multi = 0, retry = 0, type;
1252 	enum mmc_blk_status status;
1253 	struct mmc_queue_req *mq_rq;
1254 	struct request *req;
1255 	struct mmc_async_req *areq;
1256 
1257 	if (!rqc && !mq->mqrq_prev->req)
1258 		return 0;
1259 
1260 	do {
1261 		if (rqc) {
1262 			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
1263 			areq = &mq->mqrq_cur->mmc_active;
1264 		} else
1265 			areq = NULL;
1266 		areq = mmc_start_req(card->host, areq, (int *) &status);
1267 		if (!areq)
1268 			return 0;
1269 
1270 		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
1271 		brq = &mq_rq->brq;
1272 		req = mq_rq->req;
1273 		type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
1274 		mmc_queue_bounce_post(mq_rq);
1275 
1276 		switch (status) {
1277 		case MMC_BLK_SUCCESS:
1278 		case MMC_BLK_PARTIAL:
1279 			/*
1280 			 * A block was successfully transferred.
1281 			 */
1282 			mmc_blk_reset_success(md, type);
1283 			spin_lock_irq(&md->lock);
1284 			ret = __blk_end_request(req, 0,
1285 						brq->data.bytes_xfered);
1286 			spin_unlock_irq(&md->lock);
1287 			/*
1288 			 * If the blk_end_request function returns non-zero even
1289 			 * though all data has been transferred and no errors
1290 			 * were returned by the host controller, it's a bug.
1291 			 */
1292 			if (status == MMC_BLK_SUCCESS && ret) {
1293 				pr_err("%s BUG rq_tot %d d_xfer %d\n",
1294 				       __func__, blk_rq_bytes(req),
1295 				       brq->data.bytes_xfered);
1296 				rqc = NULL;
1297 				goto cmd_abort;
1298 			}
1299 			break;
1300 		case MMC_BLK_CMD_ERR:
1301 			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
1302 			if (!mmc_blk_reset(md, card->host, type))
1303 				break;
1304 			goto cmd_abort;
1305 		case MMC_BLK_RETRY:
1306 			if (retry++ < 5)
1307 				break;
1308 			/* Fall through */
1309 		case MMC_BLK_ABORT:
1310 			if (!mmc_blk_reset(md, card->host, type))
1311 				break;
1312 			goto cmd_abort;
1313 		case MMC_BLK_DATA_ERR: {
1314 			int err;
1315 
1316 			err = mmc_blk_reset(md, card->host, type);
1317 			if (!err)
1318 				break;
1319 			if (err == -ENODEV)
1320 				goto cmd_abort;
1321 			/* Fall through */
1322 		}
1323 		case MMC_BLK_ECC_ERR:
1324 			if (brq->data.blocks > 1) {
1325 				/* Redo read one sector at a time */
1326 				pr_warning("%s: retrying using single block read\n",
1327 					   req->rq_disk->disk_name);
1328 				disable_multi = 1;
1329 				break;
1330 			}
1331 			/*
1332 			 * After an error, we redo I/O one sector at a
1333 			 * time, so we only reach here after trying to
1334 			 * read a single sector.
1335 			 */
1336 			spin_lock_irq(&md->lock);
1337 			ret = __blk_end_request(req, -EIO,
1338 						brq->data.blksz);
1339 			spin_unlock_irq(&md->lock);
1340 			if (!ret)
1341 				goto start_new_req;
1342 			break;
1343 		case MMC_BLK_NOMEDIUM:
1344 			goto cmd_abort;
1345 		}
1346 
1347 		if (ret) {
1348 			/*
1349 			 * In case of an incomplete request,
1350 			 * prepare it again and resend.
1351 			 */
1352 			mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
1353 			mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
1354 		}
1355 	} while (ret);
1356 
1357 	return 1;
1358 
1359  cmd_abort:
1360 	spin_lock_irq(&md->lock);
1361 	if (mmc_card_removed(card))
1362 		req->cmd_flags |= REQ_QUIET;
1363 	while (ret)
1364 		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
1365 	spin_unlock_irq(&md->lock);
1366 
1367  start_new_req:
1368 	if (rqc) {
1369 		mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
1370 		mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
1371 	}
1372 
1373 	return 0;
1374 }
1375 
1376 static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
1377 {
1378 	int ret;
1379 	struct mmc_blk_data *md = mq->data;
1380 	struct mmc_card *card = md->queue.card;
1381 
1382 	if (req && !mq->mqrq_prev->req)
1383 		/* claim host only for the first request */
1384 		mmc_claim_host(card->host);
1385 
1386 	ret = mmc_blk_part_switch(card, md);
1387 	if (ret) {
1388 		if (req) {
1389 			spin_lock_irq(&md->lock);
1390 			__blk_end_request_all(req, -EIO);
1391 			spin_unlock_irq(&md->lock);
1392 		}
1393 		ret = 0;
1394 		goto out;
1395 	}
1396 
1397 	if (req && req->cmd_flags & REQ_DISCARD) {
1398 		/* complete ongoing async transfer before issuing discard */
1399 		if (card->host->areq)
1400 			mmc_blk_issue_rw_rq(mq, NULL);
1401 		if (req->cmd_flags & REQ_SECURE)
1402 			ret = mmc_blk_issue_secdiscard_rq(mq, req);
1403 		else
1404 			ret = mmc_blk_issue_discard_rq(mq, req);
1405 	} else if (req && req->cmd_flags & REQ_FLUSH) {
1406 		/* complete ongoing async transfer before issuing flush */
1407 		if (card->host->areq)
1408 			mmc_blk_issue_rw_rq(mq, NULL);
1409 		ret = mmc_blk_issue_flush(mq, req);
1410 	} else {
1411 		ret = mmc_blk_issue_rw_rq(mq, req);
1412 	}
1413 
1414 out:
1415 	if (!req)
1416 		/* release host only when there are no more requests */
1417 		mmc_release_host(card->host);
1418 	return ret;
1419 }
1420 
1421 static inline int mmc_blk_readonly(struct mmc_card *card)
1422 {
1423 	return mmc_card_readonly(card) ||
1424 	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
1425 }
1426 
1427 static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
1428 					      struct device *parent,
1429 					      sector_t size,
1430 					      bool default_ro,
1431 					      const char *subname,
1432 					      int area_type)
1433 {
1434 	struct mmc_blk_data *md;
1435 	int devidx, ret;
1436 
1437 	devidx = find_first_zero_bit(dev_use, max_devices);
1438 	if (devidx >= max_devices)
1439 		return ERR_PTR(-ENOSPC);
1440 	__set_bit(devidx, dev_use);
1441 
1442 	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
1443 	if (!md) {
1444 		ret = -ENOMEM;
1445 		goto out;
1446 	}
1447 
1448 	/*
1449 	 * !subname implies we are creating main mmc_blk_data that will be
1450 	 * associated with mmc_card with mmc_set_drvdata. Due to device
1451 	 * partitions, devidx will not coincide with a per-physical card
1452 	 * index anymore so we keep track of a name index.
1453 	 */
1454 	if (!subname) {
1455 		md->name_idx = find_first_zero_bit(name_use, max_devices);
1456 		__set_bit(md->name_idx, name_use);
1457 	} else
1458 		md->name_idx = ((struct mmc_blk_data *)
1459 				dev_to_disk(parent)->private_data)->name_idx;
1460 
1461 	md->area_type = area_type;
1462 
1463 	/*
1464 	 * Set the read-only status based on the supported commands
1465 	 * and the write protect switch.
1466 	 */
1467 	md->read_only = mmc_blk_readonly(card);
1468 
1469 	md->disk = alloc_disk(perdev_minors);
1470 	if (md->disk == NULL) {
1471 		ret = -ENOMEM;
1472 		goto err_kfree;
1473 	}
1474 
1475 	spin_lock_init(&md->lock);
1476 	INIT_LIST_HEAD(&md->part);
1477 	md->usage = 1;
1478 
1479 	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
1480 	if (ret)
1481 		goto err_putdisk;
1482 
1483 	md->queue.issue_fn = mmc_blk_issue_rq;
1484 	md->queue.data = md;
1485 
1486 	md->disk->major	= MMC_BLOCK_MAJOR;
1487 	md->disk->first_minor = devidx * perdev_minors;
1488 	md->disk->fops = &mmc_bdops;
1489 	md->disk->private_data = md;
1490 	md->disk->queue = md->queue.queue;
1491 	md->disk->driverfs_dev = parent;
1492 	set_disk_ro(md->disk, md->read_only || default_ro);
1493 
1494 	/*
1495 	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
1496 	 *
1497 	 * - be set for removable media with permanent block devices
1498 	 * - be unset for removable block devices with permanent media
1499 	 *
1500 	 * Since MMC block devices clearly fall under the second
1501 	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
1502 	 * should use the block device creation/destruction hotplug
1503 	 * messages to tell when the card is present.
1504 	 */
1505 
1506 	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
1507 		 "mmcblk%d%s", md->name_idx, subname ? subname : "");
1508 
1509 	blk_queue_logical_block_size(md->queue.queue, 512);
1510 	set_capacity(md->disk, size);
1511 
1512 	if (mmc_host_cmd23(card->host)) {
1513 		if (mmc_card_mmc(card) ||
1514 		    (mmc_card_sd(card) &&
1515 		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
1516 			md->flags |= MMC_BLK_CMD23;
1517 	}
1518 
1519 	if (mmc_card_mmc(card) &&
1520 	    md->flags & MMC_BLK_CMD23 &&
1521 	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
1522 	     card->ext_csd.rel_sectors)) {
1523 		md->flags |= MMC_BLK_REL_WR;
1524 		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
1525 	}
1526 
1527 	return md;
1528 
1529  err_putdisk:
1530 	put_disk(md->disk);
1531  err_kfree:
1532 	kfree(md);
1533  out:
1534 	return ERR_PTR(ret);
1535 }
1536 
1537 static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
1538 {
1539 	sector_t size;
1540 	struct mmc_blk_data *md;
1541 
1542 	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
1543 		/*
1544 		 * The EXT_CSD sector count is in number of 512 byte
1545 		 * sectors.
1546 		 */
1547 		size = card->ext_csd.sectors;
1548 	} else {
1549 		/*
1550 		 * The CSD capacity field is in units of read_blkbits.
1551 		 * set_capacity takes units of 512 bytes.
1552 		 */
1553 		size = card->csd.capacity << (card->csd.read_blkbits - 9);
1554 	}
1555 
1556 	md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
1557 					MMC_BLK_DATA_AREA_MAIN);
1558 	return md;
1559 }
1560 
1561 static int mmc_blk_alloc_part(struct mmc_card *card,
1562 			      struct mmc_blk_data *md,
1563 			      unsigned int part_type,
1564 			      sector_t size,
1565 			      bool default_ro,
1566 			      const char *subname,
1567 			      int area_type)
1568 {
1569 	char cap_str[10];
1570 	struct mmc_blk_data *part_md;
1571 
1572 	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
1573 				    subname, area_type);
1574 	if (IS_ERR(part_md))
1575 		return PTR_ERR(part_md);
1576 	part_md->part_type = part_type;
1577 	list_add(&part_md->part, &md->part);
1578 
1579 	string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
1580 			cap_str, sizeof(cap_str));
1581 	pr_info("%s: %s %s partition %u %s\n",
1582 	       part_md->disk->disk_name, mmc_card_id(card),
1583 	       mmc_card_name(card), part_md->part_type, cap_str);
1584 	return 0;
1585 }
1586 
1587 /* MMC Physical partitions consist of two boot partitions and
1588  * up to four general purpose partitions.
1589  * For each partition enabled in EXT_CSD a block device will be allocated
1590  * to provide access to the partition.
1591  */
1592 
1593 static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
1594 {
1595 	int idx, ret = 0;
1596 
1597 	if (!mmc_card_mmc(card))
1598 		return 0;
1599 
1600 	for (idx = 0; idx < card->nr_parts; idx++) {
1601 		if (card->part[idx].size) {
1602 			ret = mmc_blk_alloc_part(card, md,
1603 				card->part[idx].part_cfg,
1604 				card->part[idx].size >> 9,
1605 				card->part[idx].force_ro,
1606 				card->part[idx].name,
1607 				card->part[idx].area_type);
1608 			if (ret)
1609 				return ret;
1610 		}
1611 	}
1612 
1613 	return ret;
1614 }
1615 
1616 static int
1617 mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
1618 {
1619 	int err;
1620 
1621 	mmc_claim_host(card->host);
1622 	err = mmc_set_blocklen(card, 512);
1623 	mmc_release_host(card->host);
1624 
1625 	if (err) {
1626 		pr_err("%s: unable to set block size to 512: %d\n",
1627 			md->disk->disk_name, err);
1628 		return -EINVAL;
1629 	}
1630 
1631 	return 0;
1632 }
1633 
1634 static void mmc_blk_remove_req(struct mmc_blk_data *md)
1635 {
1636 	struct mmc_card *card;
1637 
1638 	if (md) {
1639 		card = md->queue.card;
1640 		if (md->disk->flags & GENHD_FL_UP) {
1641 			device_remove_file(disk_to_dev(md->disk), &md->force_ro);
1642 			if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
1643 					card->ext_csd.boot_ro_lockable)
1644 				device_remove_file(disk_to_dev(md->disk),
1645 					&md->power_ro_lock);
1646 
1647 			/* Stop new requests from getting into the queue */
1648 			del_gendisk(md->disk);
1649 		}
1650 
1651 		/* Then flush out any already in there */
1652 		mmc_cleanup_queue(&md->queue);
1653 		mmc_blk_put(md);
1654 	}
1655 }
1656 
1657 static void mmc_blk_remove_parts(struct mmc_card *card,
1658 				 struct mmc_blk_data *md)
1659 {
1660 	struct list_head *pos, *q;
1661 	struct mmc_blk_data *part_md;
1662 
1663 	__clear_bit(md->name_idx, name_use);
1664 	list_for_each_safe(pos, q, &md->part) {
1665 		part_md = list_entry(pos, struct mmc_blk_data, part);
1666 		list_del(pos);
1667 		mmc_blk_remove_req(part_md);
1668 	}
1669 }
1670 
1671 static int mmc_add_disk(struct mmc_blk_data *md)
1672 {
1673 	int ret;
1674 	struct mmc_card *card = md->queue.card;
1675 
1676 	add_disk(md->disk);
1677 	md->force_ro.show = force_ro_show;
1678 	md->force_ro.store = force_ro_store;
1679 	sysfs_attr_init(&md->force_ro.attr);
1680 	md->force_ro.attr.name = "force_ro";
1681 	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
1682 	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
1683 	if (ret)
1684 		goto force_ro_fail;
1685 
1686 	if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
1687 	     card->ext_csd.boot_ro_lockable) {
1688 		mode_t mode;
1689 
1690 		if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
1691 			mode = S_IRUGO;
1692 		else
1693 			mode = S_IRUGO | S_IWUSR;
1694 
1695 		md->power_ro_lock.show = power_ro_lock_show;
1696 		md->power_ro_lock.store = power_ro_lock_store;
1697 		sysfs_attr_init(&md->power_ro_lock.attr);
1698 		md->power_ro_lock.attr.mode = mode;
1699 		md->power_ro_lock.attr.name =
1700 					"ro_lock_until_next_power_on";
1701 		ret = device_create_file(disk_to_dev(md->disk),
1702 				&md->power_ro_lock);
1703 		if (ret)
1704 			goto power_ro_lock_fail;
1705 	}
1706 	return ret;
1707 
1708 power_ro_lock_fail:
1709 	device_remove_file(disk_to_dev(md->disk), &md->force_ro);
1710 force_ro_fail:
1711 	del_gendisk(md->disk);
1712 
1713 	return ret;
1714 }
1715 
1716 #define CID_MANFID_SANDISK	0x2
1717 #define CID_MANFID_TOSHIBA	0x11
1718 #define CID_MANFID_MICRON	0x13
1719 
1720 static const struct mmc_fixup blk_fixups[] =
1721 {
1722 	MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
1723 		  MMC_QUIRK_INAND_CMD38),
1724 	MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
1725 		  MMC_QUIRK_INAND_CMD38),
1726 	MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
1727 		  MMC_QUIRK_INAND_CMD38),
1728 	MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
1729 		  MMC_QUIRK_INAND_CMD38),
1730 	MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
1731 		  MMC_QUIRK_INAND_CMD38),
1732 
1733 	/*
1734 	 * Some MMC cards experience performance degradation with CMD23
1735 	 * instead of CMD12-bounded multiblock transfers. For now we'll
1736 	 * blacklist what's bad...
1737 	 * - Certain Toshiba cards.
1738 	 *
1739 	 * N.B. This doesn't affect SD cards.
1740 	 */
1741 	MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
1742 		  MMC_QUIRK_BLK_NO_CMD23),
1743 	MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
1744 		  MMC_QUIRK_BLK_NO_CMD23),
1745 	MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
1746 		  MMC_QUIRK_BLK_NO_CMD23),
1747 
1748 	/*
1749 	 * Some Micron MMC cards need a longer data read timeout than
1750 	 * indicated in CSD.
1751 	 */
1752 	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
1753 		  MMC_QUIRK_LONG_READ_TIME),
1754 
1755 	END_FIXUP
1756 };
1757 
1758 static int mmc_blk_probe(struct mmc_card *card)
1759 {
1760 	struct mmc_blk_data *md, *part_md;
1761 	int err;
1762 	char cap_str[10];
1763 
1764 	/*
1765 	 * Check that the card supports the command class(es) we need.
1766 	 */
1767 	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
1768 		return -ENODEV;
1769 
1770 	md = mmc_blk_alloc(card);
1771 	if (IS_ERR(md))
1772 		return PTR_ERR(md);
1773 
1774 	err = mmc_blk_set_blksize(md, card);
1775 	if (err)
1776 		goto out;
1777 
1778 	string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
1779 			cap_str, sizeof(cap_str));
1780 	pr_info("%s: %s %s %s %s\n",
1781 		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
1782 		cap_str, md->read_only ? "(ro)" : "");
1783 
1784 	if (mmc_blk_alloc_parts(card, md))
1785 		goto out;
1786 
1787 	mmc_set_drvdata(card, md);
1788 	mmc_fixup_device(card, blk_fixups);
1789 
1790 	if (mmc_add_disk(md))
1791 		goto out;
1792 
1793 	list_for_each_entry(part_md, &md->part, part) {
1794 		if (mmc_add_disk(part_md))
1795 			goto out;
1796 	}
1797 	return 0;
1798 
1799  out:
1800 	mmc_blk_remove_parts(card, md);
1801 	mmc_blk_remove_req(md);
1802 	return err;
1803 }
1804 
1805 static void mmc_blk_remove(struct mmc_card *card)
1806 {
1807 	struct mmc_blk_data *md = mmc_get_drvdata(card);
1808 
1809 	mmc_blk_remove_parts(card, md);
1810 	mmc_claim_host(card->host);
1811 	mmc_blk_part_switch(card, md);
1812 	mmc_release_host(card->host);
1813 	mmc_blk_remove_req(md);
1814 	mmc_set_drvdata(card, NULL);
1815 }
1816 
1817 #ifdef CONFIG_PM
1818 static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
1819 {
1820 	struct mmc_blk_data *part_md;
1821 	struct mmc_blk_data *md = mmc_get_drvdata(card);
1822 
1823 	if (md) {
1824 		mmc_queue_suspend(&md->queue);
1825 		list_for_each_entry(part_md, &md->part, part) {
1826 			mmc_queue_suspend(&part_md->queue);
1827 		}
1828 	}
1829 	return 0;
1830 }
1831 
1832 static int mmc_blk_resume(struct mmc_card *card)
1833 {
1834 	struct mmc_blk_data *part_md;
1835 	struct mmc_blk_data *md = mmc_get_drvdata(card);
1836 
1837 	if (md) {
1838 		mmc_blk_set_blksize(md, card);
1839 
1840 		/*
1841 		 * Resume involves the card going into idle state,
1842 		 * so current partition is always the main one.
1843 		 */
1844 		md->part_curr = md->part_type;
1845 		mmc_queue_resume(&md->queue);
1846 		list_for_each_entry(part_md, &md->part, part) {
1847 			mmc_queue_resume(&part_md->queue);
1848 		}
1849 	}
1850 	return 0;
1851 }
1852 #else
1853 #define	mmc_blk_suspend	NULL
1854 #define mmc_blk_resume	NULL
1855 #endif
1856 
1857 static struct mmc_driver mmc_driver = {
1858 	.drv		= {
1859 		.name	= "mmcblk",
1860 	},
1861 	.probe		= mmc_blk_probe,
1862 	.remove		= mmc_blk_remove,
1863 	.suspend	= mmc_blk_suspend,
1864 	.resume		= mmc_blk_resume,
1865 };
1866 
1867 static int __init mmc_blk_init(void)
1868 {
1869 	int res;
1870 
1871 	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
1872 		pr_info("mmcblk: using %d minors per device\n", perdev_minors);
1873 
1874 	max_devices = 256 / perdev_minors;
1875 
1876 	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
1877 	if (res)
1878 		goto out;
1879 
1880 	res = mmc_register_driver(&mmc_driver);
1881 	if (res)
1882 		goto out2;
1883 
1884 	return 0;
1885  out2:
1886 	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
1887  out:
1888 	return res;
1889 }
1890 
1891 static void __exit mmc_blk_exit(void)
1892 {
1893 	mmc_unregister_driver(&mmc_driver);
1894 	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
1895 }
1896 
1897 module_init(mmc_blk_init);
1898 module_exit(mmc_blk_exit);
1899 
1900 MODULE_LICENSE("GPL");
1901 MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
1902 
1903