Lines matching "512", "-", and "bytes" in the Linux kernel's mmc_test.c. Each line below shows its source line number, the matching code, and the enclosing function.

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright 2007-2008 Pierre Ossman
43 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
53 * struct mmc_test_mem - allocated memory.
63 * struct mmc_test_area - information for performance tests.
64 * @max_sz: test area size (in bytes)
66 * @max_tfr: maximum transfer size allowed by driver (in bytes)
69 * @blocks: number of (512 byte) blocks currently mapped by @sg
73 * @sg_areq: scatterlist for non-blocking request
89 * struct mmc_test_transfer_result - transfer results for performance tests.
90 * @link: double-linked list
107 * struct mmc_test_general_result - results for tests.
108 * @link: double-linked list
123 * struct mmc_test_dbgfs_file - debugfs related file.
124 * @link: double-linked list
135 * struct mmc_test_card - test information.
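
The kerneldoc stubs above name the bookkeeping types without showing their fields. The accessors used further down (mem->arr[i].page, mem->arr[i].order, mem->cnt) imply roughly the following layout, given here as a sketch rather than the verbatim kernel definition:

/* Layout implied by the accessors in mmc_test_free_mem() and
 * mmc_test_alloc_mem() below; a sketch, not the verbatim source. */
struct mmc_test_pages {
	struct page *page;	/* first page of an alloc_pages() run */
	unsigned int order;	/* allocation order of that run */
};

struct mmc_test_mem {
	struct mmc_test_pages *arr;	/* one entry per allocated run */
	unsigned int cnt;		/* number of valid entries */
};
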
180 return mmc_set_blocklen(test->card, size); in mmc_test_set_blksize()
186 (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT); in mmc_test_card_cmd23()
192 struct mmc_card *card = test->card; in mmc_test_prepare_sbc()
194 if (!mrq->sbc || !mmc_host_cmd23(card->host) || in mmc_test_prepare_sbc()
195 !mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) || in mmc_test_prepare_sbc()
196 (card->quirks & MMC_QUIRK_BLK_NO_CMD23)) { in mmc_test_prepare_sbc()
197 mrq->sbc = NULL; in mmc_test_prepare_sbc()
201 mrq->sbc->opcode = MMC_SET_BLOCK_COUNT; in mmc_test_prepare_sbc()
202 mrq->sbc->arg = blocks; in mmc_test_prepare_sbc()
203 mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC; in mmc_test_prepare_sbc()
213 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop)) in mmc_test_prepare_mrq()
217 mrq->cmd->opcode = write ? in mmc_test_prepare_mrq()
220 mrq->cmd->opcode = write ? in mmc_test_prepare_mrq()
224 mrq->cmd->arg = dev_addr; in mmc_test_prepare_mrq()
225 if (!mmc_card_blockaddr(test->card)) in mmc_test_prepare_mrq()
226 mrq->cmd->arg <<= 9; in mmc_test_prepare_mrq()
228 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC; in mmc_test_prepare_mrq()
231 mrq->stop = NULL; in mmc_test_prepare_mrq()
233 mrq->stop->opcode = MMC_STOP_TRANSMISSION; in mmc_test_prepare_mrq()
234 mrq->stop->arg = 0; in mmc_test_prepare_mrq()
235 mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC; in mmc_test_prepare_mrq()
238 mrq->data->blksz = blksz; in mmc_test_prepare_mrq()
239 mrq->data->blocks = blocks; in mmc_test_prepare_mrq()
240 mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ; in mmc_test_prepare_mrq()
241 mrq->data->sg = sg; in mmc_test_prepare_mrq()
242 mrq->data->sg_len = sg_len; in mmc_test_prepare_mrq()
246 mmc_set_data_timeout(mrq->data, test->card); in mmc_test_prepare_mrq()
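
One detail worth noting in mmc_test_prepare_mrq(): standard-capacity cards are byte-addressed, so the 512-byte sector number in dev_addr is shifted left by 9 before it becomes the command argument, while block-addressed (high-capacity) cards take the sector number unchanged. A minimal sketch of that conversion:

#include <stdint.h>
#include <stdbool.h>

/* Command argument for a data transfer, as set up by
 * mmc_test_prepare_mrq(): byte-addressed cards want sector * 512. */
static uint32_t mmc_data_arg(uint32_t dev_addr_sectors, bool blockaddr)
{
	return blockaddr ? dev_addr_sectors : dev_addr_sectors << 9;
}
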
251 return !(cmd->resp[0] & R1_READY_FOR_DATA) || in mmc_test_busy()
252 (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG); in mmc_test_busy()
268 cmd.arg = test->card->rca << 16; in mmc_test_wait_busy()
271 ret = mmc_wait_for_cmd(test->card->host, &cmd, 0); in mmc_test_wait_busy()
277 if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) in mmc_test_wait_busy()
279 mmc_hostname(test->card->host)); in mmc_test_wait_busy()
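
The predicate in mmc_test_busy() treats the card as busy while the R1 status either has READY_FOR_DATA clear or reports the programming state, and mmc_test_wait_busy() polls CMD13 (SEND_STATUS) until it clears. A standalone sketch of the predicate, using the standard R1 bit layout:

#include <stdint.h>
#include <stdbool.h>

#define R1_READY_FOR_DATA	(1u << 8)		/* standard R1 bit */
#define R1_CURRENT_STATE(x)	(((x) >> 9) & 0xFu)	/* 4-bit state field */
#define R1_STATE_PRG		7			/* programming */

/* Busy test equivalent to mmc_test_busy() over a raw R1 word. */
static bool card_busy(uint32_t r1_status)
{
	return !(r1_status & R1_READY_FOR_DATA) ||
	       R1_CURRENT_STATE(r1_status) == R1_STATE_PRG;
}
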
307 mmc_wait_for_req(test->card->host, &mrq); in mmc_test_buffer_transfer()
321 while (mem->cnt--) in mmc_test_free_mem()
322 __free_pages(mem->arr[mem->cnt].page, in mmc_test_free_mem()
323 mem->arr[mem->cnt].order); in mmc_test_free_mem()
324 kfree(mem->arr); in mmc_test_free_mem()
361 mem->arr = kcalloc(max_segs, sizeof(*mem->arr), GFP_KERNEL); in mmc_test_alloc_mem()
362 if (!mem->arr) in mmc_test_alloc_mem()
376 order -= 1; in mmc_test_alloc_mem()
383 mem->arr[mem->cnt].page = page; in mmc_test_alloc_mem()
384 mem->arr[mem->cnt].order = order; in mmc_test_alloc_mem()
385 mem->cnt += 1; in mmc_test_alloc_mem()
388 max_page_cnt -= 1UL << order; in mmc_test_alloc_mem()
390 if (mem->cnt >= max_segs) { in mmc_test_alloc_mem()
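
The allocation loop here tries the largest useful power-of-two run of pages first and steps the order down when an allocation fails, recording each successful run until the page budget or the segment budget runs out. A user-space analogue, with aligned_alloc() standing in for alloc_pages() (illustrative only):

#include <stdlib.h>

#define PAGE_SIZE 4096UL

struct seg { void *buf; unsigned int order; };

/* Cover page_cnt pages with at most max_segs chunks, preferring large
 * power-of-two chunks as mmc_test_alloc_mem() does. Returns the number
 * of chunks actually allocated. */
static unsigned int alloc_chunks(struct seg *arr, unsigned int max_segs,
				 unsigned long page_cnt, unsigned int max_order)
{
	unsigned int cnt = 0;

	while (page_cnt && cnt < max_segs) {
		unsigned int order = max_order;
		void *buf;

		/* Don't request more than is still needed. */
		while (order && (1UL << order) > page_cnt)
			order -= 1;
		/* Fall back to smaller orders on failure. */
		while (!(buf = aligned_alloc(PAGE_SIZE, PAGE_SIZE << order))) {
			if (!order--)
				return cnt;	/* even a single page failed */
		}
		arr[cnt].buf = buf;
		arr[cnt].order = order;
		cnt += 1;
		page_cnt -= 1UL << order;
	}
	return cnt;
}
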
423 for (i = 0; i < mem->cnt; i++) { in mmc_test_map_sg()
424 unsigned long len = PAGE_SIZE << mem->arr[i].order; in mmc_test_map_sg()
427 len = ALIGN(size / min_sg_len, 512); in mmc_test_map_sg()
437 return -EINVAL; in mmc_test_map_sg()
438 sg_set_page(sg, mem->arr[i].page, len, 0); in mmc_test_map_sg()
439 sz -= len; in mmc_test_map_sg()
447 return -EINVAL; in mmc_test_map_sg()
467 unsigned int i = mem->cnt, cnt; in mmc_test_map_sg_max_scatter()
475 base = page_address(mem->arr[--i].page); in mmc_test_map_sg_max_scatter()
476 cnt = 1 << mem->arr[i].order; in mmc_test_map_sg_max_scatter()
478 addr = base + PAGE_SIZE * --cnt; in mmc_test_map_sg_max_scatter()
492 return -EINVAL; in mmc_test_map_sg_max_scatter()
494 sz -= len; in mmc_test_map_sg_max_scatter()
498 i = mem->cnt; in mmc_test_map_sg_max_scatter()
508 * Calculate transfer rate in bytes per second.
510 static unsigned int mmc_test_rate(uint64_t bytes, struct timespec64 *ts) in mmc_test_rate() argument
515 bytes *= 1000000000; in mmc_test_rate()
518 bytes >>= 1; in mmc_test_rate()
525 do_div(bytes, (uint32_t)ns); in mmc_test_rate()
527 return bytes; in mmc_test_rate()
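
mmc_test_rate() scales the byte count to per-second units and then halves both terms until the elapsed nanoseconds fit in 32 bits, because the kernel's do_div() only accepts a 32-bit divisor. The same arithmetic in plain user-space C:

#include <stdint.h>
#include <limits.h>

/* Bytes per second from a byte count and elapsed nanoseconds,
 * mirroring mmc_test_rate(): scale, then halve both terms until the
 * divisor fits in 32 bits (the do_div() constraint). */
static unsigned int rate_bytes_per_sec(uint64_t bytes, uint64_t ns)
{
	bytes *= 1000000000ULL;

	while (ns > UINT_MAX) {
		bytes >>= 1;
		ns >>= 1;
	}
	if (!ns)
		return 0;

	return (unsigned int)(bytes / (uint32_t)ns);
}

For example, 1 MiB (1048576 bytes) moved in 2 ms (2000000 ns) comes out to 524288000 bytes per second.
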
539 if (!test->gr) in mmc_test_save_transfer_result()
546 tr->count = count; in mmc_test_save_transfer_result()
547 tr->sectors = sectors; in mmc_test_save_transfer_result()
548 tr->ts = ts; in mmc_test_save_transfer_result()
549 tr->rate = rate; in mmc_test_save_transfer_result()
550 tr->iops = iops; in mmc_test_save_transfer_result()
552 list_add_tail(&tr->link, &test->gr->tr_lst); in mmc_test_save_transfer_result()
558 static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes, in mmc_test_print_rate() argument
561 unsigned int rate, iops, sectors = bytes >> 9; in mmc_test_print_rate()
566 rate = mmc_test_rate(bytes, &ts); in mmc_test_print_rate()
571 mmc_hostname(test->card->host), sectors, sectors >> 1, in mmc_test_print_rate()
582 static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes, in mmc_test_print_avg_rate() argument
586 unsigned int rate, iops, sectors = bytes >> 9; in mmc_test_print_avg_rate()
587 uint64_t tot = bytes * count; in mmc_test_print_avg_rate()
598 mmc_hostname(test->card->host), count, sectors, count, in mmc_test_print_avg_rate()
602 test->area.sg_len); in mmc_test_print_avg_rate()
613 return card->ext_csd.sectors; in mmc_test_capacity()
615 return card->csd.capacity << (card->csd.read_blkbits - 9); in mmc_test_capacity()
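
mmc_test_capacity() reports the card size in 512-byte sectors: block-addressed (high-capacity) cards carry it directly in ext_csd.sectors, while byte-addressed cards store csd.capacity in units of (1 << read_blkbits) bytes, so the value must be rescaled. A sketch of that calculation:

#include <stdint.h>
#include <stdbool.h>

/* Card capacity in 512-byte sectors, mirroring mmc_test_capacity().
 * csd_capacity is in blocks of (1 << read_blkbits) bytes. */
static uint64_t capacity_sectors(bool blockaddr, uint32_t ext_csd_sectors,
				 uint32_t csd_capacity,
				 unsigned int read_blkbits)
{
	if (blockaddr)
		return ext_csd_sectors;
	return (uint64_t)csd_capacity << (read_blkbits - 9);
}

For instance, a byte-addressed card with csd_capacity = 1048576 and read_blkbits = 10 yields 2097152 sectors, i.e. 1 GiB.
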
630 ret = mmc_test_set_blksize(test, 512); in __mmc_test_prepare()
635 memset(test->buffer, val, 512); in __mmc_test_prepare()
637 for (i = 0; i < 512; i++) in __mmc_test_prepare()
638 test->buffer[i] = i; in __mmc_test_prepare()
641 for (i = 0; i < BUFFER_SIZE / 512; i++) { in __mmc_test_prepare()
642 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1); in __mmc_test_prepare()
675 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data)) in mmc_test_prepare_broken_mrq()
678 if (mrq->data->blocks > 1) { in mmc_test_prepare_broken_mrq()
679 mrq->cmd->opcode = write ? in mmc_test_prepare_broken_mrq()
681 mrq->stop = NULL; in mmc_test_prepare_broken_mrq()
683 mrq->cmd->opcode = MMC_SEND_STATUS; in mmc_test_prepare_broken_mrq()
684 mrq->cmd->arg = test->card->rca << 16; in mmc_test_prepare_broken_mrq()
696 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data)) in mmc_test_check_result()
697 return -EINVAL; in mmc_test_check_result()
701 if (mrq->sbc && mrq->sbc->error) in mmc_test_check_result()
702 ret = mrq->sbc->error; in mmc_test_check_result()
703 if (!ret && mrq->cmd->error) in mmc_test_check_result()
704 ret = mrq->cmd->error; in mmc_test_check_result()
705 if (!ret && mrq->data->error) in mmc_test_check_result()
706 ret = mrq->data->error; in mmc_test_check_result()
707 if (!ret && mrq->stop && mrq->stop->error) in mmc_test_check_result()
708 ret = mrq->stop->error; in mmc_test_check_result()
709 if (!ret && mrq->data->bytes_xfered != in mmc_test_check_result()
710 mrq->data->blocks * mrq->data->blksz) in mmc_test_check_result()
713 if (ret == -EINVAL) in mmc_test_check_result()
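
These checks give errors a strict precedence: the SBC (CMD23) error is reported first, then the data command's, the data phase's, the stop command's, and finally a short transfer counts as a failure of its own. The same first-error-wins chain, condensed (the struct layout is a simplified stand-in, not the kernel's struct mmc_request):

struct fake_cmd  { int error; };
struct fake_data { int error; unsigned int bytes_xfered, blocks, blksz; };
struct fake_mrq  {
	struct fake_cmd *sbc, *cmd, *stop;
	struct fake_data *data;
};

/* First error in command order wins, as in mmc_test_check_result(). */
static int check_result(const struct fake_mrq *mrq)
{
	int ret = 0;

	if (mrq->sbc && mrq->sbc->error)
		ret = mrq->sbc->error;
	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (!ret && mrq->data->bytes_xfered !=
	    mrq->data->blocks * mrq->data->blksz)
		ret = -1;	/* short transfer */
	return ret;
}
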
727 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data)) in mmc_test_check_broken_result()
728 return -EINVAL; in mmc_test_check_broken_result()
732 if (!ret && mrq->cmd->error) in mmc_test_check_broken_result()
733 ret = mrq->cmd->error; in mmc_test_check_broken_result()
734 if (!ret && mrq->data->error == 0) in mmc_test_check_broken_result()
736 if (!ret && mrq->data->error != -ETIMEDOUT) in mmc_test_check_broken_result()
737 ret = mrq->data->error; in mmc_test_check_broken_result()
738 if (!ret && mrq->stop && mrq->stop->error) in mmc_test_check_broken_result()
739 ret = mrq->stop->error; in mmc_test_check_broken_result()
740 if (mrq->data->blocks > 1) { in mmc_test_check_broken_result()
741 if (!ret && mrq->data->bytes_xfered > mrq->data->blksz) in mmc_test_check_broken_result()
744 if (!ret && mrq->data->bytes_xfered > 0) in mmc_test_check_broken_result()
748 if (ret == -EINVAL) in mmc_test_check_broken_result()
770 rq->mrq.cmd = &rq->cmd; in mmc_test_req_reset()
771 rq->mrq.data = &rq->data; in mmc_test_req_reset()
772 rq->mrq.stop = &rq->stop; in mmc_test_req_reset()
787 complete(&mrq->completion); in mmc_test_wait_done()
794 struct mmc_host *host = test->card->host; in mmc_test_start_areq()
798 init_completion(&mrq->completion); in mmc_test_start_areq()
799 mrq->done = mmc_test_wait_done; in mmc_test_start_areq()
804 wait_for_completion(&prev_mrq->completion); in mmc_test_start_areq()
833 struct mmc_test_area *t = &test->area; in mmc_test_nonblock_transfer()
834 struct scatterlist *sg = t->sg; in mmc_test_nonblock_transfer()
835 struct scatterlist *sg_areq = t->sg_areq; in mmc_test_nonblock_transfer()
844 mrq = &rq1->mrq; in mmc_test_nonblock_transfer()
849 mmc_test_prepare_mrq(test, mrq, sg, t->sg_len, dev_addr, in mmc_test_nonblock_transfer()
850 t->blocks, 512, write); in mmc_test_nonblock_transfer()
856 prev_mrq = &rq2->mrq; in mmc_test_nonblock_transfer()
860 dev_addr += t->blocks; in mmc_test_nonblock_transfer()
889 mmc_wait_for_req(test->card->host, &mrq); in mmc_test_simple_transfer()
913 sg_init_one(&sg, test->buffer, blocks * blksz); in mmc_test_broken_transfer()
918 mmc_wait_for_req(test->card->host, &mrq); in mmc_test_broken_transfer()
938 test->scratch[i] = i; in mmc_test_transfer()
940 memset(test->scratch, 0, BUFFER_SIZE); in mmc_test_transfer()
942 sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); in mmc_test_transfer()
956 ret = mmc_test_set_blksize(test, 512); in mmc_test_transfer()
960 sectors = (blocks * blksz + 511) / 512; in mmc_test_transfer()
961 if ((sectors * 512) == (blocks * blksz)) in mmc_test_transfer()
964 if ((sectors * 512) > BUFFER_SIZE) in mmc_test_transfer()
965 return -EINVAL; in mmc_test_transfer()
967 memset(test->buffer, 0, sectors * 512); in mmc_test_transfer()
971 test->buffer + i * 512, in mmc_test_transfer()
972 dev_addr + i, 512, 0); in mmc_test_transfer()
978 if (test->buffer[i] != (u8)i) in mmc_test_transfer()
982 for (; i < sectors * 512; i++) { in mmc_test_transfer()
983 if (test->buffer[i] != 0xDF) in mmc_test_transfer()
987 sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); in mmc_test_transfer()
989 if (test->scratch[i] != (u8)i) in mmc_test_transfer()
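
The verify path in mmc_test_transfer() rounds the transfer up to whole 512-byte sectors and reads them back one at a time (one extra sector when the length is an exact multiple, so there is always some padding to check). Two regions are then checked: the transferred bytes must carry the incrementing 0, 1, 2, ... pattern truncated to u8, and the padding must still hold the 0xDF fill laid down by the prepare step. A sketch of the buffer check:

#include <stdint.h>

/* Verify a read-back buffer as mmc_test_transfer() does: pattern bytes
 * first, untouched 0xDF fill in the round-up padding after them. */
static int verify_readback(const uint8_t *buf, unsigned int blocks,
			   unsigned int blksz)
{
	unsigned int len = blocks * blksz;
	unsigned int sectors = (len + 511) / 512;	/* round up */
	unsigned int i;

	if (sectors * 512 == len)
		sectors += 1;	/* always check some untouched padding */

	for (i = 0; i < len; i++)
		if (buf[i] != (uint8_t)i)
			return -1;
	for (; i < sectors * 512; i++)
		if (buf[i] != 0xDF)
			return -1;
	return 0;
}
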
1014 ret = mmc_test_set_blksize(test, 512); in mmc_test_basic_write()
1018 sg_init_one(&sg, test->buffer, 512); in mmc_test_basic_write()
1020 return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1); in mmc_test_basic_write()
1028 ret = mmc_test_set_blksize(test, 512); in mmc_test_basic_read()
1032 sg_init_one(&sg, test->buffer, 512); in mmc_test_basic_read()
1034 return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0); in mmc_test_basic_read()
1041 sg_init_one(&sg, test->buffer, 512); in mmc_test_verify_write()
1043 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); in mmc_test_verify_write()
1050 sg_init_one(&sg, test->buffer, 512); in mmc_test_verify_read()
1052 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); in mmc_test_verify_read()
1060 if (test->card->host->max_blk_count == 1) in mmc_test_multi_write()
1064 size = min(size, test->card->host->max_req_size); in mmc_test_multi_write()
1065 size = min(size, test->card->host->max_seg_size); in mmc_test_multi_write()
1066 size = min(size, test->card->host->max_blk_count * 512); in mmc_test_multi_write()
1071 sg_init_one(&sg, test->buffer, size); in mmc_test_multi_write()
1073 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1); in mmc_test_multi_write()
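
Each multi-block test clamps its transfer to the tightest of three host limits before building the request: the maximum request size, the maximum single-segment size, and the maximum block count expressed in bytes. The pattern repeats in mmc_test_multi_read(), the badly-aligned variants, and the highmem tests; factored out as a sketch:

/* Clamp a transfer length to the host limits, as the multi-block
 * tests do before calling mmc_test_transfer(). */
static unsigned int clamp_transfer_size(unsigned int size,
					unsigned int max_req_size,
					unsigned int max_seg_size,
					unsigned int max_blk_count)
{
	if (size > max_req_size)
		size = max_req_size;
	if (size > max_seg_size)
		size = max_seg_size;
	if (size > max_blk_count * 512)
		size = max_blk_count * 512;
	return size;
}
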
1081 if (test->card->host->max_blk_count == 1) in mmc_test_multi_read()
1085 size = min(size, test->card->host->max_req_size); in mmc_test_multi_read()
1086 size = min(size, test->card->host->max_seg_size); in mmc_test_multi_read()
1087 size = min(size, test->card->host->max_blk_count * 512); in mmc_test_multi_read()
1092 sg_init_one(&sg, test->buffer, size); in mmc_test_multi_read()
1094 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0); in mmc_test_multi_read()
1102 if (!test->card->csd.write_partial) in mmc_test_pow2_write()
1105 for (i = 1; i < 512; i <<= 1) { in mmc_test_pow2_write()
1106 sg_init_one(&sg, test->buffer, i); in mmc_test_pow2_write()
1120 if (!test->card->csd.read_partial) in mmc_test_pow2_read()
1123 for (i = 1; i < 512; i <<= 1) { in mmc_test_pow2_read()
1124 sg_init_one(&sg, test->buffer, i); in mmc_test_pow2_read()
1138 if (!test->card->csd.write_partial) in mmc_test_weird_write()
1141 for (i = 3; i < 512; i += 7) { in mmc_test_weird_write()
1142 sg_init_one(&sg, test->buffer, i); in mmc_test_weird_write()
1156 if (!test->card->csd.read_partial) in mmc_test_weird_read()
1159 for (i = 3; i < 512; i += 7) { in mmc_test_weird_read()
1160 sg_init_one(&sg, test->buffer, i); in mmc_test_weird_read()
1175 sg_init_one(&sg, test->buffer + i, 512); in mmc_test_align_write()
1176 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); in mmc_test_align_write()
1190 sg_init_one(&sg, test->buffer + i, 512); in mmc_test_align_read()
1191 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); in mmc_test_align_read()
1205 if (test->card->host->max_blk_count == 1) in mmc_test_align_multi_write()
1209 size = min(size, test->card->host->max_req_size); in mmc_test_align_multi_write()
1210 size = min(size, test->card->host->max_seg_size); in mmc_test_align_multi_write()
1211 size = min(size, test->card->host->max_blk_count * 512); in mmc_test_align_multi_write()
1217 sg_init_one(&sg, test->buffer + i, size); in mmc_test_align_multi_write()
1218 ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1); in mmc_test_align_multi_write()
1232 if (test->card->host->max_blk_count == 1) in mmc_test_align_multi_read()
1236 size = min(size, test->card->host->max_req_size); in mmc_test_align_multi_read()
1237 size = min(size, test->card->host->max_seg_size); in mmc_test_align_multi_read()
1238 size = min(size, test->card->host->max_blk_count * 512); in mmc_test_align_multi_read()
1244 sg_init_one(&sg, test->buffer + i, size); in mmc_test_align_multi_read()
1245 ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0); in mmc_test_align_multi_read()
1257 ret = mmc_test_set_blksize(test, 512); in mmc_test_xfersize_write()
1261 return mmc_test_broken_transfer(test, 1, 512, 1); in mmc_test_xfersize_write()
1268 ret = mmc_test_set_blksize(test, 512); in mmc_test_xfersize_read()
1272 return mmc_test_broken_transfer(test, 1, 512, 0); in mmc_test_xfersize_read()
1279 if (test->card->host->max_blk_count == 1) in mmc_test_multi_xfersize_write()
1282 ret = mmc_test_set_blksize(test, 512); in mmc_test_multi_xfersize_write()
1286 return mmc_test_broken_transfer(test, 2, 512, 1); in mmc_test_multi_xfersize_write()
1293 if (test->card->host->max_blk_count == 1) in mmc_test_multi_xfersize_read()
1296 ret = mmc_test_set_blksize(test, 512); in mmc_test_multi_xfersize_read()
1300 return mmc_test_broken_transfer(test, 2, 512, 0); in mmc_test_multi_xfersize_read()
1310 sg_set_page(&sg, test->highmem, 512, 0); in mmc_test_write_high()
1312 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); in mmc_test_write_high()
1320 sg_set_page(&sg, test->highmem, 512, 0); in mmc_test_read_high()
1322 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); in mmc_test_read_high()
1330 if (test->card->host->max_blk_count == 1) in mmc_test_multi_write_high()
1334 size = min(size, test->card->host->max_req_size); in mmc_test_multi_write_high()
1335 size = min(size, test->card->host->max_seg_size); in mmc_test_multi_write_high()
1336 size = min(size, test->card->host->max_blk_count * 512); in mmc_test_multi_write_high()
1342 sg_set_page(&sg, test->highmem, size, 0); in mmc_test_multi_write_high()
1344 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1); in mmc_test_multi_write_high()
1352 if (test->card->host->max_blk_count == 1) in mmc_test_multi_read_high()
1356 size = min(size, test->card->host->max_req_size); in mmc_test_multi_read_high()
1357 size = min(size, test->card->host->max_seg_size); in mmc_test_multi_read_high()
1358 size = min(size, test->card->host->max_blk_count * 512); in mmc_test_multi_read_high()
1364 sg_set_page(&sg, test->highmem, size, 0); in mmc_test_multi_read_high()
1366 return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0); in mmc_test_multi_read_high()
1373 pr_info("%s: Highmem not configured - test skipped\n", in mmc_test_no_highmem()
1374 mmc_hostname(test->card->host)); in mmc_test_no_highmem()
1381 * Map sz bytes so that it can be transferred.
1386 struct mmc_test_area *t = &test->area; in mmc_test_area_map()
1390 t->blocks = sz >> 9; in mmc_test_area_map()
1393 err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg, in mmc_test_area_map()
1394 t->max_segs, t->max_seg_sz, in mmc_test_area_map()
1395 &t->sg_len); in mmc_test_area_map()
1397 err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs, in mmc_test_area_map()
1398 t->max_seg_sz, &t->sg_len, min_sg_len); in mmc_test_area_map()
1405 err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg_areq, in mmc_test_area_map()
1406 t->max_segs, t->max_seg_sz, in mmc_test_area_map()
1409 err = mmc_test_map_sg(t->mem, sz, t->sg_areq, 1, t->max_segs, in mmc_test_area_map()
1410 t->max_seg_sz, &sg_len, min_sg_len); in mmc_test_area_map()
1412 if (!err && sg_len != t->sg_len) in mmc_test_area_map()
1413 err = -EINVAL; in mmc_test_area_map()
1418 mmc_hostname(test->card->host)); in mmc_test_area_map()
1423 * Transfer bytes mapped by mmc_test_area_map().
1428 struct mmc_test_area *t = &test->area; in mmc_test_area_transfer()
1430 return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr, in mmc_test_area_transfer()
1431 t->blocks, 512, write); in mmc_test_area_transfer()
1435 * Map and transfer bytes for multiple transfers.
1451 struct mmc_test_area *t = &test->area; in mmc_test_area_io_seq()
1454 if (t->max_seg_sz >= PAGE_SIZE) in mmc_test_area_io_seq()
1455 max_tfr = t->max_segs * PAGE_SIZE; in mmc_test_area_io_seq()
1457 max_tfr = t->max_segs * t->max_seg_sz; in mmc_test_area_io_seq()
1501 struct mmc_test_area *t = &test->area; in mmc_test_area_fill()
1503 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0); in mmc_test_area_fill()
1511 struct mmc_test_area *t = &test->area; in mmc_test_area_erase()
1513 if (!mmc_can_erase(test->card)) in mmc_test_area_erase()
1516 return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9, in mmc_test_area_erase()
1525 struct mmc_test_area *t = &test->area; in mmc_test_area_cleanup()
1527 kfree(t->sg); in mmc_test_area_cleanup()
1528 kfree(t->sg_areq); in mmc_test_area_cleanup()
1529 mmc_test_free_mem(t->mem); in mmc_test_area_cleanup()
1543 struct mmc_test_area *t = &test->area; in mmc_test_area_init()
1547 ret = mmc_test_set_blksize(test, 512); in mmc_test_area_init()
1552 sz = (unsigned long)test->card->pref_erase << 9; in mmc_test_area_init()
1553 t->max_sz = sz; in mmc_test_area_init()
1554 while (t->max_sz < 4 * 1024 * 1024) in mmc_test_area_init()
1555 t->max_sz += sz; in mmc_test_area_init()
1556 while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz) in mmc_test_area_init()
1557 t->max_sz -= sz; in mmc_test_area_init()
1559 t->max_segs = test->card->host->max_segs; in mmc_test_area_init()
1560 t->max_seg_sz = test->card->host->max_seg_size; in mmc_test_area_init()
1561 t->max_seg_sz -= t->max_seg_sz % 512; in mmc_test_area_init()
1563 t->max_tfr = t->max_sz; in mmc_test_area_init()
1564 if (t->max_tfr >> 9 > test->card->host->max_blk_count) in mmc_test_area_init()
1565 t->max_tfr = test->card->host->max_blk_count << 9; in mmc_test_area_init()
1566 if (t->max_tfr > test->card->host->max_req_size) in mmc_test_area_init()
1567 t->max_tfr = test->card->host->max_req_size; in mmc_test_area_init()
1568 if (t->max_tfr / t->max_seg_sz > t->max_segs) in mmc_test_area_init()
1569 t->max_tfr = t->max_segs * t->max_seg_sz; in mmc_test_area_init()
1577 t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs, in mmc_test_area_init()
1578 t->max_seg_sz); in mmc_test_area_init()
1579 if (!t->mem) in mmc_test_area_init()
1580 return -ENOMEM; in mmc_test_area_init()
1582 t->sg = kmalloc_array(t->max_segs, sizeof(*t->sg), GFP_KERNEL); in mmc_test_area_init()
1583 if (!t->sg) { in mmc_test_area_init()
1584 ret = -ENOMEM; in mmc_test_area_init()
1588 t->sg_areq = kmalloc_array(t->max_segs, sizeof(*t->sg_areq), in mmc_test_area_init()
1590 if (!t->sg_areq) { in mmc_test_area_init()
1591 ret = -ENOMEM; in mmc_test_area_init()
1595 t->dev_addr = mmc_test_capacity(test->card) / 2; in mmc_test_area_init()
1596 t->dev_addr -= t->dev_addr % (t->max_sz >> 9); in mmc_test_area_init()
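
Taken together, these fragments derive the whole test-area geometry from the card's preferred erase size: max_sz is a whole number of pref_erase units grown to at least 4 MiB and trimmed back under the area cap, max_seg_sz is rounded down to a 512-byte multiple, max_tfr is clamped by the block-count, request-size, and segment limits, and dev_addr starts at mid-capacity aligned to a whole area. A condensed sketch (TEST_AREA_MAX_SIZE is shown with the driver's 128 MiB value; treat the exact constant as an assumption):

#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)	/* bytes; assumed */

struct area_geom {
	unsigned long max_sz;		/* test area size, bytes */
	unsigned int max_seg_sz;	/* per-segment size, bytes */
	unsigned long max_tfr;		/* per-request size, bytes */
	unsigned long dev_addr;		/* start, 512-byte sectors */
};

/* Geometry calculation following mmc_test_area_init(). */
static void area_init_geom(struct area_geom *t, unsigned int pref_erase,
			   unsigned int max_segs, unsigned int max_seg_size,
			   unsigned int max_blk_count,
			   unsigned int max_req_size,
			   unsigned long capacity_sectors)
{
	unsigned long sz = (unsigned long)pref_erase << 9;

	t->max_sz = sz;
	while (t->max_sz < 4 * 1024 * 1024)
		t->max_sz += sz;
	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
		t->max_sz -= sz;

	t->max_seg_sz = max_seg_size - max_seg_size % 512;

	t->max_tfr = t->max_sz;
	if (t->max_tfr >> 9 > max_blk_count)
		t->max_tfr = (unsigned long)max_blk_count << 9;
	if (t->max_tfr > max_req_size)
		t->max_tfr = max_req_size;
	if (t->max_tfr / t->max_seg_sz > max_segs)
		t->max_tfr = (unsigned long)max_segs * t->max_seg_sz;

	t->dev_addr = capacity_sectors / 2;
	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
}
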
1642 * Test best-case performance. Best-case performance is expected from
1652 struct mmc_test_area *t = &test->area; in mmc_test_best_performance()
1654 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write, in mmc_test_best_performance()
1659 * Best-case read performance.
1667 * Best-case write performance.
1675 * Best-case read performance into scattered pages.
1683 * Best-case write performance from scattered pages.
1695 struct mmc_test_area *t = &test->area; in mmc_test_profile_read_perf()
1700 for (sz = 512; sz < t->max_tfr; sz <<= 1) { in mmc_test_profile_read_perf()
1701 dev_addr = t->dev_addr + (sz >> 9); in mmc_test_profile_read_perf()
1706 sz = t->max_tfr; in mmc_test_profile_read_perf()
1707 dev_addr = t->dev_addr; in mmc_test_profile_read_perf()
1716 struct mmc_test_area *t = &test->area; in mmc_test_profile_write_perf()
1724 for (sz = 512; sz < t->max_tfr; sz <<= 1) { in mmc_test_profile_write_perf()
1725 dev_addr = t->dev_addr + (sz >> 9); in mmc_test_profile_write_perf()
1733 sz = t->max_tfr; in mmc_test_profile_write_perf()
1734 dev_addr = t->dev_addr; in mmc_test_profile_write_perf()
1743 struct mmc_test_area *t = &test->area; in mmc_test_profile_trim_perf()
1749 if (!mmc_can_trim(test->card)) in mmc_test_profile_trim_perf()
1752 if (!mmc_can_erase(test->card)) in mmc_test_profile_trim_perf()
1755 for (sz = 512; sz < t->max_sz; sz <<= 1) { in mmc_test_profile_trim_perf()
1756 dev_addr = t->dev_addr + (sz >> 9); in mmc_test_profile_trim_perf()
1758 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); in mmc_test_profile_trim_perf()
1764 dev_addr = t->dev_addr; in mmc_test_profile_trim_perf()
1766 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); in mmc_test_profile_trim_perf()
1776 struct mmc_test_area *t = &test->area; in mmc_test_seq_read_perf()
1781 cnt = t->max_sz / sz; in mmc_test_seq_read_perf()
1782 dev_addr = t->dev_addr; in mmc_test_seq_read_perf()
1800 struct mmc_test_area *t = &test->area; in mmc_test_profile_seq_read_perf()
1804 for (sz = 512; sz < t->max_tfr; sz <<= 1) { in mmc_test_profile_seq_read_perf()
1809 sz = t->max_tfr; in mmc_test_profile_seq_read_perf()
1815 struct mmc_test_area *t = &test->area; in mmc_test_seq_write_perf()
1823 cnt = t->max_sz / sz; in mmc_test_seq_write_perf()
1824 dev_addr = t->dev_addr; in mmc_test_seq_write_perf()
1842 struct mmc_test_area *t = &test->area; in mmc_test_profile_seq_write_perf()
1846 for (sz = 512; sz < t->max_tfr; sz <<= 1) { in mmc_test_profile_seq_write_perf()
1851 sz = t->max_tfr; in mmc_test_profile_seq_write_perf()
1860 struct mmc_test_area *t = &test->area; in mmc_test_profile_seq_trim_perf()
1866 if (!mmc_can_trim(test->card)) in mmc_test_profile_seq_trim_perf()
1869 if (!mmc_can_erase(test->card)) in mmc_test_profile_seq_trim_perf()
1872 for (sz = 512; sz <= t->max_sz; sz <<= 1) { in mmc_test_profile_seq_trim_perf()
1879 cnt = t->max_sz / sz; in mmc_test_profile_seq_trim_perf()
1880 dev_addr = t->dev_addr; in mmc_test_profile_seq_trim_perf()
1883 ret = mmc_erase(test->card, dev_addr, sz >> 9, in mmc_test_profile_seq_trim_perf()
1916 rnd_addr = mmc_test_capacity(test->card) / 4; in mmc_test_rnd_perf()
1917 range1 = rnd_addr / test->card->pref_erase; in mmc_test_rnd_perf()
1928 ea -= 1; in mmc_test_rnd_perf()
1930 dev_addr = rnd_addr + test->card->pref_erase * ea + in mmc_test_rnd_perf()
1933 mmc_retune_needed(test->card->host); in mmc_test_rnd_perf()
1945 struct mmc_test_area *t = &test->area; in mmc_test_random_perf()
1950 for (sz = 512; sz < t->max_tfr; sz <<= 1) { in mmc_test_random_perf()
1967 sz = t->max_tfr; in mmc_test_random_perf()
1980 if (!mmc_can_retune(test->card->host)) { in mmc_test_retuning()
1981 pr_info("%s: No retuning - test skipped\n", in mmc_test_retuning()
1982 mmc_hostname(test->card->host)); in mmc_test_retuning()
2008 struct mmc_test_area *t = &test->area; in mmc_test_seq_perf()
2013 sz = t->max_tfr; in mmc_test_seq_perf()
2022 if (t->max_seg_sz >= PAGE_SIZE) in mmc_test_seq_perf()
2023 max_tfr = t->max_segs * PAGE_SIZE; in mmc_test_seq_perf()
2025 max_tfr = t->max_segs * t->max_seg_sz; in mmc_test_seq_perf()
2031 dev_addr = mmc_test_capacity(test->card) / 4; in mmc_test_seq_perf()
2097 struct mmc_test_area *t = &test->area; in mmc_test_rw_multiple()
2101 if (size > mmc_test_capacity(test->card) / 2 * 512) in mmc_test_rw_multiple()
2102 size = mmc_test_capacity(test->card) / 2 * 512; in mmc_test_rw_multiple()
2103 if (reqsize > t->max_tfr) in mmc_test_rw_multiple()
2104 reqsize = t->max_tfr; in mmc_test_rw_multiple()
2105 dev_addr = mmc_test_capacity(test->card) / 4; in mmc_test_rw_multiple()
2117 if (mmc_can_erase(test->card) && in mmc_test_rw_multiple()
2118 tdata->prepare & MMC_TEST_PREP_ERASE) { in mmc_test_rw_multiple()
2119 ret = mmc_erase(test->card, dev_addr, in mmc_test_rw_multiple()
2120 size / 512, test->card->erase_arg); in mmc_test_rw_multiple()
2122 ret = mmc_erase(test->card, dev_addr, in mmc_test_rw_multiple()
2123 size / 512, MMC_ERASE_ARG); in mmc_test_rw_multiple()
2130 tdata->do_write, 0, 1, size / reqsize, in mmc_test_rw_multiple()
2131 tdata->do_nonblock_req, min_sg_len); in mmc_test_rw_multiple()
2146 void *pre_req = test->card->host->ops->pre_req; in mmc_test_rw_multiple_size()
2147 void *post_req = test->card->host->ops->post_req; in mmc_test_rw_multiple_size()
2149 if (rw->do_nonblock_req && in mmc_test_rw_multiple_size()
2152 return -EINVAL; in mmc_test_rw_multiple_size()
2155 for (i = 0 ; i < rw->len && ret == 0; i++) { in mmc_test_rw_multiple_size()
2156 ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0); in mmc_test_rw_multiple_size()
2169 for (i = 0 ; i < rw->len && ret == 0; i++) { in mmc_test_rw_multiple_sg_len()
2170 ret = mmc_test_rw_multiple(test, rw, 512 * 1024, rw->size, in mmc_test_rw_multiple_sg_len()
2171 rw->sg_len[i]); in mmc_test_rw_multiple_sg_len()
2198 * Multiple non-blocking write 4k to 4 MB chunks
2236 * Multiple non-blocking read 4k to 4 MB chunks
2255 * Multiple blocking write 1 to 512 sg elements
2274 * Multiple non-blocking write 1 to 512 sg elements
2293 * Multiple blocking read 1 to 512 sg elements
2312 * Multiple non-blocking read 1 to 512 sg elements
2335 struct mmc_card *card = test->card; in mmc_test_reset()
2341 * Reset will re-enable the card's command queue, but tests in mmc_test_reset()
2344 if (card->ext_csd.cmdq_en) in mmc_test_reset()
2347 } else if (err == -EOPNOTSUPP) { in mmc_test_reset()
2359 cmd->opcode = MMC_SEND_STATUS; in mmc_test_send_status()
2360 if (!mmc_host_is_spi(test->card->host)) in mmc_test_send_status()
2361 cmd->arg = test->card->rca << 16; in mmc_test_send_status()
2362 cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; in mmc_test_send_status()
2364 return mmc_wait_for_cmd(test->card->host, cmd, 0); in mmc_test_send_status()
2372 struct mmc_host *host = test->card->host; in mmc_test_ongoing_transfer()
2373 struct mmc_test_area *t = &test->area; in mmc_test_ongoing_transfer()
2382 return -ENOMEM; in mmc_test_ongoing_transfer()
2384 mrq = &rq->mrq; in mmc_test_ongoing_transfer()
2386 mrq->sbc = &rq->sbc; in mmc_test_ongoing_transfer()
2387 mrq->cap_cmd_during_tfr = true; in mmc_test_ongoing_transfer()
2389 mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks, in mmc_test_ongoing_transfer()
2390 512, write); in mmc_test_ongoing_transfer()
2392 if (use_sbc && t->blocks > 1 && !mrq->sbc) { in mmc_test_ongoing_transfer()
2413 cmd_ret = mmc_test_send_status(test, &rq->status); in mmc_test_ongoing_transfer()
2417 status = rq->status.resp[0]; in mmc_test_ongoing_transfer()
2419 cmd_ret = -EIO; in mmc_test_ongoing_transfer()
2430 cmd_ret = -ETIMEDOUT; in mmc_test_ongoing_transfer()
2439 mmc_wait_for_req_done(test->card->host, mrq); in mmc_test_ongoing_transfer()
2446 if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) { in mmc_test_ongoing_transfer()
2448 mmc_wait_for_cmd(host, mrq->data->stop, 0); in mmc_test_ongoing_transfer()
2450 ret = mmc_wait_for_cmd(host, mrq->data->stop, 0); in mmc_test_ongoing_transfer()
2458 mmc_hostname(test->card->host), status, cmd_ret); in mmc_test_ongoing_transfer()
2469 if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr) in mmc_test_ongoing_transfer()
2471 mmc_hostname(test->card->host), count, t->blocks); in mmc_test_ongoing_transfer()
2485 struct mmc_test_area *t = &test->area; in __mmc_test_cmds_during_tfr()
2488 if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR)) in __mmc_test_cmds_during_tfr()
2495 ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write, in __mmc_test_cmds_during_tfr()
2500 return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write, in __mmc_test_cmds_during_tfr()
2507 struct mmc_test_area *t = &test->area; in mmc_test_cmds_during_tfr()
2511 for (sz = 512; sz <= t->max_tfr; sz += 512) { in mmc_test_cmds_during_tfr()
2521 * Commands during read - no Set Block Count (CMD23).
2529 * Commands during write - no Set Block Count (CMD23).
2537 * Commands during read - use Set Block Count (CMD23).
2545 * Commands during write - use Set Block Count (CMD23).
2553 * Commands during non-blocking read - use Set Block Count (CMD23).
2561 * Commands during non-blocking write - use Set Block Count (CMD23).
2594 .name = "Multi-block write",
2601 .name = "Multi-block read",
2650 .name = "Badly aligned multi-block write",
2657 .name = "Badly aligned multi-block read",
2700 .name = "Multi-block highmem write",
2707 .name = "Multi-block highmem read",
2726 .name = "Multi-block highmem write",
2731 .name = "Multi-block highmem read",
2738 .name = "Best-case read performance",
2745 .name = "Best-case write performance",
2752 .name = "Best-case read performance into scattered pages",
2759 .name = "Best-case write performance from scattered pages",
2843 .name = "Write performance with non-blocking req 4k to 4MB",
2857 .name = "Read performance with non-blocking req 4k to 4MB",
2864 .name = "Write performance blocking req 1 to 512 sg elems",
2871 .name = "Write performance non-blocking req 1 to 512 sg elems",
2878 .name = "Read performance blocking req 1 to 512 sg elems",
2885 .name = "Read performance non-blocking req 1 to 512 sg elems",
2897 .name = "Commands during read - no Set Block Count (CMD23)",
2904 .name = "Commands during write - no Set Block Count (CMD23)",
2911 .name = "Commands during read - use Set Block Count (CMD23)",
2918 .name = "Commands during write - use Set Block Count (CMD23)",
2925 .name = "Commands during non-blocking read - use Set Block Count (CMD23)",
2932 .name = "Commands during non-blocking write - use Set Block Count (CMD23)",
2939 .name = "Re-tuning reliability",
2956 mmc_hostname(test->card->host), mmc_card_id(test->card)); in mmc_test_run()
2958 mmc_claim_host(test->card->host); in mmc_test_run()
2967 mmc_hostname(test->card->host), i + 1, in mmc_test_run()
2974 mmc_hostname(test->card->host), in mmc_test_run()
2982 INIT_LIST_HEAD(&gr->tr_lst); in mmc_test_run()
2985 gr->card = test->card; in mmc_test_run()
2986 gr->testcase = i; in mmc_test_run()
2989 list_add_tail(&gr->link, &mmc_test_result); in mmc_test_run()
2995 test->gr = gr; in mmc_test_run()
3002 mmc_hostname(test->card->host)); in mmc_test_run()
3006 mmc_hostname(test->card->host)); in mmc_test_run()
3010 mmc_hostname(test->card->host)); in mmc_test_run()
3014 mmc_hostname(test->card->host)); in mmc_test_run()
3018 mmc_hostname(test->card->host), ret); in mmc_test_run()
3023 gr->result = ret; in mmc_test_run()
3029 mmc_hostname(test->card->host), in mmc_test_run()
3035 mmc_release_host(test->card->host); in mmc_test_run()
3038 mmc_hostname(test->card->host)); in mmc_test_run()
3050 if (card && gr->card != card) in mmc_test_free_result()
3053 list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) { in mmc_test_free_result()
3054 list_del(&tr->link); in mmc_test_free_result()
3058 list_del(&gr->link); in mmc_test_free_result()
3069 struct mmc_card *card = sf->private; in mtf_test_show()
3077 if (gr->card != card) in mtf_test_show()
3080 seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result); in mtf_test_show()
3082 list_for_each_entry(tr, &gr->tr_lst, link) { in mtf_test_show()
3084 tr->count, tr->sectors, in mtf_test_show()
3085 (u64)tr->ts.tv_sec, (u32)tr->ts.tv_nsec, in mtf_test_show()
3086 tr->rate, tr->iops / 100, tr->iops % 100); in mtf_test_show()
3097 return single_open(file, mtf_test_show, inode->i_private); in mtf_test_open()
3103 struct seq_file *sf = file->private_data;
3104 struct mmc_card *card = sf->private;
3115 return -ENOMEM;
3123 test->card = card;
3125 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
3127 test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
3131 if (test->buffer && test->highmem) {
3133 if (test->buffer) {
3141 __free_pages(test->highmem, BUFFER_ORDER);
3143 kfree(test->buffer);
3181 if (card && df->card != card)
3183 debugfs_remove(df->file);
3184 list_del(&df->link);
3197 if (card->debugfs_root)
3198 file = debugfs_create_file(name, mode, card->debugfs_root,
3204 return -ENOMEM;
3207 df->card = card;
3208 df->file = file;
3210 list_add(&df->link, &mmc_test_file_test);
3241 return -ENODEV;
3247 if (card->ext_csd.cmdq_en) {
3248 mmc_claim_host(card->host);
3250 mmc_release_host(card->host);
3255 dev_info(&card->dev, "Card claimed for testing.\n");
3262 if (card->reenable_cmdq) {
3263 mmc_claim_host(card->host);
3265 mmc_release_host(card->host);