Lines matching full:msb (all hits are in the Linux memstick driver drivers/memstick/core/ms_block.c)
147 static int msb_validate_used_block_bitmap(struct msb_data *msb) in msb_validate_used_block_bitmap() argument
155 for (i = 0; i < msb->zone_count; i++) in msb_validate_used_block_bitmap()
156 total_free_blocks += msb->free_block_count[i]; in msb_validate_used_block_bitmap()
158 if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap, in msb_validate_used_block_bitmap()
159 msb->block_count) == total_free_blocks) in msb_validate_used_block_bitmap()
163 msb->read_only = true; in msb_validate_used_block_bitmap()
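The msb_validate_used_block_bitmap() hits above cross-check the per-zone free_block_count[] counters against used_blocks_bitmap; in the driver a mismatch flips msb->read_only rather than returning a value. Below is a minimal userspace sketch of that invariant; the geometry (2048 blocks, 512 per zone) and helper names are assumptions, and bitmap_weight_sketch() stands in for the kernel's bitmap_weight().

#include <stdbool.h>
#include <stdio.h>

#define BLOCK_COUNT    2048                    /* assumed: 4 zones of 512 blocks */
#define ZONE_COUNT     (BLOCK_COUNT / 512)
#define BITS_PER_LONG  (8 * sizeof(unsigned long))

static unsigned long used_blocks_bitmap[BLOCK_COUNT / BITS_PER_LONG];
static int free_block_count[ZONE_COUNT];

/* Userspace stand-in for the kernel's bitmap_weight(): count set bits. */
static int bitmap_weight_sketch(const unsigned long *map, int nbits)
{
	int i, w = 0;

	for (i = 0; i < nbits; i++)
		if (map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
			w++;
	return w;
}

/* Same invariant as msb_validate_used_block_bitmap(): blocks not marked
 * used in the bitmap must equal the sum of the per-zone free counters. */
static bool validate_used_block_bitmap(void)
{
	int i, total_free_blocks = 0;

	for (i = 0; i < ZONE_COUNT; i++)
		total_free_blocks += free_block_count[i];

	return BLOCK_COUNT - bitmap_weight_sketch(used_blocks_bitmap,
						  BLOCK_COUNT) == total_free_blocks;
}

int main(void)
{
	int i;

	for (i = 0; i < ZONE_COUNT; i++)
		free_block_count[i] = 512;           /* everything starts free */
	printf("consistent: %d\n", validate_used_block_bitmap());   /* 1 */

	used_blocks_bitmap[0] |= 1UL;                /* pba 0 used, counter untouched */
	printf("consistent: %d\n", validate_used_block_bitmap());   /* 0 */
	return 0;
}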
168 static void msb_mark_block_used(struct msb_data *msb, int pba) in msb_mark_block_used() argument
172 if (test_bit(pba, msb->used_blocks_bitmap)) { in msb_mark_block_used()
175 msb->read_only = true; in msb_mark_block_used()
179 if (msb_validate_used_block_bitmap(msb)) in msb_mark_block_used()
183 __set_bit(pba, msb->used_blocks_bitmap); in msb_mark_block_used()
184 msb->free_block_count[zone]--; in msb_mark_block_used()
188 static void msb_mark_block_unused(struct msb_data *msb, int pba) in msb_mark_block_unused() argument
192 if (!test_bit(pba, msb->used_blocks_bitmap)) { in msb_mark_block_unused()
194 msb->read_only = true; in msb_mark_block_unused()
198 if (msb_validate_used_block_bitmap(msb)) in msb_mark_block_unused()
202 __clear_bit(pba, msb->used_blocks_bitmap); in msb_mark_block_unused()
203 msb->free_block_count[zone]++; in msb_mark_block_unused()
207 static void msb_invalidate_reg_window(struct msb_data *msb) in msb_invalidate_reg_window() argument
209 msb->reg_addr.w_offset = offsetof(struct ms_register, id); in msb_invalidate_reg_window()
210 msb->reg_addr.w_length = sizeof(struct ms_id_register); in msb_invalidate_reg_window()
211 msb->reg_addr.r_offset = offsetof(struct ms_register, id); in msb_invalidate_reg_window()
212 msb->reg_addr.r_length = sizeof(struct ms_id_register); in msb_invalidate_reg_window()
213 msb->addr_valid = false; in msb_invalidate_reg_window()
217 static int msb_run_state_machine(struct msb_data *msb, int (*state_func) in msb_run_state_machine() argument
220 struct memstick_dev *card = msb->card; in msb_run_state_machine()
222 WARN_ON(msb->state != -1); in msb_run_state_machine()
223 msb->int_polling = false; in msb_run_state_machine()
224 msb->state = 0; in msb_run_state_machine()
225 msb->exit_error = 0; in msb_run_state_machine()
233 WARN_ON(msb->state != -1); in msb_run_state_machine()
234 return msb->exit_error; in msb_run_state_machine()
238 static int msb_exit_state_machine(struct msb_data *msb, int error) in msb_exit_state_machine() argument
240 WARN_ON(msb->state == -1); in msb_exit_state_machine()
242 msb->state = -1; in msb_exit_state_machine()
243 msb->exit_error = error; in msb_exit_state_machine()
244 msb->card->next_request = h_msb_default_bad; in msb_exit_state_machine()
248 msb_invalidate_reg_window(msb); in msb_exit_state_machine()
250 complete(&msb->card->mrq_complete); in msb_exit_state_machine()
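msb_run_state_machine() and msb_exit_state_machine() above form a small handshake: state is -1 while idle, the runner sets it to 0 and waits on card->mrq_complete, and every h_msb_* handler eventually calls the exit helper, which records exit_error, restores state to -1 and completes the request. The userspace model below keeps only that handshake; the while loop stands in for the completion wait and three_steps() for a real handler, so all names and the step count are illustrative.

#include <stdio.h>

/* Minimal model of the run/exit handshake; field names mirror the driver. */
struct sm {
	int state;        /* -1 = idle, >= 0 = current step        */
	int exit_error;   /* error recorded by the exit helper     */
	int completed;    /* stands in for complete(&mrq_complete) */
};

static int sm_exit(struct sm *m, int error)
{
	m->state = -1;                 /* like msb_exit_state_machine() */
	m->exit_error = error;
	m->completed = 1;
	return 0;
}

/* Toy handler: advance twice, then finish.  Real h_msb_* handlers are
 * driven by memstick request completions rather than a loop. */
static int three_steps(struct sm *m)
{
	if (m->state < 2) {
		m->state++;
		return 0;
	}
	return sm_exit(m, 0);
}

static int sm_run(struct sm *m, int (*state_func)(struct sm *))
{
	m->state = 0;                  /* like msb_run_state_machine()  */
	m->exit_error = 0;
	m->completed = 0;

	while (!m->completed)          /* driver sleeps on a completion here */
		state_func(m);

	return m->exit_error;          /* state is back at -1 via sm_exit() */
}

int main(void)
{
	struct sm m = { .state = -1 };

	printf("exit_error = %d\n", sm_run(&m, three_steps));
	return 0;
}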
255 static int msb_read_int_reg(struct msb_data *msb, long timeout) in msb_read_int_reg() argument
257 struct memstick_request *mrq = &msb->card->current_mrq; in msb_read_int_reg()
259 WARN_ON(msb->state == -1); in msb_read_int_reg()
261 if (!msb->int_polling) { in msb_read_int_reg()
262 msb->int_timeout = jiffies + in msb_read_int_reg()
264 msb->int_polling = true; in msb_read_int_reg()
265 } else if (time_after(jiffies, msb->int_timeout)) { in msb_read_int_reg()
270 if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) && in msb_read_int_reg()
282 static int msb_read_regs(struct msb_data *msb, int offset, int len) in msb_read_regs() argument
284 struct memstick_request *req = &msb->card->current_mrq; in msb_read_regs()
286 if (msb->reg_addr.r_offset != offset || in msb_read_regs()
287 msb->reg_addr.r_length != len || !msb->addr_valid) { in msb_read_regs()
289 msb->reg_addr.r_offset = offset; in msb_read_regs()
290 msb->reg_addr.r_length = len; in msb_read_regs()
291 msb->addr_valid = true; in msb_read_regs()
294 &msb->reg_addr, sizeof(msb->reg_addr)); in msb_read_regs()
303 static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf) in msb_write_regs() argument
305 struct memstick_request *req = &msb->card->current_mrq; in msb_write_regs()
307 if (msb->reg_addr.w_offset != offset || in msb_write_regs()
308 msb->reg_addr.w_length != len || !msb->addr_valid) { in msb_write_regs()
310 msb->reg_addr.w_offset = offset; in msb_write_regs()
311 msb->reg_addr.w_length = len; in msb_write_regs()
312 msb->addr_valid = true; in msb_write_regs()
315 &msb->reg_addr, sizeof(msb->reg_addr)); in msb_write_regs()
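msb_read_regs() and msb_write_regs() above only queue a new MS_TPC_SET_RW_REG_ADRS request when the requested window differs from the cached msb->reg_addr or when msb_invalidate_reg_window() has cleared addr_valid. The sketch below models just that caching decision in userspace; the window values in main() are arbitrary examples.

#include <stdbool.h>
#include <stdio.h>

/* Cached register window, as in struct msb_data::reg_addr plus addr_valid. */
struct reg_window {
	int r_offset, r_length;
	int w_offset, w_length;
	bool addr_valid;
};

/* Returns true when a SET_RW_REG_ADRS round-trip would be needed first,
 * mirroring the test at the top of msb_read_regs(). */
static bool need_new_read_window(struct reg_window *w, int offset, int len)
{
	if (w->r_offset == offset && w->r_length == len && w->addr_valid)
		return false;

	w->r_offset = offset;
	w->r_length = len;
	w->addr_valid = true;
	return true;
}

int main(void)
{
	struct reg_window w = { 0 };

	printf("%d\n", need_new_read_window(&w, 16, 6));  /* 1: window changed */
	printf("%d\n", need_new_read_window(&w, 16, 6));  /* 0: cached window  */
	w.addr_valid = false;                   /* msb_invalidate_reg_window() */
	printf("%d\n", need_new_read_window(&w, 16, 6));  /* 1: cache dropped  */
	return 0;
}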
332 * Writes output to msb->current_sg, takes sector address from msb->reg.param
338 struct msb_data *msb = memstick_get_drvdata(card); in h_msb_read_page() local
345 return msb_exit_state_machine(msb, mrq->error); in h_msb_read_page()
348 switch (msb->state) { in h_msb_read_page()
353 if (!msb_write_regs(msb, in h_msb_read_page()
356 (unsigned char *)&msb->regs.param)) in h_msb_read_page()
359 msb->state = MSB_RP_SEND_READ_COMMAND; in h_msb_read_page()
365 msb->state = MSB_RP_SEND_INT_REQ; in h_msb_read_page()
369 msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT; in h_msb_read_page()
372 if (msb_read_int_reg(msb, -1)) in h_msb_read_page()
378 msb->regs.status.interrupt = intreg; in h_msb_read_page()
381 return msb_exit_state_machine(msb, -EIO); in h_msb_read_page()
384 msb->state = MSB_RP_SEND_INT_REQ; in h_msb_read_page()
388 msb->int_polling = false; in h_msb_read_page()
389 msb->state = (intreg & MEMSTICK_INT_ERR) ? in h_msb_read_page()
395 if (!msb_read_regs(msb, in h_msb_read_page()
400 msb->state = MSB_RP_RECEIVE_STATUS_REG; in h_msb_read_page()
404 msb->regs.status = *(struct ms_status_register *)mrq->data; in h_msb_read_page()
405 msb->state = MSB_RP_SEND_OOB_READ; in h_msb_read_page()
409 if (!msb_read_regs(msb, in h_msb_read_page()
414 msb->state = MSB_RP_RECEIVE_OOB_READ; in h_msb_read_page()
418 msb->regs.extra_data = in h_msb_read_page()
420 msb->state = MSB_RP_SEND_READ_DATA; in h_msb_read_page()
425 if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) { in h_msb_read_page()
426 msb->state = MSB_RP_RECEIVE_READ_DATA; in h_msb_read_page()
431 msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg), in h_msb_read_page()
432 msb->current_sg_offset, in h_msb_read_page()
433 msb->page_size); in h_msb_read_page()
436 msb->state = MSB_RP_RECEIVE_READ_DATA; in h_msb_read_page()
440 if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) { in h_msb_read_page()
441 msb->current_sg_offset += msb->page_size; in h_msb_read_page()
442 return msb_exit_state_machine(msb, 0); in h_msb_read_page()
445 if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) { in h_msb_read_page()
447 return msb_exit_state_machine(msb, -EBADMSG); in h_msb_read_page()
450 if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) { in h_msb_read_page()
452 msb->current_sg_offset += msb->page_size; in h_msb_read_page()
453 return msb_exit_state_machine(msb, -EUCLEAN); in h_msb_read_page()
456 return msb_exit_state_machine(msb, -EIO); in h_msb_read_page()
465 * Takes address from msb->regs.param.
467 * from msb->regs.extra
474 struct msb_data *msb = memstick_get_drvdata(card); in h_msb_write_block() local
480 return msb_exit_state_machine(msb, mrq->error); in h_msb_write_block()
483 switch (msb->state) { in h_msb_write_block()
492 if (!msb_write_regs(msb, in h_msb_write_block()
495 &msb->regs.param)) in h_msb_write_block()
498 msb->state = MSB_WB_SEND_WRITE_OOB; in h_msb_write_block()
502 if (!msb_write_regs(msb, in h_msb_write_block()
505 &msb->regs.extra_data)) in h_msb_write_block()
507 msb->state = MSB_WB_SEND_WRITE_COMMAND; in h_msb_write_block()
514 msb->state = MSB_WB_SEND_INT_REQ; in h_msb_write_block()
518 msb->state = MSB_WB_RECEIVE_INT_REQ; in h_msb_write_block()
519 if (msb_read_int_reg(msb, -1)) in h_msb_write_block()
525 msb->regs.status.interrupt = intreg; in h_msb_write_block()
529 return msb_exit_state_machine(msb, -EIO); in h_msb_write_block()
532 return msb_exit_state_machine(msb, -EBADMSG); in h_msb_write_block()
536 if (msb->current_page == msb->pages_in_block) { in h_msb_write_block()
538 return msb_exit_state_machine(msb, 0); in h_msb_write_block()
539 msb->state = MSB_WB_SEND_INT_REQ; in h_msb_write_block()
546 msb->state = MSB_WB_SEND_INT_REQ; in h_msb_write_block()
550 msb->int_polling = false; in h_msb_write_block()
551 msb->state = MSB_WB_SEND_WRITE_DATA; in h_msb_write_block()
557 if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg), in h_msb_write_block()
558 msb->current_sg_offset, in h_msb_write_block()
559 msb->page_size) < msb->page_size) in h_msb_write_block()
560 return msb_exit_state_machine(msb, -EIO); in h_msb_write_block()
564 msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION; in h_msb_write_block()
568 msb->current_page++; in h_msb_write_block()
569 msb->current_sg_offset += msb->page_size; in h_msb_write_block()
570 msb->state = MSB_WB_SEND_INT_REQ; in h_msb_write_block()
586 struct msb_data *msb = memstick_get_drvdata(card); in h_msb_send_command() local
592 return msb_exit_state_machine(msb, mrq->error); in h_msb_send_command()
595 switch (msb->state) { in h_msb_send_command()
599 if (!msb_write_regs(msb, in h_msb_send_command()
602 &msb->regs.param)) in h_msb_send_command()
604 msb->state = MSB_SC_SEND_WRITE_OOB; in h_msb_send_command()
608 if (!msb->command_need_oob) { in h_msb_send_command()
609 msb->state = MSB_SC_SEND_COMMAND; in h_msb_send_command()
613 if (!msb_write_regs(msb, in h_msb_send_command()
616 &msb->regs.extra_data)) in h_msb_send_command()
619 msb->state = MSB_SC_SEND_COMMAND; in h_msb_send_command()
623 memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1); in h_msb_send_command()
624 msb->state = MSB_SC_SEND_INT_REQ; in h_msb_send_command()
628 msb->state = MSB_SC_RECEIVE_INT_REQ; in h_msb_send_command()
629 if (msb_read_int_reg(msb, -1)) in h_msb_send_command()
637 return msb_exit_state_machine(msb, -EIO); in h_msb_send_command()
639 return msb_exit_state_machine(msb, -EBADMSG); in h_msb_send_command()
642 msb->state = MSB_SC_SEND_INT_REQ; in h_msb_send_command()
646 return msb_exit_state_machine(msb, 0); in h_msb_send_command()
657 struct msb_data *msb = memstick_get_drvdata(card); in h_msb_reset() local
661 return msb_exit_state_machine(msb, mrq->error); in h_msb_reset()
663 switch (msb->state) { in h_msb_reset()
667 msb->state = MSB_RS_CONFIRM; in h_msb_reset()
670 return msb_exit_state_machine(msb, 0); in h_msb_reset()
679 struct msb_data *msb = memstick_get_drvdata(card); in h_msb_parallel_switch() local
685 msb->regs.param.system &= ~MEMSTICK_SYS_PAM; in h_msb_parallel_switch()
686 return msb_exit_state_machine(msb, mrq->error); in h_msb_parallel_switch()
689 switch (msb->state) { in h_msb_parallel_switch()
692 msb->regs.param.system |= MEMSTICK_SYS_PAM; in h_msb_parallel_switch()
694 if (!msb_write_regs(msb, in h_msb_parallel_switch()
697 (unsigned char *)&msb->regs.param)) in h_msb_parallel_switch()
700 msb->state = MSB_PS_SWICH_HOST; in h_msb_parallel_switch()
708 msb->state = MSB_PS_CONFIRM; in h_msb_parallel_switch()
712 return msb_exit_state_machine(msb, 0); in h_msb_parallel_switch()
718 static int msb_switch_to_parallel(struct msb_data *msb);
721 static int msb_reset(struct msb_data *msb, bool full) in msb_reset() argument
724 bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM; in msb_reset()
725 struct memstick_dev *card = msb->card; in msb_reset()
730 msb->regs.param.system = MEMSTICK_SYS_BAMD; in msb_reset()
738 msb_invalidate_reg_window(msb); in msb_reset()
750 msb->read_only = true; in msb_reset()
755 error = msb_run_state_machine(msb, h_msb_reset); in msb_reset()
758 msb->read_only = true; in msb_reset()
764 msb_switch_to_parallel(msb); in msb_reset()
769 static int msb_switch_to_parallel(struct msb_data *msb) in msb_switch_to_parallel() argument
773 error = msb_run_state_machine(msb, h_msb_parallel_switch); in msb_switch_to_parallel()
776 msb->regs.param.system &= ~MEMSTICK_SYS_PAM; in msb_switch_to_parallel()
777 msb_reset(msb, true); in msb_switch_to_parallel()
781 msb->caps |= MEMSTICK_CAP_AUTO_GET_INT; in msb_switch_to_parallel()
786 static int msb_set_overwrite_flag(struct msb_data *msb, in msb_set_overwrite_flag() argument
789 if (msb->read_only) in msb_set_overwrite_flag()
792 msb->regs.param.block_address = cpu_to_be16(pba); in msb_set_overwrite_flag()
793 msb->regs.param.page_address = page; in msb_set_overwrite_flag()
794 msb->regs.param.cp = MEMSTICK_CP_OVERWRITE; in msb_set_overwrite_flag()
795 msb->regs.extra_data.overwrite_flag = flag; in msb_set_overwrite_flag()
796 msb->command_value = MS_CMD_BLOCK_WRITE; in msb_set_overwrite_flag()
797 msb->command_need_oob = true; in msb_set_overwrite_flag()
801 return msb_run_state_machine(msb, h_msb_send_command); in msb_set_overwrite_flag()
804 static int msb_mark_bad(struct msb_data *msb, int pba) in msb_mark_bad() argument
807 msb_reset(msb, true); in msb_mark_bad()
809 msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST); in msb_mark_bad()
812 static int msb_mark_page_bad(struct msb_data *msb, int pba, int page) in msb_mark_page_bad() argument
815 msb_reset(msb, true); in msb_mark_page_bad()
816 return msb_set_overwrite_flag(msb, in msb_mark_page_bad()
821 static int msb_erase_block(struct msb_data *msb, u16 pba) in msb_erase_block() argument
824 if (msb->read_only) in msb_erase_block()
830 msb->regs.param.block_address = cpu_to_be16(pba); in msb_erase_block()
831 msb->regs.param.page_address = 0; in msb_erase_block()
832 msb->regs.param.cp = MEMSTICK_CP_BLOCK; in msb_erase_block()
833 msb->command_value = MS_CMD_BLOCK_ERASE; in msb_erase_block()
834 msb->command_need_oob = false; in msb_erase_block()
837 error = msb_run_state_machine(msb, h_msb_send_command); in msb_erase_block()
838 if (!error || msb_reset(msb, true)) in msb_erase_block()
844 msb_mark_bad(msb, pba); in msb_erase_block()
848 msb_mark_block_unused(msb, pba); in msb_erase_block()
849 __set_bit(pba, msb->erased_blocks_bitmap); in msb_erase_block()
854 static int msb_read_page(struct msb_data *msb, in msb_read_page() argument
863 size_t len = msb->page_size; in msb_read_page()
897 if (pba >= msb->block_count) { in msb_read_page()
903 msb->regs.param.block_address = cpu_to_be16(pba); in msb_read_page()
904 msb->regs.param.page_address = page; in msb_read_page()
905 msb->regs.param.cp = MEMSTICK_CP_PAGE; in msb_read_page()
907 msb->current_sg = sg; in msb_read_page()
908 msb->current_sg_offset = offset; in msb_read_page()
909 error = msb_run_state_machine(msb, h_msb_read_page); in msb_read_page()
919 *extra = msb->regs.extra_data; in msb_read_page()
921 if (!error || msb_reset(msb, true)) in msb_read_page()
931 if (msb->regs.extra_data.overwrite_flag & in msb_read_page()
933 msb_mark_page_bad(msb, pba, page); in msb_read_page()
944 static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page, in msb_read_oob() argument
950 msb->regs.param.block_address = cpu_to_be16(pba); in msb_read_oob()
951 msb->regs.param.page_address = page; in msb_read_oob()
952 msb->regs.param.cp = MEMSTICK_CP_EXTRA; in msb_read_oob()
954 if (pba > msb->block_count) { in msb_read_oob()
959 error = msb_run_state_machine(msb, h_msb_read_page); in msb_read_oob()
960 *extra = msb->regs.extra_data; in msb_read_oob()
972 static int msb_verify_block(struct msb_data *msb, u16 pba, in msb_verify_block() argument
978 sg_init_one(&sg, msb->block_buffer, msb->block_size); in msb_verify_block()
980 while (page < msb->pages_in_block) { in msb_verify_block()
982 error = msb_read_page(msb, pba, page, in msb_verify_block()
983 NULL, &sg, page * msb->page_size); in msb_verify_block()
990 msb->block_buffer, msb->block_size)) in msb_verify_block()
996 static int msb_write_block(struct msb_data *msb, in msb_write_block() argument
1000 BUG_ON(sg->length < msb->page_size); in msb_write_block()
1002 if (msb->read_only) in msb_write_block()
1011 if (pba >= msb->block_count || lba >= msb->logical_block_count) { in msb_write_block()
1022 if (pba == msb->boot_block_locations[0] || in msb_write_block()
1023 pba == msb->boot_block_locations[1]) { in msb_write_block()
1030 if (msb->read_only) in msb_write_block()
1033 msb->regs.param.cp = MEMSTICK_CP_BLOCK; in msb_write_block()
1034 msb->regs.param.page_address = 0; in msb_write_block()
1035 msb->regs.param.block_address = cpu_to_be16(pba); in msb_write_block()
1037 msb->regs.extra_data.management_flag = 0xFF; in msb_write_block()
1038 msb->regs.extra_data.overwrite_flag = 0xF8; in msb_write_block()
1039 msb->regs.extra_data.logical_address = cpu_to_be16(lba); in msb_write_block()
1041 msb->current_sg = sg; in msb_write_block()
1042 msb->current_sg_offset = offset; in msb_write_block()
1043 msb->current_page = 0; in msb_write_block()
1045 error = msb_run_state_machine(msb, h_msb_write_block); in msb_write_block()
1054 !test_bit(pba, msb->erased_blocks_bitmap))) in msb_write_block()
1055 error = msb_verify_block(msb, pba, sg, offset); in msb_write_block()
1060 if (current_try > 1 || msb_reset(msb, true)) in msb_write_block()
1064 error = msb_erase_block(msb, pba); in msb_write_block()
1074 static u16 msb_get_free_block(struct msb_data *msb, int zone) in msb_get_free_block() argument
1082 if (!msb->free_block_count[zone]) { in msb_get_free_block()
1084 msb->read_only = true; in msb_get_free_block()
1088 pos %= msb->free_block_count[zone]; in msb_get_free_block()
1091 msb->free_block_count[zone], pos); in msb_get_free_block()
1093 pba = find_next_zero_bit(msb->used_blocks_bitmap, in msb_get_free_block()
1094 msb->block_count, pba); in msb_get_free_block()
1096 pba = find_next_zero_bit(msb->used_blocks_bitmap, in msb_get_free_block()
1097 msb->block_count, pba + 1); in msb_get_free_block()
1101 if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) { in msb_get_free_block()
1103 msb->read_only = true; in msb_get_free_block()
1107 msb_mark_block_used(msb, pba); in msb_get_free_block()
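msb_get_free_block() above wear-levels allocations by picking a random position among the zone's free blocks and then walking used_blocks_bitmap with find_next_zero_bit() to that position. Below is a userspace sketch of the same walk; the byte-per-block array replaces the bitmap, the geometry is an assumed example, and the -1 returns correspond to the paths where the driver sets read_only.

#include <stdio.h>
#include <stdlib.h>

#define BLOCKS_IN_ZONE 512
#define ZONE_COUNT     4
#define BLOCK_COUNT    (BLOCKS_IN_ZONE * ZONE_COUNT)

static unsigned char used[BLOCK_COUNT];   /* byte-per-block stand-in for the bitmap */
static int free_block_count[ZONE_COUNT];

/* Stand-in for find_next_zero_bit(used_blocks_bitmap, BLOCK_COUNT, from). */
static int next_unused(int from)
{
	while (from < BLOCK_COUNT && used[from])
		from++;
	return from;                             /* BLOCK_COUNT means "none left" */
}

static int get_free_block(int zone)
{
	int pos, pba;

	if (!free_block_count[zone])
		return -1;                       /* driver flips read_only here  */

	pos = rand() % free_block_count[zone];   /* random slot => wear leveling */

	pba = next_unused(zone * BLOCKS_IN_ZONE);
	while (pos-- > 0)
		pba = next_unused(pba + 1);

	if (pba == BLOCK_COUNT || pba / BLOCKS_IN_ZONE != zone)
		return -1;                       /* bitmap and counters disagree */

	used[pba] = 1;                           /* msb_mark_block_used()        */
	free_block_count[zone]--;
	return pba;
}

int main(void)
{
	int zone;

	for (zone = 0; zone < ZONE_COUNT; zone++)
		free_block_count[zone] = BLOCKS_IN_ZONE;

	printf("picked pba %d in zone 1\n", get_free_block(1));
	return 0;
}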
1111 static int msb_update_block(struct msb_data *msb, u16 lba, in msb_update_block() argument
1117 pba = msb->lba_to_pba_table[lba]; in msb_update_block()
1122 msb_set_overwrite_flag(msb, pba, 0, in msb_update_block()
1127 new_pba = msb_get_free_block(msb, in msb_update_block()
1137 error = msb_write_block(msb, new_pba, lba, sg, offset); in msb_update_block()
1139 msb_mark_bad(msb, new_pba); in msb_update_block()
1147 msb_erase_block(msb, pba); in msb_update_block()
1148 msb->lba_to_pba_table[lba] = new_pba; in msb_update_block()
1154 msb->read_only = true; in msb_update_block()
1186 static int msb_read_boot_blocks(struct msb_data *msb) in msb_read_boot_blocks() argument
1193 msb->boot_block_locations[0] = MS_BLOCK_INVALID; in msb_read_boot_blocks()
1194 msb->boot_block_locations[1] = MS_BLOCK_INVALID; in msb_read_boot_blocks()
1195 msb->boot_block_count = 0; in msb_read_boot_blocks()
1199 if (!msb->boot_page) { in msb_read_boot_blocks()
1205 msb->boot_page = page; in msb_read_boot_blocks()
1207 page = msb->boot_page; in msb_read_boot_blocks()
1209 msb->block_count = MS_BLOCK_MAX_BOOT_ADDR; in msb_read_boot_blocks()
1214 if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) { in msb_read_boot_blocks()
1231 msb->boot_block_locations[msb->boot_block_count] = pba; in msb_read_boot_blocks()
1234 msb->boot_block_count++; in msb_read_boot_blocks()
1236 if (msb->boot_block_count == 2) in msb_read_boot_blocks()
1240 if (!msb->boot_block_count) { in msb_read_boot_blocks()
1249 static int msb_read_bad_block_table(struct msb_data *msb, int block_nr) in msb_read_bad_block_table() argument
1260 boot_block = &msb->boot_page[block_nr]; in msb_read_bad_block_table()
1261 pba = msb->boot_block_locations[block_nr]; in msb_read_bad_block_table()
1263 if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID) in msb_read_bad_block_table()
1272 page = data_offset / msb->page_size; in msb_read_bad_block_table()
1273 page_offset = data_offset % msb->page_size; in msb_read_bad_block_table()
1275 DIV_ROUND_UP(data_size + page_offset, msb->page_size) * in msb_read_bad_block_table()
1276 msb->page_size; in msb_read_bad_block_table()
1289 error = msb_read_page(msb, pba, page, NULL, &sg, offset); in msb_read_bad_block_table()
1294 offset += msb->page_size; in msb_read_bad_block_table()
1296 if (page == msb->pages_in_block) { in msb_read_bad_block_table()
1308 if (bad_block >= msb->block_count) { in msb_read_bad_block_table()
1314 if (test_bit(bad_block, msb->used_blocks_bitmap)) { in msb_read_bad_block_table()
1321 msb_mark_block_used(msb, bad_block); in msb_read_bad_block_table()
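The arithmetic in msb_read_bad_block_table() above locates the table inside the boot block: a byte offset is split into a starting page and an offset within that page, and the read size is rounded up to whole pages. The snippet below redoes that math with made-up data_offset/data_size values, purely to show the rounding.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int page_size = 512;
	int data_offset = 960;    /* hypothetical table offset in the boot block */
	int data_size = 48;       /* hypothetical table size in bytes            */

	int page = data_offset / page_size;
	int page_offset = data_offset % page_size;
	int size_to_read = DIV_ROUND_UP(data_size + page_offset, page_size) *
			   page_size;

	printf("start page %d, offset in page %d, read %d bytes\n",
	       page, page_offset, size_to_read);  /* page 1, offset 448, 512 bytes */
	return 0;
}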
1328 static int msb_ftl_initialize(struct msb_data *msb) in msb_ftl_initialize() argument
1332 if (msb->ftl_initialized) in msb_ftl_initialize()
1335 msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE; in msb_ftl_initialize()
1336 msb->logical_block_count = msb->zone_count * 496 - 2; in msb_ftl_initialize()
1338 msb->used_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL); in msb_ftl_initialize()
1339 msb->erased_blocks_bitmap = kzalloc(msb->block_count / 8, GFP_KERNEL); in msb_ftl_initialize()
1340 msb->lba_to_pba_table = in msb_ftl_initialize()
1341 kmalloc_array(msb->logical_block_count, sizeof(u16), in msb_ftl_initialize()
1344 if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table || in msb_ftl_initialize()
1345 !msb->erased_blocks_bitmap) { in msb_ftl_initialize()
1346 kfree(msb->used_blocks_bitmap); in msb_ftl_initialize()
1347 kfree(msb->lba_to_pba_table); in msb_ftl_initialize()
1348 kfree(msb->erased_blocks_bitmap); in msb_ftl_initialize()
1352 for (i = 0; i < msb->zone_count; i++) in msb_ftl_initialize()
1353 msb->free_block_count[i] = MS_BLOCKS_IN_ZONE; in msb_ftl_initialize()
1355 memset(msb->lba_to_pba_table, MS_BLOCK_INVALID, in msb_ftl_initialize()
1356 msb->logical_block_count * sizeof(u16)); in msb_ftl_initialize()
1359 msb->zone_count, msb->logical_block_count); in msb_ftl_initialize()
1361 msb->ftl_initialized = true; in msb_ftl_initialize()
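msb_ftl_initialize() above derives everything from block_count: one zone per 512 physical blocks, 496 logical blocks exported per zone (minus 2 reserved overall), one bit per block for the used/erased bitmaps, and a u16 lba-to-pba entry per logical block. A quick calculation for a hypothetical 8192-block card:

#include <stdio.h>

#define MS_BLOCKS_IN_ZONE 512

int main(void)
{
	int block_count = 8192;          /* assumed example card */
	int zone_count = block_count / MS_BLOCKS_IN_ZONE;
	int logical_block_count = zone_count * 496 - 2;

	printf("zones:               %d\n", zone_count);
	printf("logical blocks:      %d\n", logical_block_count);
	printf("used/erased bitmaps: %d bytes each\n", block_count / 8);
	printf("lba_to_pba_table:    %zu bytes\n",
	       logical_block_count * sizeof(unsigned short));
	return 0;
}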
1365 static int msb_ftl_scan(struct msb_data *msb) in msb_ftl_scan() argument
1371 u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL); in msb_ftl_scan()
1377 for (pba = 0; pba < msb->block_count; pba++) { in msb_ftl_scan()
1379 if (pba == msb->boot_block_locations[0] || in msb_ftl_scan()
1380 pba == msb->boot_block_locations[1]) { in msb_ftl_scan()
1382 msb_mark_block_used(msb, pba); in msb_ftl_scan()
1386 if (test_bit(pba, msb->used_blocks_bitmap)) { in msb_ftl_scan()
1392 error = msb_read_oob(msb, pba, 0, &extra); in msb_ftl_scan()
1398 msb_mark_block_used(msb, pba); in msb_ftl_scan()
1399 msb_erase_block(msb, pba); in msb_ftl_scan()
1417 msb_mark_block_used(msb, pba); in msb_ftl_scan()
1426 msb_mark_block_used(msb, pba); in msb_ftl_scan()
1434 msb_mark_block_used(msb, pba); in msb_ftl_scan()
1435 msb_erase_block(msb, pba); in msb_ftl_scan()
1444 msb_mark_block_used(msb, pba); in msb_ftl_scan()
1450 msb_erase_block(msb, pba); in msb_ftl_scan()
1455 if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) { in msb_ftl_scan()
1457 msb->lba_to_pba_table[lba] = pba; in msb_ftl_scan()
1461 other_block = msb->lba_to_pba_table[lba]; in msb_ftl_scan()
1469 msb_erase_block(msb, other_block); in msb_ftl_scan()
1470 msb->lba_to_pba_table[lba] = pba; in msb_ftl_scan()
1477 msb_erase_block(msb, pba); in msb_ftl_scan()
1484 msb_erase_block(msb, other_block); in msb_ftl_scan()
1485 msb->lba_to_pba_table[lba] = pba; in msb_ftl_scan()
1495 struct msb_data *msb = from_timer(msb, t, cache_flush_timer); in msb_cache_flush_timer() local
1496 msb->need_flush_cache = true; in msb_cache_flush_timer()
1497 queue_work(msb->io_queue, &msb->io_work); in msb_cache_flush_timer()
1501 static void msb_cache_discard(struct msb_data *msb) in msb_cache_discard() argument
1503 if (msb->cache_block_lba == MS_BLOCK_INVALID) in msb_cache_discard()
1506 del_timer_sync(&msb->cache_flush_timer); in msb_cache_discard()
1509 msb->cache_block_lba = MS_BLOCK_INVALID; in msb_cache_discard()
1510 bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block); in msb_cache_discard()
1513 static int msb_cache_init(struct msb_data *msb) in msb_cache_init() argument
1515 timer_setup(&msb->cache_flush_timer, msb_cache_flush_timer, 0); in msb_cache_init()
1517 if (!msb->cache) in msb_cache_init()
1518 msb->cache = kzalloc(msb->block_size, GFP_KERNEL); in msb_cache_init()
1519 if (!msb->cache) in msb_cache_init()
1522 msb_cache_discard(msb); in msb_cache_init()
1526 static int msb_cache_flush(struct msb_data *msb) in msb_cache_flush() argument
1533 if (msb->read_only) in msb_cache_flush()
1536 if (msb->cache_block_lba == MS_BLOCK_INVALID) in msb_cache_flush()
1539 lba = msb->cache_block_lba; in msb_cache_flush()
1540 pba = msb->lba_to_pba_table[lba]; in msb_cache_flush()
1543 pba, msb->cache_block_lba); in msb_cache_flush()
1545 sg_init_one(&sg, msb->cache , msb->block_size); in msb_cache_flush()
1548 for (page = 0; page < msb->pages_in_block; page++) { in msb_cache_flush()
1550 if (test_bit(page, &msb->valid_cache_bitmap)) in msb_cache_flush()
1553 offset = page * msb->page_size; in msb_cache_flush()
1557 error = msb_read_page(msb, pba, page, &extra, &sg, offset); in msb_cache_flush()
1574 set_bit(page, &msb->valid_cache_bitmap); in msb_cache_flush()
1578 error = msb_update_block(msb, msb->cache_block_lba, &sg, 0); in msb_cache_flush()
1579 pba = msb->lba_to_pba_table[msb->cache_block_lba]; in msb_cache_flush()
1583 for (page = 0; page < msb->pages_in_block; page++) { in msb_cache_flush()
1585 if (test_bit(page, &msb->valid_cache_bitmap)) in msb_cache_flush()
1590 msb_set_overwrite_flag(msb, in msb_cache_flush()
1595 msb_cache_discard(msb); in msb_cache_flush()
1599 static int msb_cache_write(struct msb_data *msb, int lba, in msb_cache_write() argument
1605 if (msb->read_only) in msb_cache_write()
1608 if (msb->cache_block_lba == MS_BLOCK_INVALID || in msb_cache_write()
1609 lba != msb->cache_block_lba) in msb_cache_write()
1614 if (msb->cache_block_lba != MS_BLOCK_INVALID && in msb_cache_write()
1615 lba != msb->cache_block_lba) { in msb_cache_write()
1617 error = msb_cache_flush(msb); in msb_cache_write()
1622 if (msb->cache_block_lba == MS_BLOCK_INVALID) { in msb_cache_write()
1623 msb->cache_block_lba = lba; in msb_cache_write()
1624 mod_timer(&msb->cache_flush_timer, in msb_cache_write()
1631 msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size); in msb_cache_write()
1634 msb->cache + page * msb->page_size, msb->page_size); in msb_cache_write()
1636 set_bit(page, &msb->valid_cache_bitmap); in msb_cache_write()
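The cache lines above (msb_cache_flush()/msb_cache_write()) implement a single-block write-back cache: pages written by the host land in msb->cache and set a bit in valid_cache_bitmap; on flush, pages whose bit is clear are first read back from the card so msb_update_block() can rewrite the whole block. The userspace model below shows only that bookkeeping; the geometry is an assumed example and the printf stands in for the real read-back and rewrite.

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE_BYTES 512
#define PAGES_IN_BLOCK  32               /* assumed example geometry */
#define BLOCK_INVALID   (-1)

static unsigned char cache[PAGES_IN_BLOCK * PAGE_SIZE_BYTES];
static unsigned long valid_cache_bitmap; /* one bit per cached page  */
static int cache_block_lba = BLOCK_INVALID;

/* Like msb_cache_write() once the "same lba?" checks have passed. */
static void cache_write(int lba, int page, const void *data)
{
	if (cache_block_lba == BLOCK_INVALID)
		cache_block_lba = lba;   /* driver also arms cache_flush_timer */

	memcpy(cache + page * PAGE_SIZE_BYTES, data, PAGE_SIZE_BYTES);
	valid_cache_bitmap |= 1UL << page;
}

/* Like msb_cache_flush(): complete the block, write it out, then discard. */
static void cache_flush(void)
{
	int page;

	if (cache_block_lba == BLOCK_INVALID)
		return;

	for (page = 0; page < PAGES_IN_BLOCK; page++) {
		if (valid_cache_bitmap & (1UL << page))
			continue;
		/* driver: msb_read_page() from the old physical block */
		printf("read back page %d of lba %d\n", page, cache_block_lba);
	}

	/* driver: msb_update_block() writes the assembled block to a new pba */
	cache_block_lba = BLOCK_INVALID;     /* msb_cache_discard() */
	valid_cache_bitmap = 0;
}

int main(void)
{
	unsigned char page_buf[PAGE_SIZE_BYTES] = { 0xAA };

	cache_write(7, 3, page_buf);         /* host wrote page 3 of lba 7 */
	cache_flush();
	return 0;
}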
1640 static int msb_cache_read(struct msb_data *msb, int lba, in msb_cache_read() argument
1643 int pba = msb->lba_to_pba_table[lba]; in msb_cache_read()
1647 if (lba == msb->cache_block_lba && in msb_cache_read()
1648 test_bit(page, &msb->valid_cache_bitmap)) { in msb_cache_read()
1655 offset, msb->page_size); in msb_cache_read()
1657 msb->cache + msb->page_size * page, in msb_cache_read()
1658 msb->page_size); in msb_cache_read()
1663 error = msb_read_page(msb, pba, page, NULL, sg, offset); in msb_cache_read()
1667 msb_cache_write(msb, lba, page, true, sg, offset); in msb_cache_read()
1692 struct msb_data *msb = memstick_get_drvdata(card); in msb_init_card() local
1697 msb->caps = 0; in msb_init_card()
1701 msb->read_only = true; in msb_init_card()
1703 msb->state = -1; in msb_init_card()
1704 error = msb_reset(msb, false); in msb_init_card()
1712 msb_switch_to_parallel(msb); in msb_init_card()
1714 msb->page_size = sizeof(struct ms_boot_page); in msb_init_card()
1717 error = msb_read_boot_blocks(msb); in msb_init_card()
1721 boot_block = &msb->boot_page[0]; in msb_init_card()
1724 msb->block_count = boot_block->attr.number_of_blocks; in msb_init_card()
1725 msb->page_size = boot_block->attr.page_size; in msb_init_card()
1727 msb->pages_in_block = boot_block->attr.block_size * 2; in msb_init_card()
1728 msb->block_size = msb->page_size * msb->pages_in_block; in msb_init_card()
1730 if (msb->page_size > PAGE_SIZE) { in msb_init_card()
1732 dbg("device page %d size isn't supported", msb->page_size); in msb_init_card()
1736 msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL); in msb_init_card()
1737 if (!msb->block_buffer) in msb_init_card()
1740 raw_size_in_megs = (msb->block_size * msb->block_count) >> 20; in msb_init_card()
1747 msb->geometry.cylinders = chs_table[i].cyl; in msb_init_card()
1748 msb->geometry.heads = chs_table[i].head; in msb_init_card()
1749 msb->geometry.sectors = chs_table[i].sec; in msb_init_card()
1754 msb->caps |= MEMSTICK_CAP_PAR4; in msb_init_card()
1757 msb->read_only = true; in msb_init_card()
1759 dbg("Total block count = %d", msb->block_count); in msb_init_card()
1760 dbg("Each block consists of %d pages", msb->pages_in_block); in msb_init_card()
1761 dbg("Page size = %d bytes", msb->page_size); in msb_init_card()
1762 dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4)); in msb_init_card()
1763 dbg("Read only: %d", msb->read_only); in msb_init_card()
1767 if (host->caps & msb->caps & MEMSTICK_CAP_PAR4) in msb_init_card()
1768 msb_switch_to_parallel(msb); in msb_init_card()
1771 error = msb_cache_init(msb); in msb_init_card()
1775 error = msb_ftl_initialize(msb); in msb_init_card()
1781 error = msb_read_bad_block_table(msb, 0); in msb_init_card()
1785 error = msb_read_bad_block_table(msb, 1); in msb_init_card()
1792 error = msb_ftl_scan(msb); in msb_init_card()
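msb_init_card() above derives the card geometry from boot-page attributes: pages_in_block is attr.block_size doubled (which suggests the attribute is a block size in KiB with 512-byte pages), block_size is page_size times pages_in_block, and the raw size in MiB selects a CHS entry from chs_table. The numbers below are assumed example attributes, not values read from a real card.

#include <stdio.h>

int main(void)
{
	int number_of_blocks = 8192;     /* boot_block->attr.number_of_blocks */
	int page_size = 512;             /* boot_block->attr.page_size        */
	int attr_block_size = 16;        /* boot_block->attr.block_size (KiB) */

	int pages_in_block = attr_block_size * 2;
	int block_size = page_size * pages_in_block;
	int raw_size_in_megs = (block_size * number_of_blocks) >> 20;

	printf("pages_in_block = %d\n", pages_in_block);    /* 32    */
	printf("block_size     = %d bytes\n", block_size);  /* 16384 */
	printf("raw size       = %d MiB (picks the CHS entry)\n",
	       raw_size_in_megs);                           /* 128   */
	return 0;
}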
1802 static int msb_do_write_request(struct msb_data *msb, int lba, in msb_do_write_request() argument
1810 if (page == 0 && len - offset >= msb->block_size) { in msb_do_write_request()
1812 if (msb->cache_block_lba == lba) in msb_do_write_request()
1813 msb_cache_discard(msb); in msb_do_write_request()
1816 error = msb_update_block(msb, lba, sg, offset); in msb_do_write_request()
1820 offset += msb->block_size; in msb_do_write_request()
1821 *sucessfuly_written += msb->block_size; in msb_do_write_request()
1826 error = msb_cache_write(msb, lba, page, false, sg, offset); in msb_do_write_request()
1830 offset += msb->page_size; in msb_do_write_request()
1831 *sucessfuly_written += msb->page_size; in msb_do_write_request()
1834 if (page == msb->pages_in_block) { in msb_do_write_request()
1842 static int msb_do_read_request(struct msb_data *msb, int lba, in msb_do_read_request() argument
1851 error = msb_cache_read(msb, lba, page, sg, offset); in msb_do_read_request()
1855 offset += msb->page_size; in msb_do_read_request()
1856 *sucessfuly_read += msb->page_size; in msb_do_read_request()
1859 if (page == msb->pages_in_block) { in msb_do_read_request()
1869 struct msb_data *msb = container_of(work, struct msb_data, io_work); in msb_io_work() local
1872 struct scatterlist *sg = msb->prealloc_sg; in msb_io_work()
1878 spin_lock_irq(&msb->q_lock); in msb_io_work()
1880 if (msb->need_flush_cache) { in msb_io_work()
1881 msb->need_flush_cache = false; in msb_io_work()
1882 spin_unlock_irq(&msb->q_lock); in msb_io_work()
1883 msb_cache_flush(msb); in msb_io_work()
1887 req = msb->req; in msb_io_work()
1890 spin_unlock_irq(&msb->q_lock); in msb_io_work()
1894 spin_unlock_irq(&msb->q_lock); in msb_io_work()
1898 blk_rq_map_sg(msb->queue, req, sg); in msb_io_work()
1902 sector_div(lba, msb->page_size / 512); in msb_io_work()
1903 page = sector_div(lba, msb->pages_in_block); in msb_io_work()
1905 if (rq_data_dir(msb->req) == READ) in msb_io_work()
1906 error = msb_do_read_request(msb, lba, page, sg, in msb_io_work()
1909 error = msb_do_write_request(msb, lba, page, sg, in msb_io_work()
1914 spin_lock_irq(&msb->q_lock); in msb_io_work()
1915 msb->req = NULL; in msb_io_work()
1916 spin_unlock_irq(&msb->q_lock); in msb_io_work()
1919 if (error && msb->req) { in msb_io_work()
1924 spin_lock_irq(&msb->q_lock); in msb_io_work()
1925 msb->req = NULL; in msb_io_work()
1926 spin_unlock_irq(&msb->q_lock); in msb_io_work()
1929 if (msb->req) in msb_io_work()
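The request loop in msb_io_work() above converts the request's starting 512-byte sector into a logical block and page: sector_div() divides in place and hands back the remainder, first by sectors-per-page and then by pages_in_block. Requests arrive page-aligned because msb_init_disk() sets the logical block size to page_size. The standalone arithmetic, with an assumed 2048-byte page and 16-page block, looks like this:

#include <stdio.h>

int main(void)
{
	unsigned long long sector = 12344;   /* request start, 512-byte units */
	int page_size = 2048;                /* assumed example geometry      */
	int pages_in_block = 16;

	unsigned long long lba = sector;
	unsigned int page;

	lba /= page_size / 512;        /* sector_div(lba, page_size / 512)       */
	page = lba % pages_in_block;   /* page = sector_div(lba, pages_in_block) */
	lba /= pages_in_block;

	printf("sector %llu -> lba %llu, page %u\n", sector, lba, page);
	return 0;
}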
1940 struct msb_data *msb = disk->private_data; in msb_bd_open() local
1946 if (msb && msb->card) in msb_bd_open()
1947 msb->usage_count++; in msb_bd_open()
1953 static void msb_data_clear(struct msb_data *msb) in msb_data_clear() argument
1955 kfree(msb->boot_page); in msb_data_clear()
1956 kfree(msb->used_blocks_bitmap); in msb_data_clear()
1957 kfree(msb->lba_to_pba_table); in msb_data_clear()
1958 kfree(msb->cache); in msb_data_clear()
1959 msb->card = NULL; in msb_data_clear()
1964 struct msb_data *msb = disk->private_data; in msb_disk_release() local
1969 if (msb) { in msb_disk_release()
1970 if (msb->usage_count) in msb_disk_release()
1971 msb->usage_count--; in msb_disk_release()
1973 if (!msb->usage_count) { in msb_disk_release()
1975 idr_remove(&msb_disk_idr, msb->disk_id); in msb_disk_release()
1977 kfree(msb); in msb_disk_release()
1992 struct msb_data *msb = bdev->bd_disk->private_data; in msb_bd_getgeo() local
1993 *geo = msb->geometry; in msb_bd_getgeo()
2001 struct msb_data *msb = memstick_get_drvdata(card); in msb_queue_rq() local
2006 spin_lock_irq(&msb->q_lock); in msb_queue_rq()
2008 if (msb->card_dead) { in msb_queue_rq()
2011 WARN_ON(!msb->io_queue_stopped); in msb_queue_rq()
2013 spin_unlock_irq(&msb->q_lock); in msb_queue_rq()
2018 if (msb->req) { in msb_queue_rq()
2019 spin_unlock_irq(&msb->q_lock); in msb_queue_rq()
2024 msb->req = req; in msb_queue_rq()
2026 if (!msb->io_queue_stopped) in msb_queue_rq()
2027 queue_work(msb->io_queue, &msb->io_work); in msb_queue_rq()
2029 spin_unlock_irq(&msb->q_lock); in msb_queue_rq()
2035 struct msb_data *msb = memstick_get_drvdata(card); in msb_check_card() local
2036 return (msb->card_dead == 0); in msb_check_card()
2041 struct msb_data *msb = memstick_get_drvdata(card); in msb_stop() local
2046 blk_mq_stop_hw_queues(msb->queue); in msb_stop()
2047 spin_lock_irqsave(&msb->q_lock, flags); in msb_stop()
2048 msb->io_queue_stopped = true; in msb_stop()
2049 spin_unlock_irqrestore(&msb->q_lock, flags); in msb_stop()
2051 del_timer_sync(&msb->cache_flush_timer); in msb_stop()
2052 flush_workqueue(msb->io_queue); in msb_stop()
2054 spin_lock_irqsave(&msb->q_lock, flags); in msb_stop()
2055 if (msb->req) { in msb_stop()
2056 blk_mq_requeue_request(msb->req, false); in msb_stop()
2057 msb->req = NULL; in msb_stop()
2059 spin_unlock_irqrestore(&msb->q_lock, flags); in msb_stop()
2064 struct msb_data *msb = memstick_get_drvdata(card); in msb_start() local
2069 msb_invalidate_reg_window(msb); in msb_start()
2071 spin_lock_irqsave(&msb->q_lock, flags); in msb_start()
2072 if (!msb->io_queue_stopped || msb->card_dead) { in msb_start()
2073 spin_unlock_irqrestore(&msb->q_lock, flags); in msb_start()
2076 spin_unlock_irqrestore(&msb->q_lock, flags); in msb_start()
2079 msb->need_flush_cache = true; in msb_start()
2080 msb->io_queue_stopped = false; in msb_start()
2082 blk_mq_start_hw_queues(msb->queue); in msb_start()
2084 queue_work(msb->io_queue, &msb->io_work); in msb_start()
2102 struct msb_data *msb = memstick_get_drvdata(card); in msb_init_disk() local
2107 msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL); in msb_init_disk()
2110 if (msb->disk_id < 0) in msb_init_disk()
2111 return msb->disk_id; in msb_init_disk()
2113 msb->disk = alloc_disk(0); in msb_init_disk()
2114 if (!msb->disk) { in msb_init_disk()
2119 msb->queue = blk_mq_init_sq_queue(&msb->tag_set, &msb_mq_ops, 2, in msb_init_disk()
2121 if (IS_ERR(msb->queue)) { in msb_init_disk()
2122 rc = PTR_ERR(msb->queue); in msb_init_disk()
2123 msb->queue = NULL; in msb_init_disk()
2127 msb->queue->queuedata = card; in msb_init_disk()
2129 blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES); in msb_init_disk()
2130 blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS); in msb_init_disk()
2131 blk_queue_max_segment_size(msb->queue, in msb_init_disk()
2132 MS_BLOCK_MAX_PAGES * msb->page_size); in msb_init_disk()
2133 blk_queue_logical_block_size(msb->queue, msb->page_size); in msb_init_disk()
2135 sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id); in msb_init_disk()
2136 msb->disk->fops = &msb_bdops; in msb_init_disk()
2137 msb->disk->private_data = msb; in msb_init_disk()
2138 msb->disk->queue = msb->queue; in msb_init_disk()
2139 msb->disk->flags |= GENHD_FL_EXT_DEVT; in msb_init_disk()
2141 capacity = msb->pages_in_block * msb->logical_block_count; in msb_init_disk()
2142 capacity *= (msb->page_size / 512); in msb_init_disk()
2143 set_capacity(msb->disk, capacity); in msb_init_disk()
2146 msb->usage_count = 1; in msb_init_disk()
2147 msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM); in msb_init_disk()
2148 INIT_WORK(&msb->io_work, msb_io_work); in msb_init_disk()
2149 sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1); in msb_init_disk()
2151 if (msb->read_only) in msb_init_disk()
2152 set_disk_ro(msb->disk, 1); in msb_init_disk()
2155 device_add_disk(&card->dev, msb->disk, NULL); in msb_init_disk()
2160 put_disk(msb->disk); in msb_init_disk()
2163 idr_remove(&msb_disk_idr, msb->disk_id); in msb_init_disk()
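The capacity set in msb_init_disk() above is the number of 512-byte sectors exposed: logical blocks times pages per block times sectors per page. Using the same assumed geometry as the FTL sizing sketch earlier (8192 physical blocks, 512-byte pages, 32 pages per block):

#include <stdio.h>

int main(void)
{
	int pages_in_block = 32;             /* assumed example geometry */
	int page_size = 512;
	int logical_block_count = 7934;      /* 16 zones * 496 - 2       */

	long long capacity = (long long)pages_in_block * logical_block_count;

	capacity *= page_size / 512;         /* sectors per page         */
	printf("capacity = %lld sectors (%lld MiB)\n",
	       capacity, capacity * 512 >> 20);
	return 0;
}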
2170 struct msb_data *msb; in msb_probe() local
2173 msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL); in msb_probe()
2174 if (!msb) in msb_probe()
2176 memstick_set_drvdata(card, msb); in msb_probe()
2177 msb->card = card; in msb_probe()
2178 spin_lock_init(&msb->q_lock); in msb_probe()
2193 msb_data_clear(msb); in msb_probe()
2194 kfree(msb); in msb_probe()
2200 struct msb_data *msb = memstick_get_drvdata(card); in msb_remove() local
2203 if (!msb->io_queue_stopped) in msb_remove()
2209 spin_lock_irqsave(&msb->q_lock, flags); in msb_remove()
2210 msb->card_dead = true; in msb_remove()
2211 spin_unlock_irqrestore(&msb->q_lock, flags); in msb_remove()
2212 blk_mq_start_hw_queues(msb->queue); in msb_remove()
2215 del_gendisk(msb->disk); in msb_remove()
2216 blk_cleanup_queue(msb->queue); in msb_remove()
2217 blk_mq_free_tag_set(&msb->tag_set); in msb_remove()
2218 msb->queue = NULL; in msb_remove()
2221 msb_data_clear(msb); in msb_remove()
2224 msb_disk_release(msb->disk); in msb_remove()
2238 struct msb_data *msb = memstick_get_drvdata(card); in msb_resume() local
2243 msb->card_dead = true; in msb_resume()
2255 sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1); in msb_resume()
2260 if (msb->block_size != new_msb->block_size) in msb_resume()
2263 if (memcmp(msb->boot_page, new_msb->boot_page, in msb_resume()
2267 if (msb->logical_block_count != new_msb->logical_block_count || in msb_resume()
2268 memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table, in msb_resume()
2269 msb->logical_block_count)) in msb_resume()
2272 if (msb->block_count != new_msb->block_count || in msb_resume()
2273 memcmp(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap, in msb_resume()
2274 msb->block_count / 8)) in msb_resume()
2282 msb->card_dead = card_dead; in msb_resume()
2283 memstick_set_drvdata(card, msb); in msb_resume()