Lines matching full:msb in drivers/memstick/core/ms_block.c (the Memory Stick standard block device driver); each entry shows the source line followed by the enclosing function.

147 static int msb_validate_used_block_bitmap(struct msb_data *msb)  in msb_validate_used_block_bitmap()  argument
155 for (i = 0; i < msb->zone_count; i++) in msb_validate_used_block_bitmap()
156 total_free_blocks += msb->free_block_count[i]; in msb_validate_used_block_bitmap()
158 if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap, in msb_validate_used_block_bitmap()
159 msb->block_count) == total_free_blocks) in msb_validate_used_block_bitmap()
163 msb->read_only = true; in msb_validate_used_block_bitmap()
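
The lines above check that the used-block bitmap agrees with the per-zone free counters and drop the card to read-only on a mismatch. A minimal userspace model of that check (array sizes are illustrative, not taken from a real card):

```c
#include <stdbool.h>

/* Model of the consistency check in msb_validate_used_block_bitmap():
 * the number of clear bits in the used-block bitmap must equal the sum
 * of the per-zone free counters, otherwise the FTL bookkeeping is
 * corrupt and further writes are refused. */
#define BLOCK_COUNT 1024
#define ZONE_COUNT  2

static bool used_blocks[BLOCK_COUNT];     /* stands in for used_blocks_bitmap    */
static int  free_block_count[ZONE_COUNT]; /* stands in for msb->free_block_count */
static bool read_only;

int validate_used_block_bitmap(void)
{
	int total_free = 0, used = 0, i;

	for (i = 0; i < ZONE_COUNT; i++)
		total_free += free_block_count[i];
	for (i = 0; i < BLOCK_COUNT; i++)
		used += used_blocks[i];           /* bitmap_weight() equivalent */

	if (BLOCK_COUNT - used == total_free)
		return 0;

	read_only = true;                         /* mismatch: stop writing     */
	return -1;
}
```
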
168 static void msb_mark_block_used(struct msb_data *msb, int pba) in msb_mark_block_used() argument
172 if (test_bit(pba, msb->used_blocks_bitmap)) { in msb_mark_block_used()
175 msb->read_only = true; in msb_mark_block_used()
179 if (msb_validate_used_block_bitmap(msb)) in msb_mark_block_used()
183 __set_bit(pba, msb->used_blocks_bitmap); in msb_mark_block_used()
184 msb->free_block_count[zone]--; in msb_mark_block_used()
188 static void msb_mark_block_unused(struct msb_data *msb, int pba) in msb_mark_block_unused() argument
192 if (!test_bit(pba, msb->used_blocks_bitmap)) { in msb_mark_block_unused()
194 msb->read_only = true; in msb_mark_block_unused()
198 if (msb_validate_used_block_bitmap(msb)) in msb_mark_block_unused()
202 __clear_bit(pba, msb->used_blocks_bitmap); in msb_mark_block_unused()
203 msb->free_block_count[zone]++; in msb_mark_block_unused()
207 static void msb_invalidate_reg_window(struct msb_data *msb) in msb_invalidate_reg_window() argument
209 msb->reg_addr.w_offset = offsetof(struct ms_register, id); in msb_invalidate_reg_window()
210 msb->reg_addr.w_length = sizeof(struct ms_id_register); in msb_invalidate_reg_window()
211 msb->reg_addr.r_offset = offsetof(struct ms_register, id); in msb_invalidate_reg_window()
212 msb->reg_addr.r_length = sizeof(struct ms_id_register); in msb_invalidate_reg_window()
213 msb->addr_valid = false; in msb_invalidate_reg_window()
217 static int msb_run_state_machine(struct msb_data *msb, int (*state_func) in msb_run_state_machine() argument
220 struct memstick_dev *card = msb->card; in msb_run_state_machine()
222 WARN_ON(msb->state != -1); in msb_run_state_machine()
223 msb->int_polling = false; in msb_run_state_machine()
224 msb->state = 0; in msb_run_state_machine()
225 msb->exit_error = 0; in msb_run_state_machine()
233 WARN_ON(msb->state != -1); in msb_run_state_machine()
234 return msb->exit_error; in msb_run_state_machine()
238 static int msb_exit_state_machine(struct msb_data *msb, int error) in msb_exit_state_machine() argument
240 WARN_ON(msb->state == -1); in msb_exit_state_machine()
242 msb->state = -1; in msb_exit_state_machine()
243 msb->exit_error = error; in msb_exit_state_machine()
244 msb->card->next_request = h_msb_default_bad; in msb_exit_state_machine()
248 msb_invalidate_reg_window(msb); in msb_exit_state_machine()
250 complete(&msb->card->mrq_complete); in msb_exit_state_machine()
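
msb_run_state_machine() and msb_exit_state_machine() above implement the pattern every h_msb_* handler in this listing relies on: a handler is invoked once per completed memstick request and advances msb->state until it exits, recording the error code and marking the machine idle (state == -1). A synchronous userspace skeleton of that pattern (the real driver is driven by request completions and sleeps on card->mrq_complete instead of looping):

```c
#include <stdio.h>

struct sm {
	int state;       /* -1 = idle, >= 0 = current step */
	int exit_error;  /* value returned to the caller   */
};

static int exit_state_machine(struct sm *sm, int error)
{
	sm->state = -1;
	sm->exit_error = error;
	return error;
}

/* A toy three-step handler, standing in for h_msb_read_page() etc. */
static int demo_handler(struct sm *sm)
{
	switch (sm->state) {
	case 0:                          /* e.g. program the register window */
	case 1:                          /* e.g. send the read command       */
		sm->state++;
		return 0;
	default:                         /* e.g. data received, we are done  */
		return exit_state_machine(sm, 0);
	}
}

static int run_state_machine(struct sm *sm, int (*handler)(struct sm *))
{
	sm->state = 0;
	sm->exit_error = 0;
	while (sm->state != -1)          /* driver waits on a completion     */
		handler(sm);
	return sm->exit_error;
}

int main(void)
{
	struct sm sm = { .state = -1 };

	printf("state machine finished with %d\n",
	       run_state_machine(&sm, demo_handler));
	return 0;
}
```
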
255 static int msb_read_int_reg(struct msb_data *msb, long timeout) in msb_read_int_reg() argument
257 struct memstick_request *mrq = &msb->card->current_mrq; in msb_read_int_reg()
259 WARN_ON(msb->state == -1); in msb_read_int_reg()
261 if (!msb->int_polling) { in msb_read_int_reg()
262 msb->int_timeout = jiffies + in msb_read_int_reg()
264 msb->int_polling = true; in msb_read_int_reg()
265 } else if (time_after(jiffies, msb->int_timeout)) { in msb_read_int_reg()
270 if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) && in msb_read_int_reg()
282 static int msb_read_regs(struct msb_data *msb, int offset, int len) in msb_read_regs() argument
284 struct memstick_request *req = &msb->card->current_mrq; in msb_read_regs()
286 if (msb->reg_addr.r_offset != offset || in msb_read_regs()
287 msb->reg_addr.r_length != len || !msb->addr_valid) { in msb_read_regs()
289 msb->reg_addr.r_offset = offset; in msb_read_regs()
290 msb->reg_addr.r_length = len; in msb_read_regs()
291 msb->addr_valid = true; in msb_read_regs()
294 &msb->reg_addr, sizeof(msb->reg_addr)); in msb_read_regs()
303 static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf) in msb_write_regs() argument
305 struct memstick_request *req = &msb->card->current_mrq; in msb_write_regs()
307 if (msb->reg_addr.w_offset != offset || in msb_write_regs()
308 msb->reg_addr.w_length != len || !msb->addr_valid) { in msb_write_regs()
310 msb->reg_addr.w_offset = offset; in msb_write_regs()
311 msb->reg_addr.w_length = len; in msb_write_regs()
312 msb->addr_valid = true; in msb_write_regs()
315 &msb->reg_addr, sizeof(msb->reg_addr)); in msb_write_regs()
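
msb_read_regs()/msb_write_regs() above cache the card's register window: it is only re-programmed when the requested offset/length differ from the cached ones, or after msb_invalidate_reg_window() has cleared addr_valid. A small sketch of that caching decision (read window only, userspace model):

```c
#include <stdbool.h>
#include <stdio.h>

struct reg_window {
	int  offset, length;
	bool valid;
};

/* Returns true when a "set register window" request must be sent first. */
bool window_needs_update(struct reg_window *w, int offset, int length)
{
	if (w->valid && w->offset == offset && w->length == length)
		return false;            /* hit: read the registers directly */

	w->offset = offset;              /* miss: remember the new window    */
	w->length = length;
	w->valid  = true;
	return true;
}

int main(void)
{
	struct reg_window w = { .valid = false };

	printf("%d\n", window_needs_update(&w, 16, 4)); /* 1: program window */
	printf("%d\n", window_needs_update(&w, 16, 4)); /* 0: reuse it       */
	return 0;
}
```
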
332 * Writes output to msb->current_sg, takes sector address from msb->reg.param
338 struct msb_data *msb = memstick_get_drvdata(card); in h_msb_read_page() local
345 return msb_exit_state_machine(msb, mrq->error); in h_msb_read_page()
348 switch (msb->state) { in h_msb_read_page()
354 if (!msb_write_regs(msb, in h_msb_read_page()
357 (unsigned char *)&msb->regs.param)) in h_msb_read_page()
360 msb->state = MSB_RP_SEND_READ_COMMAND; in h_msb_read_page()
366 msb->state = MSB_RP_SEND_INT_REQ; in h_msb_read_page()
370 msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT; in h_msb_read_page()
374 if (msb_read_int_reg(msb, -1)) in h_msb_read_page()
380 msb->regs.status.interrupt = intreg; in h_msb_read_page()
383 return msb_exit_state_machine(msb, -EIO); in h_msb_read_page()
386 msb->state = MSB_RP_SEND_INT_REQ; in h_msb_read_page()
390 msb->int_polling = false; in h_msb_read_page()
391 msb->state = (intreg & MEMSTICK_INT_ERR) ? in h_msb_read_page()
397 if (!msb_read_regs(msb, in h_msb_read_page()
402 msb->state = MSB_RP_RECEIVE_STATUS_REG; in h_msb_read_page()
406 msb->regs.status = *(struct ms_status_register *)mrq->data; in h_msb_read_page()
407 msb->state = MSB_RP_SEND_OOB_READ; in h_msb_read_page()
411 if (!msb_read_regs(msb, in h_msb_read_page()
416 msb->state = MSB_RP_RECEIVE_OOB_READ; in h_msb_read_page()
420 msb->regs.extra_data = in h_msb_read_page()
422 msb->state = MSB_RP_SEND_READ_DATA; in h_msb_read_page()
427 if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) { in h_msb_read_page()
428 msb->state = MSB_RP_RECEIVE_READ_DATA; in h_msb_read_page()
433 msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg), in h_msb_read_page()
434 msb->current_sg_offset, in h_msb_read_page()
435 msb->page_size); in h_msb_read_page()
438 msb->state = MSB_RP_RECEIVE_READ_DATA; in h_msb_read_page()
442 if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) { in h_msb_read_page()
443 msb->current_sg_offset += msb->page_size; in h_msb_read_page()
444 return msb_exit_state_machine(msb, 0); in h_msb_read_page()
447 if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) { in h_msb_read_page()
449 return msb_exit_state_machine(msb, -EBADMSG); in h_msb_read_page()
452 if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) { in h_msb_read_page()
454 msb->current_sg_offset += msb->page_size; in h_msb_read_page()
455 return msb_exit_state_machine(msb, -EUCLEAN); in h_msb_read_page()
458 return msb_exit_state_machine(msb, -EIO); in h_msb_read_page()
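
The tail of h_msb_read_page() above maps the card's status bits to an errno: no error bit means a clean read, an uncorrectable ECC error becomes -EBADMSG, a corrected one becomes -EUCLEAN (data is fine, block is wearing out), anything else -EIO. A sketch of that classification; the bit values below are placeholders, the driver uses MEMSTICK_INT_ERR, MEMSTICK_UNCORR_ERROR and MEMSTICK_CORR_ERROR from <linux/memstick.h>, and EUCLEAN is a Linux-specific errno:

```c
#include <errno.h>

#define INT_ERR      0x40   /* placeholder bit values */
#define UNCORR_ERROR 0x04
#define CORR_ERROR   0x02

int classify_page_read(unsigned int intreg, unsigned int status1)
{
	if (!(intreg & INT_ERR))
		return 0;            /* clean read, advance the sg offset      */
	if (status1 & UNCORR_ERROR)
		return -EBADMSG;     /* uncorrectable ECC error: data is gone  */
	if (status1 & CORR_ERROR)
		return -EUCLEAN;     /* corrected by ECC: data OK, block weak  */
	return -EIO;                 /* any other card-side failure            */
}
```
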
467 * Takes address from msb->regs.param.
469 * from msb->regs.extra
476 struct msb_data *msb = memstick_get_drvdata(card); in h_msb_write_block() local
482 return msb_exit_state_machine(msb, mrq->error); in h_msb_write_block()
485 switch (msb->state) { in h_msb_write_block()
494 if (!msb_write_regs(msb, in h_msb_write_block()
497 &msb->regs.param)) in h_msb_write_block()
500 msb->state = MSB_WB_SEND_WRITE_OOB; in h_msb_write_block()
504 if (!msb_write_regs(msb, in h_msb_write_block()
507 &msb->regs.extra_data)) in h_msb_write_block()
509 msb->state = MSB_WB_SEND_WRITE_COMMAND; in h_msb_write_block()
516 msb->state = MSB_WB_SEND_INT_REQ; in h_msb_write_block()
520 msb->state = MSB_WB_RECEIVE_INT_REQ; in h_msb_write_block()
521 if (msb_read_int_reg(msb, -1)) in h_msb_write_block()
527 msb->regs.status.interrupt = intreg; in h_msb_write_block()
531 return msb_exit_state_machine(msb, -EIO); in h_msb_write_block()
534 return msb_exit_state_machine(msb, -EBADMSG); in h_msb_write_block()
538 if (msb->current_page == msb->pages_in_block) { in h_msb_write_block()
540 return msb_exit_state_machine(msb, 0); in h_msb_write_block()
541 msb->state = MSB_WB_SEND_INT_REQ; in h_msb_write_block()
548 msb->state = MSB_WB_SEND_INT_REQ; in h_msb_write_block()
552 msb->int_polling = false; in h_msb_write_block()
553 msb->state = MSB_WB_SEND_WRITE_DATA; in h_msb_write_block()
559 if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg), in h_msb_write_block()
560 msb->current_sg_offset, in h_msb_write_block()
561 msb->page_size) < msb->page_size) in h_msb_write_block()
562 return msb_exit_state_machine(msb, -EIO); in h_msb_write_block()
566 msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION; in h_msb_write_block()
570 msb->current_page++; in h_msb_write_block()
571 msb->current_sg_offset += msb->page_size; in h_msb_write_block()
572 msb->state = MSB_WB_SEND_INT_REQ; in h_msb_write_block()
588 struct msb_data *msb = memstick_get_drvdata(card); in h_msb_send_command() local
594 return msb_exit_state_machine(msb, mrq->error); in h_msb_send_command()
597 switch (msb->state) { in h_msb_send_command()
601 if (!msb_write_regs(msb, in h_msb_send_command()
604 &msb->regs.param)) in h_msb_send_command()
606 msb->state = MSB_SC_SEND_WRITE_OOB; in h_msb_send_command()
610 if (!msb->command_need_oob) { in h_msb_send_command()
611 msb->state = MSB_SC_SEND_COMMAND; in h_msb_send_command()
615 if (!msb_write_regs(msb, in h_msb_send_command()
618 &msb->regs.extra_data)) in h_msb_send_command()
621 msb->state = MSB_SC_SEND_COMMAND; in h_msb_send_command()
625 memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1); in h_msb_send_command()
626 msb->state = MSB_SC_SEND_INT_REQ; in h_msb_send_command()
630 msb->state = MSB_SC_RECEIVE_INT_REQ; in h_msb_send_command()
631 if (msb_read_int_reg(msb, -1)) in h_msb_send_command()
639 return msb_exit_state_machine(msb, -EIO); in h_msb_send_command()
641 return msb_exit_state_machine(msb, -EBADMSG); in h_msb_send_command()
644 msb->state = MSB_SC_SEND_INT_REQ; in h_msb_send_command()
648 return msb_exit_state_machine(msb, 0); in h_msb_send_command()
659 struct msb_data *msb = memstick_get_drvdata(card); in h_msb_reset() local
663 return msb_exit_state_machine(msb, mrq->error); in h_msb_reset()
665 switch (msb->state) { in h_msb_reset()
669 msb->state = MSB_RS_CONFIRM; in h_msb_reset()
672 return msb_exit_state_machine(msb, 0); in h_msb_reset()
681 struct msb_data *msb = memstick_get_drvdata(card); in h_msb_parallel_switch() local
687 msb->regs.param.system &= ~MEMSTICK_SYS_PAM; in h_msb_parallel_switch()
688 return msb_exit_state_machine(msb, mrq->error); in h_msb_parallel_switch()
691 switch (msb->state) { in h_msb_parallel_switch()
694 msb->regs.param.system |= MEMSTICK_SYS_PAM; in h_msb_parallel_switch()
696 if (!msb_write_regs(msb, in h_msb_parallel_switch()
699 (unsigned char *)&msb->regs.param)) in h_msb_parallel_switch()
702 msb->state = MSB_PS_SWICH_HOST; in h_msb_parallel_switch()
711 msb->state = MSB_PS_CONFIRM; in h_msb_parallel_switch()
715 return msb_exit_state_machine(msb, 0); in h_msb_parallel_switch()
721 static int msb_switch_to_parallel(struct msb_data *msb);
724 static int msb_reset(struct msb_data *msb, bool full) in msb_reset() argument
727 bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM; in msb_reset()
728 struct memstick_dev *card = msb->card; in msb_reset()
733 msb->regs.param.system = MEMSTICK_SYS_BAMD; in msb_reset()
741 msb_invalidate_reg_window(msb); in msb_reset()
753 msb->read_only = true; in msb_reset()
758 error = msb_run_state_machine(msb, h_msb_reset); in msb_reset()
761 msb->read_only = true; in msb_reset()
767 msb_switch_to_parallel(msb); in msb_reset()
772 static int msb_switch_to_parallel(struct msb_data *msb) in msb_switch_to_parallel() argument
776 error = msb_run_state_machine(msb, h_msb_parallel_switch); in msb_switch_to_parallel()
779 msb->regs.param.system &= ~MEMSTICK_SYS_PAM; in msb_switch_to_parallel()
780 msb_reset(msb, true); in msb_switch_to_parallel()
784 msb->caps |= MEMSTICK_CAP_AUTO_GET_INT; in msb_switch_to_parallel()
789 static int msb_set_overwrite_flag(struct msb_data *msb, in msb_set_overwrite_flag() argument
792 if (msb->read_only) in msb_set_overwrite_flag()
795 msb->regs.param.block_address = cpu_to_be16(pba); in msb_set_overwrite_flag()
796 msb->regs.param.page_address = page; in msb_set_overwrite_flag()
797 msb->regs.param.cp = MEMSTICK_CP_OVERWRITE; in msb_set_overwrite_flag()
798 msb->regs.extra_data.overwrite_flag = flag; in msb_set_overwrite_flag()
799 msb->command_value = MS_CMD_BLOCK_WRITE; in msb_set_overwrite_flag()
800 msb->command_need_oob = true; in msb_set_overwrite_flag()
804 return msb_run_state_machine(msb, h_msb_send_command); in msb_set_overwrite_flag()
807 static int msb_mark_bad(struct msb_data *msb, int pba) in msb_mark_bad() argument
810 msb_reset(msb, true); in msb_mark_bad()
812 msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST); in msb_mark_bad()
815 static int msb_mark_page_bad(struct msb_data *msb, int pba, int page) in msb_mark_page_bad() argument
818 msb_reset(msb, true); in msb_mark_page_bad()
819 return msb_set_overwrite_flag(msb, in msb_mark_page_bad()
824 static int msb_erase_block(struct msb_data *msb, u16 pba) in msb_erase_block() argument
828 if (msb->read_only) in msb_erase_block()
834 msb->regs.param.block_address = cpu_to_be16(pba); in msb_erase_block()
835 msb->regs.param.page_address = 0; in msb_erase_block()
836 msb->regs.param.cp = MEMSTICK_CP_BLOCK; in msb_erase_block()
837 msb->command_value = MS_CMD_BLOCK_ERASE; in msb_erase_block()
838 msb->command_need_oob = false; in msb_erase_block()
841 error = msb_run_state_machine(msb, h_msb_send_command); in msb_erase_block()
842 if (!error || msb_reset(msb, true)) in msb_erase_block()
848 msb_mark_bad(msb, pba); in msb_erase_block()
852 msb_mark_block_unused(msb, pba); in msb_erase_block()
853 __set_bit(pba, msb->erased_blocks_bitmap); in msb_erase_block()
858 static int msb_read_page(struct msb_data *msb, in msb_read_page() argument
867 size_t len = msb->page_size; in msb_read_page()
901 if (pba >= msb->block_count) { in msb_read_page()
907 msb->regs.param.block_address = cpu_to_be16(pba); in msb_read_page()
908 msb->regs.param.page_address = page; in msb_read_page()
909 msb->regs.param.cp = MEMSTICK_CP_PAGE; in msb_read_page()
911 msb->current_sg = sg; in msb_read_page()
912 msb->current_sg_offset = offset; in msb_read_page()
913 error = msb_run_state_machine(msb, h_msb_read_page); in msb_read_page()
923 *extra = msb->regs.extra_data; in msb_read_page()
925 if (!error || msb_reset(msb, true)) in msb_read_page()
935 if (msb->regs.extra_data.overwrite_flag & in msb_read_page()
937 msb_mark_page_bad(msb, pba, page); in msb_read_page()
948 static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page, in msb_read_oob() argument
954 msb->regs.param.block_address = cpu_to_be16(pba); in msb_read_oob()
955 msb->regs.param.page_address = page; in msb_read_oob()
956 msb->regs.param.cp = MEMSTICK_CP_EXTRA; in msb_read_oob()
958 if (pba > msb->block_count) { in msb_read_oob()
963 error = msb_run_state_machine(msb, h_msb_read_page); in msb_read_oob()
964 *extra = msb->regs.extra_data; in msb_read_oob()
976 static int msb_verify_block(struct msb_data *msb, u16 pba, in msb_verify_block() argument
982 sg_init_one(&sg, msb->block_buffer, msb->block_size); in msb_verify_block()
984 while (page < msb->pages_in_block) { in msb_verify_block()
986 error = msb_read_page(msb, pba, page, in msb_verify_block()
987 NULL, &sg, page * msb->page_size); in msb_verify_block()
994 msb->block_buffer, msb->block_size)) in msb_verify_block()
1000 static int msb_write_block(struct msb_data *msb, in msb_write_block() argument
1005 BUG_ON(sg->length < msb->page_size); in msb_write_block()
1007 if (msb->read_only) in msb_write_block()
1016 if (pba >= msb->block_count || lba >= msb->logical_block_count) { in msb_write_block()
1027 if (pba == msb->boot_block_locations[0] || in msb_write_block()
1028 pba == msb->boot_block_locations[1]) { in msb_write_block()
1035 if (msb->read_only) in msb_write_block()
1038 msb->regs.param.cp = MEMSTICK_CP_BLOCK; in msb_write_block()
1039 msb->regs.param.page_address = 0; in msb_write_block()
1040 msb->regs.param.block_address = cpu_to_be16(pba); in msb_write_block()
1042 msb->regs.extra_data.management_flag = 0xFF; in msb_write_block()
1043 msb->regs.extra_data.overwrite_flag = 0xF8; in msb_write_block()
1044 msb->regs.extra_data.logical_address = cpu_to_be16(lba); in msb_write_block()
1046 msb->current_sg = sg; in msb_write_block()
1047 msb->current_sg_offset = offset; in msb_write_block()
1048 msb->current_page = 0; in msb_write_block()
1050 error = msb_run_state_machine(msb, h_msb_write_block); in msb_write_block()
1060 !test_bit(pba, msb->erased_blocks_bitmap))) in msb_write_block()
1061 error = msb_verify_block(msb, pba, sg, offset); in msb_write_block()
1066 if (current_try > 1 || msb_reset(msb, true)) in msb_write_block()
1070 error = msb_erase_block(msb, pba); in msb_write_block()
1080 static u16 msb_get_free_block(struct msb_data *msb, int zone) in msb_get_free_block() argument
1088 if (!msb->free_block_count[zone]) { in msb_get_free_block()
1090 msb->read_only = true; in msb_get_free_block()
1094 pos %= msb->free_block_count[zone]; in msb_get_free_block()
1097 msb->free_block_count[zone], pos); in msb_get_free_block()
1099 pba = find_next_zero_bit(msb->used_blocks_bitmap, in msb_get_free_block()
1100 msb->block_count, pba); in msb_get_free_block()
1102 pba = find_next_zero_bit(msb->used_blocks_bitmap, in msb_get_free_block()
1103 msb->block_count, pba + 1); in msb_get_free_block()
1107 if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) { in msb_get_free_block()
1109 msb->read_only = true; in msb_get_free_block()
1113 msb_mark_block_used(msb, pba); in msb_get_free_block()
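
msb_get_free_block() above picks a random free block inside a zone by skipping pos used bits with find_next_zero_bit(), then marks it used. A userspace model under the assumption that MS_BLOCKS_IN_ZONE is 512 (the driver gets pos from a kernel random source; rand() here is just a stand-in):

```c
#include <stdbool.h>
#include <stdlib.h>

#define BLOCKS_IN_ZONE 512
#define ZONES          2

static bool used[ZONES * BLOCKS_IN_ZONE];
static int  free_block_count[ZONES] = { BLOCKS_IN_ZONE, BLOCKS_IN_ZONE };

int get_free_block(int zone)
{
	int pba = zone * BLOCKS_IN_ZONE;
	int end = pba + BLOCKS_IN_ZONE;
	int pos;

	if (!free_block_count[zone])
		return -1;                  /* zone exhausted: driver goes RO  */

	pos = rand() % free_block_count[zone];

	for (; pba < end; pba++) {
		if (used[pba])
			continue;           /* find_next_zero_bit() equivalent */
		if (pos-- == 0) {
			used[pba] = true;   /* msb_mark_block_used()           */
			free_block_count[zone]--;
			return pba;
		}
	}
	return -1;                          /* bitmap and counters disagree    */
}
```
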
1117 static int msb_update_block(struct msb_data *msb, u16 lba, in msb_update_block() argument
1123 pba = msb->lba_to_pba_table[lba]; in msb_update_block()
1128 msb_set_overwrite_flag(msb, pba, 0, in msb_update_block()
1133 new_pba = msb_get_free_block(msb, in msb_update_block()
1143 error = msb_write_block(msb, new_pba, lba, sg, offset); in msb_update_block()
1145 msb_mark_bad(msb, new_pba); in msb_update_block()
1153 msb_erase_block(msb, pba); in msb_update_block()
1154 msb->lba_to_pba_table[lba] = new_pba; in msb_update_block()
1160 msb->read_only = true; in msb_update_block()
1192 static int msb_read_boot_blocks(struct msb_data *msb) in msb_read_boot_blocks() argument
1199 msb->boot_block_locations[0] = MS_BLOCK_INVALID; in msb_read_boot_blocks()
1200 msb->boot_block_locations[1] = MS_BLOCK_INVALID; in msb_read_boot_blocks()
1201 msb->boot_block_count = 0; in msb_read_boot_blocks()
1205 if (!msb->boot_page) { in msb_read_boot_blocks()
1211 msb->boot_page = page; in msb_read_boot_blocks()
1213 page = msb->boot_page; in msb_read_boot_blocks()
1215 msb->block_count = MS_BLOCK_MAX_BOOT_ADDR; in msb_read_boot_blocks()
1220 if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) { in msb_read_boot_blocks()
1237 msb->boot_block_locations[msb->boot_block_count] = pba; in msb_read_boot_blocks()
1240 msb->boot_block_count++; in msb_read_boot_blocks()
1242 if (msb->boot_block_count == 2) in msb_read_boot_blocks()
1246 if (!msb->boot_block_count) { in msb_read_boot_blocks()
1255 static int msb_read_bad_block_table(struct msb_data *msb, int block_nr) in msb_read_bad_block_table() argument
1266 boot_block = &msb->boot_page[block_nr]; in msb_read_bad_block_table()
1267 pba = msb->boot_block_locations[block_nr]; in msb_read_bad_block_table()
1269 if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID) in msb_read_bad_block_table()
1278 page = data_offset / msb->page_size; in msb_read_bad_block_table()
1279 page_offset = data_offset % msb->page_size; in msb_read_bad_block_table()
1281 DIV_ROUND_UP(data_size + page_offset, msb->page_size) * in msb_read_bad_block_table()
1282 msb->page_size; in msb_read_bad_block_table()
1295 error = msb_read_page(msb, pba, page, NULL, &sg, offset); in msb_read_bad_block_table()
1300 offset += msb->page_size; in msb_read_bad_block_table()
1302 if (page == msb->pages_in_block) { in msb_read_bad_block_table()
1314 if (bad_block >= msb->block_count) { in msb_read_bad_block_table()
1320 if (test_bit(bad_block, msb->used_blocks_bitmap)) { in msb_read_bad_block_table()
1327 msb_mark_block_used(msb, bad_block); in msb_read_bad_block_table()
1334 static int msb_ftl_initialize(struct msb_data *msb) in msb_ftl_initialize() argument
1338 if (msb->ftl_initialized) in msb_ftl_initialize()
1341 msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE; in msb_ftl_initialize()
1342 msb->logical_block_count = msb->zone_count * 496 - 2; in msb_ftl_initialize()
1344 msb->used_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL); in msb_ftl_initialize()
1345 msb->erased_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL); in msb_ftl_initialize()
1346 msb->lba_to_pba_table = in msb_ftl_initialize()
1347 kmalloc_array(msb->logical_block_count, sizeof(u16), in msb_ftl_initialize()
1350 if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table || in msb_ftl_initialize()
1351 !msb->erased_blocks_bitmap) { in msb_ftl_initialize()
1352 bitmap_free(msb->used_blocks_bitmap); in msb_ftl_initialize()
1353 bitmap_free(msb->erased_blocks_bitmap); in msb_ftl_initialize()
1354 kfree(msb->lba_to_pba_table); in msb_ftl_initialize()
1358 for (i = 0; i < msb->zone_count; i++) in msb_ftl_initialize()
1359 msb->free_block_count[i] = MS_BLOCKS_IN_ZONE; in msb_ftl_initialize()
1361 memset(msb->lba_to_pba_table, MS_BLOCK_INVALID, in msb_ftl_initialize()
1362 msb->logical_block_count * sizeof(u16)); in msb_ftl_initialize()
1365 msb->zone_count, msb->logical_block_count); in msb_ftl_initialize()
1367 msb->ftl_initialized = true; in msb_ftl_initialize()
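
msb_ftl_initialize() above derives the FTL geometry from the raw block count. A small worked example, assuming MS_BLOCKS_IN_ZONE is 512 and a 4096-block card with 16 KB blocks (roughly 64 MB raw); each zone exposes 496 logical blocks, the rest are spares, and the minus 2 presumably accounts for the boot blocks:

```c
#include <stdio.h>

int main(void)
{
	int block_count = 4096;                         /* example card    */
	int zone_count = block_count / 512;             /* -> 8 zones      */
	int logical_block_count = zone_count * 496 - 2; /* -> 3966         */

	printf("%d zones, %d logical blocks\n",
	       zone_count, logical_block_count);
	return 0;
}
```
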
1371 static int msb_ftl_scan(struct msb_data *msb) in msb_ftl_scan() argument
1377 u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL); in msb_ftl_scan()
1383 for (pba = 0; pba < msb->block_count; pba++) { in msb_ftl_scan()
1385 if (pba == msb->boot_block_locations[0] || in msb_ftl_scan()
1386 pba == msb->boot_block_locations[1]) { in msb_ftl_scan()
1388 msb_mark_block_used(msb, pba); in msb_ftl_scan()
1392 if (test_bit(pba, msb->used_blocks_bitmap)) { in msb_ftl_scan()
1398 error = msb_read_oob(msb, pba, 0, &extra); in msb_ftl_scan()
1404 msb_mark_block_used(msb, pba); in msb_ftl_scan()
1405 msb_erase_block(msb, pba); in msb_ftl_scan()
1423 msb_mark_block_used(msb, pba); in msb_ftl_scan()
1432 msb_mark_block_used(msb, pba); in msb_ftl_scan()
1440 msb_mark_block_used(msb, pba); in msb_ftl_scan()
1441 msb_erase_block(msb, pba); in msb_ftl_scan()
1450 msb_mark_block_used(msb, pba); in msb_ftl_scan()
1456 msb_erase_block(msb, pba); in msb_ftl_scan()
1461 if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) { in msb_ftl_scan()
1463 msb->lba_to_pba_table[lba] = pba; in msb_ftl_scan()
1467 other_block = msb->lba_to_pba_table[lba]; in msb_ftl_scan()
1475 msb_erase_block(msb, other_block); in msb_ftl_scan()
1476 msb->lba_to_pba_table[lba] = pba; in msb_ftl_scan()
1483 msb_erase_block(msb, pba); in msb_ftl_scan()
1490 msb_erase_block(msb, other_block); in msb_ftl_scan()
1491 msb->lba_to_pba_table[lba] = pba; in msb_ftl_scan()
1501 struct msb_data *msb = from_timer(msb, t, cache_flush_timer); in msb_cache_flush_timer() local
1503 msb->need_flush_cache = true; in msb_cache_flush_timer()
1504 queue_work(msb->io_queue, &msb->io_work); in msb_cache_flush_timer()
1508 static void msb_cache_discard(struct msb_data *msb) in msb_cache_discard() argument
1510 if (msb->cache_block_lba == MS_BLOCK_INVALID) in msb_cache_discard()
1513 del_timer_sync(&msb->cache_flush_timer); in msb_cache_discard()
1516 msb->cache_block_lba = MS_BLOCK_INVALID; in msb_cache_discard()
1517 bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block); in msb_cache_discard()
1520 static int msb_cache_init(struct msb_data *msb) in msb_cache_init() argument
1522 timer_setup(&msb->cache_flush_timer, msb_cache_flush_timer, 0); in msb_cache_init()
1524 if (!msb->cache) in msb_cache_init()
1525 msb->cache = kzalloc(msb->block_size, GFP_KERNEL); in msb_cache_init()
1526 if (!msb->cache) in msb_cache_init()
1529 msb_cache_discard(msb); in msb_cache_init()
1533 static int msb_cache_flush(struct msb_data *msb) in msb_cache_flush() argument
1540 if (msb->read_only) in msb_cache_flush()
1543 if (msb->cache_block_lba == MS_BLOCK_INVALID) in msb_cache_flush()
1546 lba = msb->cache_block_lba; in msb_cache_flush()
1547 pba = msb->lba_to_pba_table[lba]; in msb_cache_flush()
1550 pba, msb->cache_block_lba); in msb_cache_flush()
1552 sg_init_one(&sg, msb->cache , msb->block_size); in msb_cache_flush()
1555 for (page = 0; page < msb->pages_in_block; page++) { in msb_cache_flush()
1557 if (test_bit(page, &msb->valid_cache_bitmap)) in msb_cache_flush()
1560 offset = page * msb->page_size; in msb_cache_flush()
1564 error = msb_read_page(msb, pba, page, &extra, &sg, offset); in msb_cache_flush()
1581 set_bit(page, &msb->valid_cache_bitmap); in msb_cache_flush()
1585 error = msb_update_block(msb, msb->cache_block_lba, &sg, 0); in msb_cache_flush()
1586 pba = msb->lba_to_pba_table[msb->cache_block_lba]; in msb_cache_flush()
1590 for (page = 0; page < msb->pages_in_block; page++) { in msb_cache_flush()
1592 if (test_bit(page, &msb->valid_cache_bitmap)) in msb_cache_flush()
1597 msb_set_overwrite_flag(msb, in msb_cache_flush()
1602 msb_cache_discard(msb); in msb_cache_flush()
1606 static int msb_cache_write(struct msb_data *msb, int lba, in msb_cache_write() argument
1612 if (msb->read_only) in msb_cache_write()
1615 if (msb->cache_block_lba == MS_BLOCK_INVALID || in msb_cache_write()
1616 lba != msb->cache_block_lba) in msb_cache_write()
1621 if (msb->cache_block_lba != MS_BLOCK_INVALID && in msb_cache_write()
1622 lba != msb->cache_block_lba) { in msb_cache_write()
1624 error = msb_cache_flush(msb); in msb_cache_write()
1629 if (msb->cache_block_lba == MS_BLOCK_INVALID) { in msb_cache_write()
1630 msb->cache_block_lba = lba; in msb_cache_write()
1631 mod_timer(&msb->cache_flush_timer, in msb_cache_write()
1638 msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size); in msb_cache_write()
1641 msb->cache + page * msb->page_size, msb->page_size); in msb_cache_write()
1643 set_bit(page, &msb->valid_cache_bitmap); in msb_cache_write()
1647 static int msb_cache_read(struct msb_data *msb, int lba, in msb_cache_read() argument
1650 int pba = msb->lba_to_pba_table[lba]; in msb_cache_read()
1654 if (lba == msb->cache_block_lba && in msb_cache_read()
1655 test_bit(page, &msb->valid_cache_bitmap)) { in msb_cache_read()
1662 offset, msb->page_size); in msb_cache_read()
1664 msb->cache + msb->page_size * page, in msb_cache_read()
1665 msb->page_size); in msb_cache_read()
1670 error = msb_read_page(msb, pba, page, NULL, sg, offset); in msb_cache_read()
1674 msb_cache_write(msb, lba, page, true, sg, offset); in msb_cache_read()
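
msb_cache_read() above serves a page from the one-block write cache when the page is valid there, and otherwise reads it from the card and lets the cache keep a copy. A simplified userspace model (sizes are illustrative, the card read is stubbed out, and the "populate on miss" handling of a previously empty cache is omitted):

```c
#include <stdbool.h>
#include <string.h>

enum { PAGE_SZ = 512, PAGES_PER_BLOCK = 32, INVALID_LBA = -1 };

static int  cache_lba = INVALID_LBA;        /* like msb->cache_block_lba */
static bool cache_valid[PAGES_PER_BLOCK];   /* like valid_cache_bitmap   */
static char cache[PAGES_PER_BLOCK * PAGE_SZ];

static int read_page_from_card(int lba, int page, char *dst)
{
	(void)lba; (void)page;
	memset(dst, 0xAA, PAGE_SZ);         /* stand-in for msb_read_page() */
	return 0;
}

int cache_read(int lba, int page, char *dst)
{
	if (lba == cache_lba && cache_valid[page]) {
		memcpy(dst, cache + page * PAGE_SZ, PAGE_SZ);
		return 0;                                  /* cache hit      */
	}

	if (read_page_from_card(lba, page, dst))
		return -1;

	if (lba == cache_lba) {             /* keep the cached block fresh  */
		memcpy(cache + page * PAGE_SZ, dst, PAGE_SZ);
		cache_valid[page] = true;
	}
	return 0;
}
```
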
1700 struct msb_data *msb = memstick_get_drvdata(card); in msb_init_card() local
1705 msb->caps = 0; in msb_init_card()
1709 msb->read_only = true; in msb_init_card()
1711 msb->state = -1; in msb_init_card()
1712 error = msb_reset(msb, false); in msb_init_card()
1721 msb_switch_to_parallel(msb); in msb_init_card()
1723 msb->page_size = sizeof(struct ms_boot_page); in msb_init_card()
1726 error = msb_read_boot_blocks(msb); in msb_init_card()
1730 boot_block = &msb->boot_page[0]; in msb_init_card()
1733 msb->block_count = boot_block->attr.number_of_blocks; in msb_init_card()
1734 msb->page_size = boot_block->attr.page_size; in msb_init_card()
1736 msb->pages_in_block = boot_block->attr.block_size * 2; in msb_init_card()
1737 msb->block_size = msb->page_size * msb->pages_in_block; in msb_init_card()
1739 if ((size_t)msb->page_size > PAGE_SIZE) { in msb_init_card()
1741 dbg("device page %d size isn't supported", msb->page_size); in msb_init_card()
1745 msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL); in msb_init_card()
1746 if (!msb->block_buffer) in msb_init_card()
1749 raw_size_in_megs = (msb->block_size * msb->block_count) >> 20; in msb_init_card()
1756 msb->geometry.cylinders = chs_table[i].cyl; in msb_init_card()
1757 msb->geometry.heads = chs_table[i].head; in msb_init_card()
1758 msb->geometry.sectors = chs_table[i].sec; in msb_init_card()
1763 msb->caps |= MEMSTICK_CAP_PAR4; in msb_init_card()
1766 msb->read_only = true; in msb_init_card()
1768 dbg("Total block count = %d", msb->block_count); in msb_init_card()
1769 dbg("Each block consists of %d pages", msb->pages_in_block); in msb_init_card()
1770 dbg("Page size = %d bytes", msb->page_size); in msb_init_card()
1771 dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4)); in msb_init_card()
1772 dbg("Read only: %d", msb->read_only); in msb_init_card()
1776 if (host->caps & msb->caps & MEMSTICK_CAP_PAR4) in msb_init_card()
1777 msb_switch_to_parallel(msb); in msb_init_card()
1780 error = msb_cache_init(msb); in msb_init_card()
1784 error = msb_ftl_initialize(msb); in msb_init_card()
1790 error = msb_read_bad_block_table(msb, 0); in msb_init_card()
1794 error = msb_read_bad_block_table(msb, 1); in msb_init_card()
1801 error = msb_ftl_scan(msb); in msb_init_card()
1811 static int msb_do_write_request(struct msb_data *msb, int lba, in msb_do_write_request() argument
1819 if (page == 0 && len - offset >= msb->block_size) { in msb_do_write_request()
1821 if (msb->cache_block_lba == lba) in msb_do_write_request()
1822 msb_cache_discard(msb); in msb_do_write_request()
1825 error = msb_update_block(msb, lba, sg, offset); in msb_do_write_request()
1829 offset += msb->block_size; in msb_do_write_request()
1830 *sucessfuly_written += msb->block_size; in msb_do_write_request()
1835 error = msb_cache_write(msb, lba, page, false, sg, offset); in msb_do_write_request()
1839 offset += msb->page_size; in msb_do_write_request()
1840 *sucessfuly_written += msb->page_size; in msb_do_write_request()
1843 if (page == msb->pages_in_block) { in msb_do_write_request()
1851 static int msb_do_read_request(struct msb_data *msb, int lba, in msb_do_read_request() argument
1860 error = msb_cache_read(msb, lba, page, sg, offset); in msb_do_read_request()
1864 offset += msb->page_size; in msb_do_read_request()
1865 *sucessfuly_read += msb->page_size; in msb_do_read_request()
1868 if (page == msb->pages_in_block) { in msb_do_read_request()
1878 struct msb_data *msb = container_of(work, struct msb_data, io_work); in msb_io_work() local
1881 struct scatterlist *sg = msb->prealloc_sg; in msb_io_work()
1887 spin_lock_irq(&msb->q_lock); in msb_io_work()
1889 if (msb->need_flush_cache) { in msb_io_work()
1890 msb->need_flush_cache = false; in msb_io_work()
1891 spin_unlock_irq(&msb->q_lock); in msb_io_work()
1892 msb_cache_flush(msb); in msb_io_work()
1896 req = msb->req; in msb_io_work()
1899 spin_unlock_irq(&msb->q_lock); in msb_io_work()
1903 spin_unlock_irq(&msb->q_lock); in msb_io_work()
1907 blk_rq_map_sg(msb->queue, req, sg); in msb_io_work()
1911 sector_div(lba, msb->page_size / 512); in msb_io_work()
1912 page = sector_div(lba, msb->pages_in_block); in msb_io_work()
1914 if (rq_data_dir(msb->req) == READ) in msb_io_work()
1915 error = msb_do_read_request(msb, lba, page, sg, in msb_io_work()
1918 error = msb_do_write_request(msb, lba, page, sg, in msb_io_work()
1923 spin_lock_irq(&msb->q_lock); in msb_io_work()
1924 msb->req = NULL; in msb_io_work()
1925 spin_unlock_irq(&msb->q_lock); in msb_io_work()
1928 if (error && msb->req) { in msb_io_work()
1933 spin_lock_irq(&msb->q_lock); in msb_io_work()
1934 msb->req = NULL; in msb_io_work()
1935 spin_unlock_irq(&msb->q_lock); in msb_io_work()
1938 if (msb->req) in msb_io_work()
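
The two sector_div() calls in msb_io_work() above convert the request's 512-byte start sector into a logical block number and a page inside that block. A worked example, assuming the same illustrative geometry as the earlier sketches (512-byte pages, 32 pages per block):

```c
#include <stdio.h>

int main(void)
{
	unsigned long long sector = 1000;
	unsigned int page_size = 512, pages_in_block = 32;

	unsigned long long lba = sector / (page_size / 512); /* sectors -> pages  */
	unsigned int page = lba % pages_in_block;            /* page within block */
	lba /= pages_in_block;                               /* logical block     */

	/* prints: sector 1000 -> lba 31, page 8 */
	printf("sector %llu -> lba %llu, page %u\n", sector, lba, page);
	return 0;
}
```
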
1946 static void msb_data_clear(struct msb_data *msb) in msb_data_clear() argument
1948 kfree(msb->boot_page); in msb_data_clear()
1949 bitmap_free(msb->used_blocks_bitmap); in msb_data_clear()
1950 bitmap_free(msb->erased_blocks_bitmap); in msb_data_clear()
1951 kfree(msb->lba_to_pba_table); in msb_data_clear()
1952 kfree(msb->cache); in msb_data_clear()
1953 msb->card = NULL; in msb_data_clear()
1959 struct msb_data *msb = bdev->bd_disk->private_data; in msb_bd_getgeo() local
1960 *geo = msb->geometry; in msb_bd_getgeo()
1966 struct msb_data *msb = disk->private_data; in msb_bd_free_disk() local
1969 idr_remove(&msb_disk_idr, msb->disk_id); in msb_bd_free_disk()
1972 kfree(msb); in msb_bd_free_disk()
1979 struct msb_data *msb = memstick_get_drvdata(card); in msb_queue_rq() local
1984 spin_lock_irq(&msb->q_lock); in msb_queue_rq()
1986 if (msb->card_dead) { in msb_queue_rq()
1989 WARN_ON(!msb->io_queue_stopped); in msb_queue_rq()
1991 spin_unlock_irq(&msb->q_lock); in msb_queue_rq()
1996 if (msb->req) { in msb_queue_rq()
1997 spin_unlock_irq(&msb->q_lock); in msb_queue_rq()
2002 msb->req = req; in msb_queue_rq()
2004 if (!msb->io_queue_stopped) in msb_queue_rq()
2005 queue_work(msb->io_queue, &msb->io_work); in msb_queue_rq()
2007 spin_unlock_irq(&msb->q_lock); in msb_queue_rq()
2013 struct msb_data *msb = memstick_get_drvdata(card); in msb_check_card() local
2015 return (msb->card_dead == 0); in msb_check_card()
2020 struct msb_data *msb = memstick_get_drvdata(card); in msb_stop() local
2025 blk_mq_stop_hw_queues(msb->queue); in msb_stop()
2026 spin_lock_irqsave(&msb->q_lock, flags); in msb_stop()
2027 msb->io_queue_stopped = true; in msb_stop()
2028 spin_unlock_irqrestore(&msb->q_lock, flags); in msb_stop()
2030 del_timer_sync(&msb->cache_flush_timer); in msb_stop()
2031 flush_workqueue(msb->io_queue); in msb_stop()
2033 spin_lock_irqsave(&msb->q_lock, flags); in msb_stop()
2034 if (msb->req) { in msb_stop()
2035 blk_mq_requeue_request(msb->req, false); in msb_stop()
2036 msb->req = NULL; in msb_stop()
2038 spin_unlock_irqrestore(&msb->q_lock, flags); in msb_stop()
2043 struct msb_data *msb = memstick_get_drvdata(card); in msb_start() local
2048 msb_invalidate_reg_window(msb); in msb_start()
2050 spin_lock_irqsave(&msb->q_lock, flags); in msb_start()
2051 if (!msb->io_queue_stopped || msb->card_dead) { in msb_start()
2052 spin_unlock_irqrestore(&msb->q_lock, flags); in msb_start()
2055 spin_unlock_irqrestore(&msb->q_lock, flags); in msb_start()
2058 msb->need_flush_cache = true; in msb_start()
2059 msb->io_queue_stopped = false; in msb_start()
2061 blk_mq_start_hw_queues(msb->queue); in msb_start()
2063 queue_work(msb->io_queue, &msb->io_work); in msb_start()
2080 struct msb_data *msb = memstick_get_drvdata(card); in msb_init_disk() local
2082 .logical_block_size = msb->page_size, in msb_init_disk()
2085 .max_segment_size = MS_BLOCK_MAX_PAGES * msb->page_size, in msb_init_disk()
2091 msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL); in msb_init_disk()
2094 if (msb->disk_id < 0) in msb_init_disk()
2095 return msb->disk_id; in msb_init_disk()
2097 rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &msb_mq_ops, 2, in msb_init_disk()
2102 msb->disk = blk_mq_alloc_disk(&msb->tag_set, &lim, card); in msb_init_disk()
2103 if (IS_ERR(msb->disk)) { in msb_init_disk()
2104 rc = PTR_ERR(msb->disk); in msb_init_disk()
2107 msb->queue = msb->disk->queue; in msb_init_disk()
2109 sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id); in msb_init_disk()
2110 msb->disk->fops = &msb_bdops; in msb_init_disk()
2111 msb->disk->private_data = msb; in msb_init_disk()
2113 capacity = msb->pages_in_block * msb->logical_block_count; in msb_init_disk()
2114 capacity *= (msb->page_size / 512); in msb_init_disk()
2115 set_capacity(msb->disk, capacity); in msb_init_disk()
2118 msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM); in msb_init_disk()
2119 if (!msb->io_queue) { in msb_init_disk()
2124 INIT_WORK(&msb->io_work, msb_io_work); in msb_init_disk()
2125 sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1); in msb_init_disk()
2127 if (msb->read_only) in msb_init_disk()
2128 set_disk_ro(msb->disk, 1); in msb_init_disk()
2131 rc = device_add_disk(&card->dev, msb->disk, NULL); in msb_init_disk()
2138 destroy_workqueue(msb->io_queue); in msb_init_disk()
2140 put_disk(msb->disk); in msb_init_disk()
2142 blk_mq_free_tag_set(&msb->tag_set); in msb_init_disk()
2145 idr_remove(&msb_disk_idr, msb->disk_id); in msb_init_disk()
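
The set_capacity() math in msb_init_disk() above exports pages_in_block * logical_block_count pages, scaled to 512-byte sectors. With the same assumed 64 MB geometry as the earlier sketches:

```c
#include <stdio.h>

int main(void)
{
	unsigned long long capacity;
	int pages_in_block = 32, page_size = 512, logical_block_count = 3966;

	capacity  = (unsigned long long)pages_in_block * logical_block_count;
	capacity *= page_size / 512;        /* -> 126912 sectors, ~62 MB */

	printf("%llu sectors\n", capacity);
	return 0;
}
```
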
2152 struct msb_data *msb; in msb_probe() local
2155 msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL); in msb_probe()
2156 if (!msb) in msb_probe()
2158 memstick_set_drvdata(card, msb); in msb_probe()
2159 msb->card = card; in msb_probe()
2160 spin_lock_init(&msb->q_lock); in msb_probe()
2175 msb_data_clear(msb); in msb_probe()
2176 kfree(msb); in msb_probe()
2182 struct msb_data *msb = memstick_get_drvdata(card); in msb_remove() local
2185 if (!msb->io_queue_stopped) in msb_remove()
2191 spin_lock_irqsave(&msb->q_lock, flags); in msb_remove()
2192 msb->card_dead = true; in msb_remove()
2193 spin_unlock_irqrestore(&msb->q_lock, flags); in msb_remove()
2194 blk_mq_start_hw_queues(msb->queue); in msb_remove()
2197 del_gendisk(msb->disk); in msb_remove()
2198 blk_mq_free_tag_set(&msb->tag_set); in msb_remove()
2199 msb->queue = NULL; in msb_remove()
2202 msb_data_clear(msb); in msb_remove()
2205 put_disk(msb->disk); in msb_remove()
2219 struct msb_data *msb = memstick_get_drvdata(card); in msb_resume() local
2224 msb->card_dead = true; in msb_resume()
2236 sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1); in msb_resume()
2241 if (msb->block_size != new_msb->block_size) in msb_resume()
2244 if (memcmp(msb->boot_page, new_msb->boot_page, in msb_resume()
2248 if (msb->logical_block_count != new_msb->logical_block_count || in msb_resume()
2249 memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table, in msb_resume()
2250 msb->logical_block_count)) in msb_resume()
2253 if (msb->block_count != new_msb->block_count || in msb_resume()
2254 !bitmap_equal(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap, in msb_resume()
2255 msb->block_count)) in msb_resume()
2263 msb->card_dead = card_dead; in msb_resume()
2264 memstick_set_drvdata(card, msb); in msb_resume()