Lines matching refs:hwq (identifier cross-reference; the leading numbers are line numbers in the source file)
234 struct ufs_hw_queue *hwq; in ufshcd_mcq_memory_alloc() local
239 hwq = &hba->uhq[i]; in ufshcd_mcq_memory_alloc()
242 hwq->max_entries; in ufshcd_mcq_memory_alloc()
243 hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size, in ufshcd_mcq_memory_alloc()
244 &hwq->sqe_dma_addr, in ufshcd_mcq_memory_alloc()
246 if (!hwq->sqe_dma_addr) { in ufshcd_mcq_memory_alloc()
251 cqe_size = sizeof(struct cq_entry) * hwq->max_entries; in ufshcd_mcq_memory_alloc()
252 hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size, in ufshcd_mcq_memory_alloc()
253 &hwq->cqe_dma_addr, in ufshcd_mcq_memory_alloc()
255 if (!hwq->cqe_dma_addr) { in ufshcd_mcq_memory_alloc()
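
These references are the per-queue DMA ring allocation. A condensed sketch of the loop they come from, reassembled around the fragments above (the loop bound and error handling are assumptions, not shown in this listing; dmam_alloc_coherent() is device-managed, so the error path needs no matching free):

    for (i = 0; i < hba->nr_hw_queues; i++) {
            hwq = &hba->uhq[i];

            /* SQ ring: one transfer request descriptor per slot; line 242
             * above is the wrapped tail of this statement. */
            utrdl_size = sizeof(struct utp_transfer_req_desc) *
                         hwq->max_entries;
            hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size,
                                                     &hwq->sqe_dma_addr,
                                                     GFP_KERNEL);
            if (!hwq->sqe_dma_addr)
                    return -ENOMEM;

            /* CQ ring: same slot count, one cq_entry per slot. */
            cqe_size = sizeof(struct cq_entry) * hwq->max_entries;
            hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size,
                                                     &hwq->cqe_dma_addr,
                                                     GFP_KERNEL);
            if (!hwq->cqe_dma_addr)
                    return -ENOMEM;
    }
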
303 struct ufs_hw_queue *hwq) in ufshcd_mcq_process_cqe() argument
305 struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq); in ufshcd_mcq_process_cqe()
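
ufshcd_mcq_process_cqe() starts from the CQE at the current head slot. The helper called on line 305 is not expanded in this listing; given the cqe_base_addr and cq_head_slot fields seen elsewhere, it plausibly reduces to:

    /* Sketch only: body inferred from the fields in this listing. */
    static struct cq_entry *ufshcd_mcq_cur_cqe(struct ufs_hw_queue *q)
    {
            return q->cqe_base_addr + q->cq_head_slot;
    }
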
316 struct ufs_hw_queue *hwq) in ufshcd_mcq_compl_all_cqes_lock() argument
319 u32 entries = hwq->max_entries; in ufshcd_mcq_compl_all_cqes_lock()
321 spin_lock_irqsave(&hwq->cq_lock, flags); in ufshcd_mcq_compl_all_cqes_lock()
323 ufshcd_mcq_process_cqe(hba, hwq); in ufshcd_mcq_compl_all_cqes_lock()
324 ufshcd_mcq_inc_cq_head_slot(hwq); in ufshcd_mcq_compl_all_cqes_lock()
328 ufshcd_mcq_update_cq_tail_slot(hwq); in ufshcd_mcq_compl_all_cqes_lock()
329 hwq->cq_head_slot = hwq->cq_tail_slot; in ufshcd_mcq_compl_all_cqes_lock()
330 spin_unlock_irqrestore(&hwq->cq_lock, flags); in ufshcd_mcq_compl_all_cqes_lock()
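
Reassembled, ufshcd_mcq_compl_all_cqes_lock() force-drains every slot under cq_lock and then snaps the software head back to the hardware tail; the loop condition and the decrement of entries fall between lines 323 and 328 and are assumptions here:

    u32 entries = hwq->max_entries;
    unsigned long flags;

    spin_lock_irqsave(&hwq->cq_lock, flags);
    while (entries > 0) {
            ufshcd_mcq_process_cqe(hba, hwq);
            ufshcd_mcq_inc_cq_head_slot(hwq);
            entries--;
    }

    /* Resynchronize: software head catches up to the hardware tail. */
    ufshcd_mcq_update_cq_tail_slot(hwq);
    hwq->cq_head_slot = hwq->cq_tail_slot;
    spin_unlock_irqrestore(&hwq->cq_lock, flags);
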
334 struct ufs_hw_queue *hwq) in ufshcd_mcq_poll_cqe_lock() argument
339 spin_lock_irqsave(&hwq->cq_lock, flags); in ufshcd_mcq_poll_cqe_lock()
340 ufshcd_mcq_update_cq_tail_slot(hwq); in ufshcd_mcq_poll_cqe_lock()
341 while (!ufshcd_mcq_is_cq_empty(hwq)) { in ufshcd_mcq_poll_cqe_lock()
342 ufshcd_mcq_process_cqe(hba, hwq); in ufshcd_mcq_poll_cqe_lock()
343 ufshcd_mcq_inc_cq_head_slot(hwq); in ufshcd_mcq_poll_cqe_lock()
348 ufshcd_mcq_update_cq_head(hwq); in ufshcd_mcq_poll_cqe_lock()
349 spin_unlock_irqrestore(&hwq->cq_lock, flags); in ufshcd_mcq_poll_cqe_lock()
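
The regular completion path is the classic single-consumer ring pattern: snapshot the hardware tail, consume CQEs until the head meets it, then publish the new head back to the controller. Reassembled from the lines above (the completed_reqs counter is an assumption, based on the usual return value of such poll routines):

    unsigned long completed_reqs = 0, flags;

    spin_lock_irqsave(&hwq->cq_lock, flags);
    ufshcd_mcq_update_cq_tail_slot(hwq);       /* latch HW tail pointer */
    while (!ufshcd_mcq_is_cq_empty(hwq)) {     /* i.e. head != tail */
            ufshcd_mcq_process_cqe(hba, hwq);
            ufshcd_mcq_inc_cq_head_slot(hwq);
            completed_reqs++;
    }

    if (completed_reqs)
            ufshcd_mcq_update_cq_head(hwq);    /* write head back to HW */
    spin_unlock_irqrestore(&hwq->cq_lock, flags);
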
357 struct ufs_hw_queue *hwq; in ufshcd_mcq_make_queues_operational() local
362 hwq = &hba->uhq[i]; in ufshcd_mcq_make_queues_operational()
363 hwq->id = i; in ufshcd_mcq_make_queues_operational()
364 qsize = hwq->max_entries * MCQ_ENTRY_SIZE_IN_DWORD - 1; in ufshcd_mcq_make_queues_operational()
367 ufsmcq_writelx(hba, lower_32_bits(hwq->sqe_dma_addr), in ufshcd_mcq_make_queues_operational()
370 ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr), in ufshcd_mcq_make_queues_operational()
380 ufsmcq_writelx(hba, lower_32_bits(hwq->cqe_dma_addr), in ufshcd_mcq_make_queues_operational()
383 ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr), in ufshcd_mcq_make_queues_operational()
393 hwq->mcq_sq_head = mcq_opr_base(hba, OPR_SQD, i) + REG_SQHP; in ufshcd_mcq_make_queues_operational()
394 hwq->mcq_sq_tail = mcq_opr_base(hba, OPR_SQD, i) + REG_SQTP; in ufshcd_mcq_make_queues_operational()
395 hwq->mcq_cq_head = mcq_opr_base(hba, OPR_CQD, i) + REG_CQHP; in ufshcd_mcq_make_queues_operational()
396 hwq->mcq_cq_tail = mcq_opr_base(hba, OPR_CQD, i) + REG_CQTP; in ufshcd_mcq_make_queues_operational()
399 hwq->sq_tail_slot = hwq->cq_tail_slot = hwq->cq_head_slot = 0; in ufshcd_mcq_make_queues_operational()
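
This group programs each queue's ring base addresses into the controller and caches the per-queue doorbell register addresses for later fast access. One loop iteration, condensed; the MCQ_CFG_n() accessor and the REG_SQLBA/REG_SQUBA/REG_CQLBA/REG_CQUBA offsets are assumptions filled in around the fragments above:

    hwq = &hba->uhq[i];
    hwq->id = i;
    /* Queue size is programmed in DWORDs, minus one (line 364). */
    qsize = hwq->max_entries * MCQ_ENTRY_SIZE_IN_DWORD - 1;

    /* 64-bit SQ and CQ base addresses, split across two registers each. */
    ufsmcq_writelx(hba, lower_32_bits(hwq->sqe_dma_addr),
                   MCQ_CFG_n(REG_SQLBA, i));
    ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr),
                   MCQ_CFG_n(REG_SQUBA, i));
    ufsmcq_writelx(hba, lower_32_bits(hwq->cqe_dma_addr),
                   MCQ_CFG_n(REG_CQLBA, i));
    ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr),
                   MCQ_CFG_n(REG_CQUBA, i));

    /* Cache head/tail doorbell addresses so the hot path need not
     * recompute the operation-and-runtime register bases each time. */
    hwq->mcq_sq_head = mcq_opr_base(hba, OPR_SQD, i) + REG_SQHP;
    hwq->mcq_sq_tail = mcq_opr_base(hba, OPR_SQD, i) + REG_SQTP;
    hwq->mcq_cq_head = mcq_opr_base(hba, OPR_CQD, i) + REG_CQHP;
    hwq->mcq_cq_tail = mcq_opr_base(hba, OPR_CQD, i) + REG_CQTP;

    /* A fresh queue starts with all software slot indices at zero. */
    hwq->sq_tail_slot = hwq->cq_tail_slot = hwq->cq_head_slot = 0;
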
450 struct ufs_hw_queue *hwq; in ufshcd_mcq_init() local
476 hwq = &hba->uhq[i]; in ufshcd_mcq_init()
477 hwq->max_entries = hba->nutrs + 1; in ufshcd_mcq_init()
478 spin_lock_init(&hwq->sq_lock); in ufshcd_mcq_init()
479 spin_lock_init(&hwq->cq_lock); in ufshcd_mcq_init()
480 mutex_init(&hwq->sq_mutex); in ufshcd_mcq_init()
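
Initialization sizes each ring and sets up its synchronization primitives. Note max_entries = nutrs + 1: presumably one spare slot so a completely full ring stays distinguishable from an empty one (head == tail meaning empty). Reassembled, with the loop bound as an assumption:

    for (i = 0; i < hba->nr_hw_queues; i++) {
            hwq = &hba->uhq[i];
            hwq->max_entries = hba->nutrs + 1;
            spin_lock_init(&hwq->sq_lock);  /* guards SQ tail updates */
            spin_lock_init(&hwq->cq_lock);  /* guards CQ processing */
            mutex_init(&hwq->sq_mutex);     /* held across SQ stop/start */
    }
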
490 static int ufshcd_mcq_sq_stop(struct ufs_hba *hba, struct ufs_hw_queue *hwq) in ufshcd_mcq_sq_stop() argument
493 u32 id = hwq->id, val; in ufshcd_mcq_sq_stop()
509 static int ufshcd_mcq_sq_start(struct ufs_hba *hba, struct ufs_hw_queue *hwq) in ufshcd_mcq_sq_start() argument
512 u32 id = hwq->id, val; in ufshcd_mcq_sq_start()
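
Both helpers need only hwq->id to address that queue's run-time control registers. A sketch of the stop side, assuming the UFSHCI 4.0 SQRTC/SQRTS register pair, the SQ_STOP/SQ_STS bits, and the kernel's read_poll_timeout() helper (everything beyond hwq->id is an assumption relative to this listing):

    static int ufshcd_mcq_sq_stop(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
    {
            u32 id = hwq->id, val;
            void __iomem *reg;

            /* Ask the controller to stop fetching from this SQ ... */
            writel(SQ_STOP, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);

            /* ... then poll the status register until it confirms. */
            reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
            return read_poll_timeout(readl, val, val & SQ_STS, 20,
                                     MCQ_POLL_US, false, reg);
    }

ufshcd_mcq_sq_start() would be the mirror image, with the start and status bits swapped in.
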
540 struct ufs_hw_queue *hwq; in ufshcd_mcq_sq_cleanup() local
551 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); in ufshcd_mcq_sq_cleanup()
552 if (!hwq) in ufshcd_mcq_sq_cleanup()
555 hwq = hba->dev_cmd_queue; in ufshcd_mcq_sq_cleanup()
558 id = hwq->id; in ufshcd_mcq_sq_cleanup()
560 mutex_lock(&hwq->sq_mutex); in ufshcd_mcq_sq_cleanup()
563 err = ufshcd_mcq_sq_stop(hba, hwq); in ufshcd_mcq_sq_cleanup()
585 if (ufshcd_mcq_sq_start(hba, hwq)) in ufshcd_mcq_sq_cleanup()
589 mutex_unlock(&hwq->sq_mutex); in ufshcd_mcq_sq_cleanup()
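
The cleanup path first resolves the command to its queue: ufshcd_mcq_req_to_hwq() for ordinary SCSI commands (a NULL result means nothing to clean up), or hba->dev_cmd_queue for the reserved device-management tag. The bracket structure around the actual cleanup, reassembled from the lines above (the register writes in the middle and the error code are assumptions, as they are not part of this listing):

    mutex_lock(&hwq->sq_mutex);          /* exclude concurrent submitters */

    err = ufshcd_mcq_sq_stop(hba, hwq);  /* freeze SQE fetching first */
    if (err)
            goto unlock;

    /* ... program the per-queue cleanup registers for this task tag
     * (not visible in this listing) ... */

    if (ufshcd_mcq_sq_start(hba, hwq))   /* always restart fetching */
            err = -ETIMEDOUT;            /* assumed error code */

    unlock:
    mutex_unlock(&hwq->sq_mutex);
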
618 struct ufs_hw_queue *hwq, int task_tag) in ufshcd_mcq_sqe_search() argument
630 mutex_lock(&hwq->sq_mutex); in ufshcd_mcq_sqe_search()
632 ufshcd_mcq_sq_stop(hba, hwq); in ufshcd_mcq_sqe_search()
633 sq_head_slot = ufshcd_mcq_get_sq_head_slot(hwq); in ufshcd_mcq_sqe_search()
634 if (sq_head_slot == hwq->sq_tail_slot) in ufshcd_mcq_sqe_search()
640 while (sq_head_slot != hwq->sq_tail_slot) { in ufshcd_mcq_sqe_search()
641 utrd = hwq->sqe_base_addr + sq_head_slot; in ufshcd_mcq_sqe_search()
650 if (sq_head_slot == hwq->max_entries) in ufshcd_mcq_sqe_search()
655 ufshcd_mcq_sq_start(hba, hwq); in ufshcd_mcq_sqe_search()
656 mutex_unlock(&hwq->sq_mutex); in ufshcd_mcq_sqe_search()
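
The search applies the same stop/start bracket to a ring scan: with the SQ frozen, walk from the hardware head slot to the software tail slot, wrapping at max_entries, looking for the SQE that belongs to task_tag so it can be nullified into a no-op. Reassembled; the tag-matching test itself does not appear in this listing and is left as a comment:

    mutex_lock(&hwq->sq_mutex);

    ufshcd_mcq_sq_stop(hba, hwq);
    sq_head_slot = ufshcd_mcq_get_sq_head_slot(hwq);
    if (sq_head_slot == hwq->sq_tail_slot)
            goto out;                    /* SQ empty: nothing pending */

    while (sq_head_slot != hwq->sq_tail_slot) {
            utrd = hwq->sqe_base_addr + sq_head_slot;
            /* if utrd matches task_tag: nullify the SQE and stop searching
             * (matching logic not shown in this listing) */
            sq_head_slot++;
            if (sq_head_slot == hwq->max_entries)
                    sq_head_slot = 0;    /* wrap around the ring */
    }

    out:
    ufshcd_mcq_sq_start(hba, hwq);       /* let the controller fetch again */
    mutex_unlock(&hwq->sq_mutex);
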
672 struct ufs_hw_queue *hwq; in ufshcd_mcq_abort() local
690 hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); in ufshcd_mcq_abort()
692 if (ufshcd_mcq_sqe_search(hba, hwq, tag)) { in ufshcd_mcq_abort()
698 __func__, hwq->id, tag); in ufshcd_mcq_abort()
714 spin_lock_irqsave(&hwq->cq_lock, flags); in ufshcd_mcq_abort()
717 spin_unlock_irqrestore(&hwq->cq_lock, flags); in ufshcd_mcq_abort()
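
The abort path ties the pieces together: resolve the command's queue, try to catch it while it still sits in the SQ, and otherwise finish under cq_lock so a racing completion on the same queue cannot process the command concurrently. A heavily hedged skeleton; everything between the fragments above is an assumption:

    hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));

    if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
            /* Still in the SQ; line 698 is the tail of a dev_err() with
             * these arguments. The format string and the return policy
             * after a hit are not shown in this listing. */
            dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n",
                    __func__, hwq->id, tag);
            return FAILED;
    }

    /* ... otherwise issue the task-management abort to the device ... */

    spin_lock_irqsave(&hwq->cq_lock, flags);
    /* release the aborted command here; cq_lock keeps the CQ handler out */
    spin_unlock_irqrestore(&hwq->cq_lock, flags);
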