// SPDX-License-Identifier: GPL-2.0
/*
 * MMC software queue support based on command queue interfaces
 */

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/module.h>

#include "mmc_hsq.h"

#define HSQ_NUM_SLOTS	64
#define HSQ_INVALID_TAG	HSQ_NUM_SLOTS

static void mmc_hsq_retry_handler(struct work_struct *work)
{
	struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
	struct mmc_host *mmc = hsq->mmc;

	/* Re-dispatch the deferred request in non-atomic (workqueue) context */
	mmc->ops->request(mmc, hsq->mrq);
}
static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
	struct mmc_host *mmc = hsq->mmc;
	struct hsq_slot *slot;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&hsq->lock, flags);

	/* Make sure we are not already running a request now */
	if (hsq->mrq) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	/* Make sure there are remaining requests that need to be pumped */
	if (!hsq->qcnt || !hsq->enabled) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	slot = &hsq->slot[hsq->next_tag];
	hsq->mrq = slot->mrq;
	hsq->qcnt--;

	spin_unlock_irqrestore(&hsq->lock, flags);

	if (mmc->ops->request_atomic)
		ret = mmc->ops->request_atomic(mmc, hsq->mrq);
	else
		mmc->ops->request(mmc, hsq->mrq);

	/*
	 * If request_atomic() returns -EBUSY, it means the card
	 * may be busy now, and we should change to non-atomic context to
	 * try again for this unusual case, to avoid time-consuming operations
	 * in the atomic context.
	 *
	 * Note: we just give a warning for other error cases, since the host
	 * driver will handle them.
	 */
	if (ret == -EBUSY)
		schedule_work(&hsq->retry_work);
	else
		WARN_ON_ONCE(ret);
}
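/*
 * Illustration only, not part of mmc_hsq.c: a sketch of a host driver's
 * ->request_atomic() callback. struct foo_host, FOO_STATUS,
 * FOO_STATUS_CARD_BUSY and foo_send_command() are hypothetical names.
 * Returning -EBUSY from here makes mmc_hsq_pump_requests() above defer
 * the dispatch to retry_work, so the busy card is handled in process
 * context instead of the atomic completion path.
 */
#if 0
static int foo_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct foo_host *host = mmc_priv(mmc);	/* hypothetical private data */

	/* Card still busy: ask the software queue to retry non-atomically */
	if (readl(host->base + FOO_STATUS) & FOO_STATUS_CARD_BUSY)
		return -EBUSY;

	foo_send_command(host, mrq);		/* hypothetical helper */
	return 0;
}
#endif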
static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
{
	struct hsq_slot *slot;
	int tag;

	/*
	 * If there are no remaining requests in the software queue, then set
	 * an invalid tag.
	 */
	if (!remains) {
		hsq->next_tag = HSQ_INVALID_TAG;
		return;
	}

	/*
	 * Increase the next tag and check if the corresponding request is
	 * available; if yes, then we have found a candidate request.
	 */
	if (++hsq->next_tag != HSQ_INVALID_TAG) {
		slot = &hsq->slot[hsq->next_tag];
		if (slot->mrq)
			return;
	}

	/* Otherwise iterate over all slots to find an available tag. */
	for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
		slot = &hsq->slot[tag];
		if (slot->mrq)
			break;
	}

	if (tag == HSQ_NUM_SLOTS)
		tag = HSQ_INVALID_TAG;

	hsq->next_tag = tag;
}
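/*
 * Worked example of the tag policy above: if the just-completed request
 * used tag 3 and slot 4 already holds a queued request, next_tag becomes
 * 4 via the cheap increment path. If slot 4 is empty, all HSQ_NUM_SLOTS
 * slots are scanned from 0 and the first occupied one wins; the final
 * fallback to HSQ_INVALID_TAG is defensive, since remains > 0 implies at
 * least one occupied slot exists.
 */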
static void mmc_hsq_post_request(struct mmc_hsq *hsq)
{
	unsigned long flags;
	int remains;

	spin_lock_irqsave(&hsq->lock, flags);

	remains = hsq->qcnt;
	hsq->mrq = NULL;

	/* Update the next available tag to be queued. */
	mmc_hsq_update_next_tag(hsq, remains);

	if (hsq->waiting_for_idle && !remains) {
		hsq->waiting_for_idle = false;
		wake_up(&hsq->wait_queue);
	}

	/* Do not pump new requests in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&hsq->lock, flags);

	/*
	 * Try to pump a new request to the host controller as fast as
	 * possible after completing the previous request.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}
/**
 * mmc_hsq_finalize_request - finalize one request if the request is done
 * @mmc: the host controller
 * @mrq: the request to be finalized
 *
 * Return true if we finalized the corresponding request in the software
 * queue, otherwise return false.
 */
bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return false;
	}

	/* Clear the completed slot request to make room for a new request. */
	hsq->slot[hsq->next_tag].mrq = NULL;

	spin_unlock_irqrestore(&hsq->lock, flags);

	mmc_cqe_request_done(mmc, hsq->mrq);

	mmc_hsq_post_request(hsq);

	return true;
}
EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);
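/*
 * Illustration only, not part of mmc_hsq.c: how a host driver's completion
 * path typically hands a finished request back to the software queue.
 * struct foo_host and foo_complete_request() are hypothetical;
 * mmc_request_done() is the ordinary non-queued completion helper.
 */
#if 0
static void foo_complete_request(struct foo_host *host, struct mmc_request *mrq)
{
	/* true: hsq owned this request and has finalized it */
	if (mmc_hsq_finalize_request(host->mmc, mrq))
		return;

	/* false: not the current hsq request, complete it the normal way */
	mmc_request_done(host->mmc, mrq);
}
#endif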
static void mmc_hsq_recovery_start(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	/* Block queueing and pumping of new requests during recovery */
	hsq->recovery_halt = true;

	spin_unlock_irqrestore(&hsq->lock, flags);
}

static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int remains;

	spin_lock_irq(&hsq->lock);

	hsq->recovery_halt = false;
	remains = hsq->qcnt;

	spin_unlock_irq(&hsq->lock);

	/*
	 * Try to pump new requests if some are still pending in the software
	 * queue after finishing recovery.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}
static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int tag = mrq->tag;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -ESHUTDOWN;
	}

	/* Do not queue any new requests in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->slot[tag].mrq = mrq;

	/*
	 * Set the next tag as the current request's tag if no next tag is
	 * available.
	 */
	if (hsq->next_tag == HSQ_INVALID_TAG)
		hsq->next_tag = tag;

	hsq->qcnt++;

	spin_unlock_irq(&hsq->lock);

	mmc_hsq_pump_requests(hsq);

	return 0;
}
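/*
 * Note: the core dispatches requests to mmc_hsq_request() through the
 * cqe_ops installed by mmc_hsq_init() below, roughly (simplified from
 * drivers/mmc/core/core.c):
 *
 *	err = host->cqe_ops->cqe_request(host, mrq);
 *
 * so mrq->tag has already been assigned from the blk-mq request tag by
 * the block layer before this function runs.
 */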
static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	if (mmc->ops->post_req)
		mmc->ops->post_req(mmc, mrq, 0);
}

static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
{
	bool is_idle;

	spin_lock_irq(&hsq->lock);

	is_idle = (!hsq->mrq && !hsq->qcnt) ||
		hsq->recovery_halt;

	*ret = hsq->recovery_halt ? -EBUSY : 0;
	hsq->waiting_for_idle = !is_idle;

	spin_unlock_irq(&hsq->lock);

	return is_idle;
}

static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int ret;

	wait_event(hsq->wait_queue,
		   mmc_hsq_queue_is_idle(hsq, &ret));

	return ret;
}
static void mmc_hsq_disable(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	u32 timeout = 500;
	int ret;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return;
	}

	spin_unlock_irq(&hsq->lock);

	/* Wait for the queue to drain before disabling it */
	ret = wait_event_timeout(hsq->wait_queue,
				 mmc_hsq_queue_is_idle(hsq, &ret),
				 msecs_to_jiffies(timeout));
	if (ret == 0) {
		pr_warn("could not stop mmc software queue\n");
		return;
	}

	spin_lock_irq(&hsq->lock);

	hsq->enabled = false;

	spin_unlock_irq(&hsq->lock);
}
static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct mmc_hsq *hsq = mmc->cqe_private;

	spin_lock_irq(&hsq->lock);

	if (hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->enabled = true;

	spin_unlock_irq(&hsq->lock);

	return 0;
}

static const struct mmc_cqe_ops mmc_hsq_ops = {
	.cqe_enable = mmc_hsq_enable,
	.cqe_disable = mmc_hsq_disable,
	.cqe_request = mmc_hsq_request,
	.cqe_post_req = mmc_hsq_post_req,
	.cqe_wait_for_idle = mmc_hsq_wait_for_idle,
	.cqe_recovery_start = mmc_hsq_recovery_start,
	.cqe_recovery_finish = mmc_hsq_recovery_finish,
};
int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
{
	hsq->num_slots = HSQ_NUM_SLOTS;
	hsq->next_tag = HSQ_INVALID_TAG;

	hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
				 sizeof(struct hsq_slot), GFP_KERNEL);
	if (!hsq->slot)
		return -ENOMEM;

	hsq->mmc = mmc;
	hsq->mmc->cqe_private = hsq;
	mmc->cqe_ops = &mmc_hsq_ops;

	INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
	spin_lock_init(&hsq->lock);
	init_waitqueue_head(&hsq->wait_queue);

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_hsq_init);
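/*
 * Illustration only, not part of mmc_hsq.c: how a host driver wires up the
 * software queue at probe time. The foo_* names are hypothetical; the
 * allocate-then-init pattern follows existing users such as sdhci-sprd.
 */
#if 0
static int foo_probe_hsq(struct platform_device *pdev, struct mmc_host *mmc)
{
	struct mmc_hsq *hsq;

	hsq = devm_kzalloc(&pdev->dev, sizeof(*hsq), GFP_KERNEL);
	if (!hsq)
		return -ENOMEM;

	/* Installs mmc_hsq_ops as mmc->cqe_ops and sets mmc->cqe_private */
	return mmc_hsq_init(hsq, mmc);
}
#endif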
void mmc_hsq_suspend(struct mmc_host *mmc)
{
	mmc_hsq_disable(mmc);
}
EXPORT_SYMBOL_GPL(mmc_hsq_suspend);

int mmc_hsq_resume(struct mmc_host *mmc)
{
	return mmc_hsq_enable(mmc, NULL);
}
EXPORT_SYMBOL_GPL(mmc_hsq_resume);

MODULE_DESCRIPTION("MMC Host Software Queue support");
MODULE_LICENSE("GPL v2");
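/*
 * Illustration only, not part of mmc_hsq.c: hypothetical system PM
 * callbacks in a host driver. mmc_hsq_suspend() drains (with a timeout)
 * and disables the queue; mmc_hsq_resume() re-enables it. The foo_*
 * names are made up for this sketch.
 */
#if 0
static int foo_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);

	mmc_hsq_suspend(mmc);
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);

	return mmc_hsq_resume(mmc);
}
#endif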