Lines Matching defs:req

199 struct sba_request *req = NULL;
202 list_for_each_entry(req, &sba->reqs_free_list, node) {
203 if (async_tx_test_ack(&req->tx)) {
204 list_move_tail(&req->node, &sba->reqs_alloc_list);
222 req->flags = SBA_REQUEST_STATE_ALLOCED;
223 req->first = req;
224 INIT_LIST_HEAD(&req->next);
225 atomic_set(&req->next_pending_count, 1);
227 dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
228 async_tx_ack(&req->tx);
230 return req;
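
Lines 199-230 above are the body of sba_alloc_request(). Read together, the allocation path looks roughly like the sketch below; the reqs_lock spinlock name and the early return on an empty free list are assumptions not visible in the matches.

    /* Sketch of the allocation path (lines 199-230); locking details assumed. */
    static struct sba_request *sba_alloc_request(struct sba_device *sba)
    {
        bool found = false;
        unsigned long flags;
        struct sba_request *req = NULL;

        spin_lock_irqsave(&sba->reqs_lock, flags);      /* assumed lock name */
        list_for_each_entry(req, &sba->reqs_free_list, node) {
            if (async_tx_test_ack(&req->tx)) {
                /* claim the first ACKed descriptor on the free list */
                list_move_tail(&req->node, &sba->reqs_alloc_list);
                found = true;
                break;
            }
        }
        spin_unlock_irqrestore(&sba->reqs_lock, flags);

        if (!found)
            return NULL;

        req->flags = SBA_REQUEST_STATE_ALLOCED;
        req->first = req;
        INIT_LIST_HEAD(&req->next);
        atomic_set(&req->next_pending_count, 1);

        dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
        async_tx_ack(&req->tx);
        return req;
    }
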
235 struct sba_request *req)
238 req->flags &= ~SBA_REQUEST_STATE_MASK;
239 req->flags |= SBA_REQUEST_STATE_PENDING;
240 list_move_tail(&req->node, &sba->reqs_pending_list);
247 struct sba_request *req)
254 req->flags &= ~SBA_REQUEST_STATE_MASK;
255 req->flags |= SBA_REQUEST_STATE_ACTIVE;
256 list_move_tail(&req->node, &sba->reqs_active_list);
257 if (req->flags & SBA_REQUEST_FENCE)
264 struct sba_request *req)
267 req->flags &= ~SBA_REQUEST_STATE_MASK;
268 req->flags |= SBA_REQUEST_STATE_ABORTED;
269 list_move_tail(&req->node, &sba->reqs_aborted_list);
276 struct sba_request *req)
279 req->flags &= ~SBA_REQUEST_STATE_MASK;
280 req->flags |= SBA_REQUEST_STATE_FREE;
281 list_move_tail(&req->node, &sba->reqs_free_list);
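
Lines 235-281 are the four state-transition helpers (pending, active, aborted, free). All of them follow the same idiom: clear the state bits in req->flags, set the new state, and move the node onto the matching list; the active helper additionally looks at SBA_REQUEST_FENCE (line 257). A minimal sketch of one of them, assuming the caller already holds the channel lock:

    /* Pattern shared by lines 238-240, 254-257, 267-269 and 279-281. */
    static void _sba_pending_request(struct sba_device *sba,
                                     struct sba_request *req)
    {
        lockdep_assert_held(&sba->reqs_lock);       /* assumed; caller holds the lock */
        req->flags &= ~SBA_REQUEST_STATE_MASK;      /* drop the old state bits */
        req->flags |= SBA_REQUEST_STATE_PENDING;    /* set the new state */
        list_move_tail(&req->node, &sba->reqs_pending_list);
    }
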
286 static void sba_free_chained_requests(struct sba_request *req)
290 struct sba_device *sba = req->sba;
294 _sba_free_request(sba, req);
295 list_for_each_entry(nreq, &req->next, next)
302 struct sba_request *req)
305 struct sba_device *sba = req->sba;
309 list_add_tail(&req->next, &first->next);
310 req->first = first;
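
Lines 286-310 cover request chaining: sba_free_chained_requests() walks req->next and frees every chained request, while sba_chain_request() appends a request to an existing chain head. A sketch of the chaining side, with the lock name and the pending-count bookkeeping assumed:

    /* Sketch of sba_chain_request() (lines 302-310). */
    static void sba_chain_request(struct sba_request *first,
                                  struct sba_request *req)
    {
        unsigned long flags;
        struct sba_device *sba = req->sba;

        spin_lock_irqsave(&sba->reqs_lock, flags);   /* assumed lock name */
        list_add_tail(&req->next, &first->next);     /* append to the chain */
        req->first = first;                          /* remember the chain head */
        atomic_inc(&first->next_pending_count);      /* assumed bookkeeping */
        spin_unlock_irqrestore(&sba->reqs_lock, flags);
    }
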
319 struct sba_request *req, *req1;
324 list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
325 _sba_free_request(sba, req);
328 list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
329 _sba_abort_request(sba, req);
342 struct sba_request *req, *req1;
347 list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
348 _sba_free_request(sba, req);
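
Lines 319-348 are two teardown passes over the channel lists: one frees everything still on the alloc list and aborts whatever is active, the other frees the pending list. Both use the _safe iterator because the loop body moves entries off the list being walked. Roughly:

    /* Teardown pattern from lines 324-329 and 347-348 (lock handling assumed). */
    struct sba_request *req, *req1;

    list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
        _sba_free_request(sba, req);        /* never issued: just free */

    list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
        _sba_abort_request(sba, req);       /* in flight: park on the aborted list */

    list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
        _sba_free_request(sba, req);        /* queued but not sent: free */
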
354 struct sba_request *req)
359 req->msg.error = 0;
360 ret = mbox_send_message(sba->mchan, &req->msg);
367 ret = req->msg.error;
383 struct sba_request *req;
389 req = list_first_entry(&sba->reqs_pending_list,
393 if (!_sba_active_request(sba, req))
397 ret = sba_send_mbox_request(sba, req);
399 _sba_pending_request(sba, req);
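
Lines 383-399 pop the head of the pending list, promote it to active and hand it to the mailbox; if sending fails, the request is demoted back to pending. A sketch of that loop body, with the loop and locking structure assumed:

    /* Sketch of the submit path around lines 389-399. */
    struct sba_request *req;
    int ret;

    while (!list_empty(&sba->reqs_pending_list)) {
        /* take the oldest pending request */
        req = list_first_entry(&sba->reqs_pending_list,
                               struct sba_request, node);

        if (!_sba_active_request(sba, req))     /* cannot activate now (assumed meaning) */
            break;

        ret = sba_send_mbox_request(sba, req);
        if (ret < 0) {
            _sba_pending_request(sba, req);     /* put it back and retry later */
            break;
        }
    }
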
408 struct sba_request *req)
412 struct sba_request *nreq, *first = req->first;
452 struct sba_request *req;
458 list_for_each_entry(req, &sba->reqs_free_list, node)
459 if (async_tx_test_ack(&req->tx))
462 list_for_each_entry(req, &sba->reqs_alloc_list, node)
465 list_for_each_entry(req, &sba->reqs_pending_list, node)
468 list_for_each_entry(req, &sba->reqs_active_list, node)
471 list_for_each_entry(req, &sba->reqs_aborted_list, node)
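
Lines 452-471 walk all five state lists; the free list is only counted when the descriptor has been ACKed. The enclosing function is not part of the matches, but the counting idiom is simply:

    /* Counting idiom from lines 458-471 (counter names are illustrative). */
    struct sba_request *req;
    size_t free_count = 0, alloced = 0, pending = 0, active = 0, aborted = 0;

    list_for_each_entry(req, &sba->reqs_free_list, node)
        if (async_tx_test_ack(&req->tx))        /* only count reusable descriptors */
            free_count++;
    list_for_each_entry(req, &sba->reqs_alloc_list, node)
        alloced++;
    list_for_each_entry(req, &sba->reqs_pending_list, node)
        pending++;
    list_for_each_entry(req, &sba->reqs_active_list, node)
        active++;
    list_for_each_entry(req, &sba->reqs_aborted_list, node)
        aborted++;
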
520 struct sba_request *req, *nreq;
526 req = to_sba_request(tx);
531 _sba_pending_request(sba, req);
532 list_for_each_entry(nreq, &req->next, next)
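
Lines 520-532 are sba_tx_submit(): the descriptor is converted back to its sba_request, moved to the pending list, and every request chained behind it via req->next is pended as well. A sketch, with cookie assignment and locking assumed:

    /* Sketch of sba_tx_submit() (lines 520-532). */
    static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx)
    {
        unsigned long flags;
        dma_cookie_t cookie;
        struct sba_device *sba;
        struct sba_request *req, *nreq;

        if (unlikely(!tx))
            return -EINVAL;

        req = to_sba_request(tx);
        sba = req->sba;

        spin_lock_irqsave(&sba->reqs_lock, flags);  /* assumed lock name */
        cookie = dma_cookie_assign(tx);             /* assumed cookie handling */
        _sba_pending_request(sba, req);
        list_for_each_entry(nreq, &req->next, next)
            _sba_pending_request(sba, nreq);        /* pend the whole chain */
        spin_unlock_irqrestore(&sba->reqs_lock, flags);

        return cookie;
    }
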
555 static void sba_fillup_interrupt_msg(struct sba_request *req,
561 dma_addr_t resp_dma = req->tx.phys;
567 cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
578 cmdsp->data_len = req->sba->hw_resp_size;
584 cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
596 if (req->sba->hw_resp_size) {
599 cmdsp->resp_len = req->sba->hw_resp_size;
603 cmdsp->data_len = req->sba->hw_resp_size;
610 msg->ctx = req;
617 struct sba_request *req = NULL;
621 req = sba_alloc_request(sba);
622 if (!req)
629 req->flags |= SBA_REQUEST_FENCE;
632 sba_fillup_interrupt_msg(req, req->cmds, &req->msg);
635 req->tx.flags = flags;
636 req->tx.cookie = -EBUSY;
638 return &req->tx;
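
Lines 555-638 cover the interrupt descriptor: sba_fillup_interrupt_msg() encodes a response-only command set (everything is sized by sba->hw_resp_size), and sba_prep_dma_interrupt() wraps it in the usual prep pattern. That pattern, repeated by every prep helper below, is roughly:

    /* Prep pattern from lines 617-638 (helper name to_sba_device is assumed). */
    static struct dma_async_tx_descriptor *
    sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags)
    {
        struct sba_device *sba = to_sba_device(dchan);  /* assumed helper */
        struct sba_request *req;

        req = sba_alloc_request(sba);
        if (!req)
            return NULL;

        req->flags |= SBA_REQUEST_FENCE;        /* ordering fence, matched line 629 */

        sba_fillup_interrupt_msg(req, req->cmds, &req->msg);

        req->tx.flags = flags;
        req->tx.cookie = -EBUSY;

        return &req->tx;
    }
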
641 static void sba_fillup_memcpy_msg(struct sba_request *req,
649 dma_addr_t resp_dma = req->tx.phys;
684 if (req->sba->hw_resp_size) {
687 cmdsp->resp_len = req->sba->hw_resp_size;
698 msg->ctx = req;
707 struct sba_request *req = NULL;
710 req = sba_alloc_request(sba);
711 if (!req)
714 req->flags |= SBA_REQUEST_FENCE;
717 sba_fillup_memcpy_msg(req, req->cmds, &req->msg,
721 req->tx.flags = flags;
722 req->tx.cookie = -EBUSY;
724 return req;
734 struct sba_request *first = NULL, *req;
740 req = sba_prep_dma_memcpy_req(sba, off, dst, src,
742 if (!req) {
749 sba_chain_request(first, req);
751 first = req;
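
Lines 641-751 are the memcpy path. sba_prep_dma_memcpy_req() builds one bounded request, and sba_prep_dma_memcpy() slices a long copy into per-request chunks and chains them, keeping the first request as the chain head. The slicing loop looks roughly like this; the chunk-size limit and error unwinding are assumptions:

    /* Sketch of the chaining loop around lines 734-751. */
    static struct dma_async_tx_descriptor *
    sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
                        size_t len, unsigned long flags)
    {
        struct sba_device *sba = to_sba_device(dchan);  /* assumed helper */
        size_t req_len, off = 0;
        struct sba_request *first = NULL, *req;

        while (len) {
            req_len = min_t(size_t, len, sba->hw_buf_size); /* assumed chunk limit */

            req = sba_prep_dma_memcpy_req(sba, off, dst, src, req_len, flags);
            if (!req) {
                if (first)
                    sba_free_chained_requests(first);   /* undo the partial chain */
                return NULL;
            }

            if (first)
                sba_chain_request(first, req);  /* hang this chunk off the head */
            else
                first = req;

            off += req_len;
            len -= req_len;
        }

        return (first) ? &first->tx : NULL;
    }

The xor path at lines 873-894 repeats the same head-or-chain loop with sba_prep_dma_xor_req().
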
760 static void sba_fillup_xor_msg(struct sba_request *req,
769 dma_addr_t resp_dma = req->tx.phys;
823 if (req->sba->hw_resp_size) {
826 cmdsp->resp_len = req->sba->hw_resp_size;
837 msg->ctx = req;
846 struct sba_request *req = NULL;
849 req = sba_alloc_request(sba);
850 if (!req)
853 req->flags |= SBA_REQUEST_FENCE;
856 sba_fillup_xor_msg(req, req->cmds, &req->msg,
860 req->tx.flags = flags;
861 req->tx.cookie = -EBUSY;
863 return req;
873 struct sba_request *first = NULL, *req;
883 req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt,
885 if (!req) {
892 sba_chain_request(first, req);
894 first = req;
903 static void sba_fillup_pq_msg(struct sba_request *req,
914 dma_addr_t resp_dma = req->tx.phys;
1006 if (req->sba->hw_resp_size) {
1009 cmdsp->resp_len = req->sba->hw_resp_size;
1033 if (req->sba->hw_resp_size) {
1036 cmdsp->resp_len = req->sba->hw_resp_size;
1048 msg->ctx = req;
1057 struct sba_request *req = NULL;
1060 req = sba_alloc_request(sba);
1061 if (!req)
1064 req->flags |= SBA_REQUEST_FENCE;
1067 sba_fillup_pq_msg(req, dmaf_continue(flags),
1068 req->cmds, &req->msg,
1072 req->tx.flags = flags;
1073 req->tx.cookie = -EBUSY;
1075 return req;
1078 static void sba_fillup_pq_single_msg(struct sba_request *req,
1089 dma_addr_t resp_dma = req->tx.phys;
1166 if (req->sba->hw_resp_size) {
1169 cmdsp->resp_len = req->sba->hw_resp_size;
1194 pos = (dpos < req->sba->max_pq_coefs) ?
1195 dpos : (req->sba->max_pq_coefs - 1);
1223 pos = (dpos < req->sba->max_pq_coefs) ?
1224 dpos : (req->sba->max_pq_coefs - 1);
1287 if (req->sba->hw_resp_size) {
1290 cmdsp->resp_len = req->sba->hw_resp_size;
1302 msg->ctx = req;
1312 struct sba_request *req = NULL;
1315 req = sba_alloc_request(sba);
1316 if (!req)
1319 req->flags |= SBA_REQUEST_FENCE;
1322 sba_fillup_pq_single_msg(req, dmaf_continue(flags),
1323 req->cmds, &req->msg, off, len,
1327 req->tx.flags = flags;
1328 req->tx.cookie = -EBUSY;
1330 return req;
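
Lines 1078-1330 are the single-source P/Q path. The detail the matches highlight (lines 1194-1195 and 1223-1224) is that a RAID-6 coefficient position is applied in chunks of at most sba->max_pq_coefs - 1, since the hardware cannot apply an arbitrarily large coefficient in one command. The clamping itself is just:

    /* Clamp taken from lines 1194-1195 / 1223-1224; the surrounding loop that
     * repeats the multiply until dpos reaches zero is an assumption. */
    while (dpos) {
        pos = (dpos < req->sba->max_pq_coefs) ?
              dpos : (req->sba->max_pq_coefs - 1);
        /* encode one multiply-by-coefficient command for this chunk */
        dpos -= pos;
    }
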
1343 struct sba_request *first = NULL, *req;
1376 req = sba_prep_dma_pq_single_req(sba,
1379 if (!req)
1383 sba_chain_request(first, req);
1385 first = req;
1394 req = sba_prep_dma_pq_single_req(sba,
1397 if (!req)
1401 sba_chain_request(first, req);
1403 first = req;
1408 req = sba_prep_dma_pq_req(sba, off,
1411 if (!req)
1415 sba_chain_request(first, req);
1417 first = req;
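
Lines 1343-1417 are sba_prep_dma_pq(). The matches show two shapes of work being chained from the same function: single-source requests built by sba_prep_dma_pq_single_req() (lines 1376 and 1394) and full multi-source requests from sba_prep_dma_pq_req() (line 1408). Whichever is built, each iteration ends with the same head-or-chain step seen in the memcpy and xor paths:

    /* Common tail of each iteration (lines 1383-1385, 1401-1403, 1415-1417);
     * the failure label is assumed. */
    if (!req)
        goto fail;      /* assumed: unwind with sba_free_chained_requests(first) */

    if (first)
        sba_chain_request(first, req);
    else
        first = req;
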
1437 struct sba_request *req = m->ctx;
1438 struct sba_device *sba = req->sba;
1446 sba_process_received_request(sba, req);
1466 struct sba_request *req = NULL;
1491 req = devm_kzalloc(sba->dev,
1492 struct_size(req, cmds, sba->max_cmd_per_req),
1494 if (!req) {
1498 INIT_LIST_HEAD(&req->node);
1499 req->sba = sba;
1500 req->flags = SBA_REQUEST_STATE_FREE;
1501 INIT_LIST_HEAD(&req->next);
1502 atomic_set(&req->next_pending_count, 0);
1504 req->cmds[j].cmd = 0;
1505 req->cmds[j].cmd_dma = sba->cmds_base +
1507 req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
1509 req->cmds[j].flags = 0;
1511 memset(&req->msg, 0, sizeof(req->msg));
1512 dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
1513 async_tx_ack(&req->tx);
1514 req->tx.tx_submit = sba_tx_submit;
1515 req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;
1516 list_add_tail(&req->node, &sba->reqs_free_list);
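
Lines 1466-1516 are the channel-resource preallocation: each sba_request is allocated with struct_size() so the trailing cmds[] array holds max_cmd_per_req entries, every command slot gets its slice of the preallocated command buffers, and the descriptor is initialised and parked on the free list. Pulling the matched lines into one loop gives roughly the following; the i/j loop bounds and the command-buffer stride are assumptions:

    /* Sketch of the per-request init loop (lines 1491-1516). */
    for (i = 0; i < sba->max_req; i++) {                     /* assumed bound */
        req = devm_kzalloc(sba->dev,
                           struct_size(req, cmds, sba->max_cmd_per_req),
                           GFP_KERNEL);
        if (!req)
            return -ENOMEM;                                   /* assumed error path */

        INIT_LIST_HEAD(&req->node);
        req->sba = sba;
        req->flags = SBA_REQUEST_STATE_FREE;
        INIT_LIST_HEAD(&req->next);
        atomic_set(&req->next_pending_count, 0);

        for (j = 0; j < sba->max_cmd_per_req; j++) {
            req->cmds[j].cmd = 0;
            req->cmds[j].cmd_dma = sba->cmds_base +
                (i * sba->max_cmd_per_req + j) * sizeof(u64);      /* assumed stride */
            req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
                (i * sba->max_cmd_per_req + j) * sizeof(u64);      /* assumed stride */
            req->cmds[j].flags = 0;
        }

        memset(&req->msg, 0, sizeof(req->msg));
        dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
        async_tx_ack(&req->tx);
        req->tx.tx_submit = sba_tx_submit;
        req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;
        list_add_tail(&req->node, &sba->reqs_free_list);
    }
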