Lines matching defs:wq (drivers/dma/idxd/submit.c)
11 static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
14 struct idxd_device *idxd = wq->idxd;
16 desc = wq->descs[idx];
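
The fragments at lines 11-16 show __get_desc() taking a free slot index and a CPU hint and handing back one of the wq's preallocated descriptors. A minimal sketch of what the elided body likely looks like, reconstructed from the visible fragments; the memset scrubbing and field names beyond those shown above are assumptions, not verbatim source:

static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
{
	struct idxd_device *idxd = wq->idxd;
	struct idxd_desc *desc;

	/* Descriptors are preallocated per wq; idx selects a free slot. */
	desc = wq->descs[idx];

	/* Assumed: scrub the hardware descriptor and completion record so
	 * stale fields from a previous submission cannot reach the device. */
	memset(desc->hw, 0, sizeof(struct dsa_hw_desc));
	memset(desc->completion, 0, idxd->data->compl_size);
	desc->cpu = cpu;
	return desc;
}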
27 struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
30 struct idxd_device *idxd = wq->idxd;
38 sbq = &wq->sbq;
44 return __get_desc(wq, idx, cpu);
62 return __get_desc(wq, idx, cpu);
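
Lines 27-62 outline idxd_alloc_desc(): a slot is taken from the wq's sbitmap_queue (line 38), and both return sites (lines 44 and 62) go through __get_desc(), which implies a blocking slow path between them. A hedged sketch of that allocation pattern in kernel context; the nonblocking/blocking split and the error codes follow the common sbitmap idiom and are assumptions, not the verbatim function:

struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
{
	struct idxd_device *idxd = wq->idxd;
	struct sbitmap_queue *sbq = &wq->sbq;
	DEFINE_SBQ_WAIT(wait);
	struct sbq_wait_state *ws;
	unsigned int cpu;
	int idx;

	if (idxd->state != IDXD_DEV_ENABLED)
		return ERR_PTR(-EIO);

	/* Fast path: grab a free descriptor slot without sleeping. */
	idx = sbitmap_queue_get(sbq, &cpu);
	if (idx >= 0)
		return __get_desc(wq, idx, cpu);
	if (optype == IDXD_OP_NONBLOCK)
		return ERR_PTR(-EAGAIN);

	/* Slow path (assumed): sleep on the sbitmap queue until a slot
	 * is freed by idxd_free_desc() or a signal arrives. */
	ws = &sbq->ws[0];
	for (;;) {
		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_INTERRUPTIBLE);
		if (signal_pending_state(TASK_INTERRUPTIBLE, current))
			break;
		idx = sbitmap_queue_get(sbq, &cpu);
		if (idx >= 0)
			break;
		schedule();
	}
	sbitmap_finish_wait(sbq, ws, &wait);

	return idx < 0 ? ERR_PTR(-EAGAIN) : __get_desc(wq, idx, cpu);
}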
66 void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
71 sbitmap_queue_clear(&wq->sbq, desc->id, cpu);
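
Line 71 shows the release side: freeing a descriptor is just clearing its bit in the same sbitmap_queue, with the CPU hint fed back for wake-up batching. A short sketch; the desc->cpu bookkeeping around the visible call is an assumption:

void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
	int cpu = desc->cpu;

	desc->cpu = -1;
	/* Returning the slot may wake an allocator sleeping in
	 * idxd_alloc_desc(); the cpu hint helps sbitmap batch wakeups. */
	sbitmap_queue_clear(&wq->sbq, desc->id, cpu);
}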
75 static struct idxd_desc *list_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
97 static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
126 found = list_abort_desc(wq, ie, desc);
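
Lines 75-126 show a two-stage abort: llist_abort_desc() drains the irq entry's lockless pending list, and only if the target is not found there does it fall back to list_abort_desc() on the ordinary work list (line 126). A simplified sketch of that drain-then-search shape; locking details and the handling of already-completed descriptors are assumptions here, not the verbatim source:

static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
			     struct idxd_desc *desc)
{
	struct idxd_desc *d, *t, *found = NULL;
	struct llist_node *head;

	/* Assumed: the list lock keeps the irq thread from racing the
	 * abort while the pending llist is picked apart. */
	spin_lock(&ie->list_lock);
	head = llist_del_all(&ie->pending_llist);
	llist_for_each_entry_safe(d, t, head, llnode) {
		if (d == desc) {
			found = d;
			continue;
		}
		/* Descriptors not being aborted go back on the work list. */
		list_add_tail(&d->list, &ie->work_list);
	}
	if (!found)
		found = list_abort_desc(wq, ie, desc);	/* line 126 */
	spin_unlock(&ie->list_lock);

	/* Completing 'found' with an abort status would follow here. */
}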
148 * has better control of the number of descriptors being submitted to a shared wq by limiting
149 * the number of driver-allocated descriptors to the wq size. However, when the swq is
155 int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc)
157 unsigned int retries = wq->enqcmds_retries;
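
The comment at lines 148-149 motivates idxd_enqcmds() at line 155: ENQCMDS can be rejected while a shared wq is busy, so a sysfs-tunable retry count (wq->enqcmds_retries, line 157) bounds how long the driver keeps trying. A sketch of the retry loop; the cpu_relax() backoff between attempts is the usual idiom and assumed here:

int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc)
{
	unsigned int retries = wq->enqcmds_retries;
	int rc;

	do {
		rc = enqcmds(portal, desc);	/* 0 on accept, negative when busy */
		if (rc == 0)
			break;
		cpu_relax();			/* polite spin before retrying */
	} while (retries--);

	return rc;
}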
170 int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
172 struct idxd_device *idxd = wq->idxd;
181 if (!percpu_ref_tryget_live(&wq->wq_active)) {
182 wait_for_completion(&wq->wq_resurrect);
183 if (!percpu_ref_tryget_live(&wq->wq_active))
187 portal = idxd_wq_portal_addr(wq);
194 ie = &wq->ie;
206 if (wq_dedicated(wq)) {
209 rc = idxd_enqcmds(wq, portal, desc->hw);
211 percpu_ref_put(&wq->wq_active);
214 llist_abort_desc(wq, ie, desc);
219 percpu_ref_put(&wq->wq_active);
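
Lines 170-219 sketch the submit path: take a live reference on wq->wq_active, waiting once on wq_resurrect if the wq is mid-reset (lines 181-183), map the portal (line 187), optionally hook the descriptor onto the wq's irq entry (line 194), then issue it directly on a dedicated wq or via idxd_enqcmds() on a shared one (lines 206-209), unwinding through llist_abort_desc() on failure (line 214). A condensed sketch of that control flow; field names and flags beyond those visible above are assumptions, not verbatim source:

int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
	struct idxd_device *idxd = wq->idxd;
	struct idxd_irq_entry *ie = NULL;
	void __iomem *portal;
	int rc;

	if (idxd->state != IDXD_DEV_ENABLED)
		return -EIO;

	/* One chance to ride out a wq reset: wait for resurrect, retry once. */
	if (!percpu_ref_tryget_live(&wq->wq_active)) {
		wait_for_completion(&wq->wq_resurrect);
		if (!percpu_ref_tryget_live(&wq->wq_active))
			return -ENXIO;
	}

	portal = idxd_wq_portal_addr(wq);

	/* Assumed: descriptors that request a completion interrupt are
	 * queued on the wq's irq entry before submission so the handler
	 * can find them. */
	if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
		ie = &wq->ie;
		desc->hw->int_handle = ie->int_handle;
		llist_add(&desc->llnode, &ie->pending_llist);
	}

	wmb();	/* descriptor writes must be visible before the device reads */

	if (wq_dedicated(wq)) {
		iosubmit_cmds512(portal, desc->hw, 1);	/* MOVDIR64B, cannot fail */
	} else {
		rc = idxd_enqcmds(wq, portal, desc->hw);
		if (rc < 0) {
			percpu_ref_put(&wq->wq_active);
			if (ie)
				llist_abort_desc(wq, ie, desc);
			return rc;
		}
	}

	percpu_ref_put(&wq->wq_active);
	return 0;
}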