Lines Matching full:slice

All matches below are from the Linux kernel's QAIC accel driver, drivers/accel/qaic/qaic_data.c. Each entry shows the file line number, the matched source line, the enclosing function, and whether `slice` is a local or an argument there.

156 struct bo_slice *slice = container_of(kref, struct bo_slice, ref_count); in free_slice() local
158 slice->bo->total_slice_nents -= slice->nents; in free_slice()
159 list_del(&slice->slice); in free_slice()
160 drm_gem_object_put(&slice->bo->base); in free_slice()
161 sg_free_table(slice->sgt); in free_slice()
162 kfree(slice->sgt); in free_slice()
163 kfree(slice->reqs); in free_slice()
164 kfree(slice); in free_slice()
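
free_slice() (lines 156-164) is the kref release callback for a slice: it subtracts the slice's entries from the BO's running total, unlinks the slice, drops the BO reference, and frees the sg table, the request array, and the slice itself. Below is a minimal userspace sketch of that release-callback pattern; struct ref, ref_put(), and the field layout are illustrative stand-ins for the kernel's kref API, not the driver's types.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel's kref machinery; illustrative only. */
struct ref { unsigned int count; };

struct slice {
	struct ref ref_count;
	void *reqs;			/* freed by the release callback */
};

/* Release callback: runs exactly once, when the last reference drops. */
static void free_slice(struct slice *s)
{
	free(s->reqs);
	free(s);
	puts("slice released");
}

/* kref_put() analogue: decrement and call release on the final put. */
static void ref_put(struct slice *s, void (*release)(struct slice *))
{
	if (--s->ref_count.count == 0)
		release(s);
}

int main(void)
{
	struct slice *s = malloc(sizeof(*s));

	if (!s)
		return 1;
	s->ref_count.count = 2;		/* kref_init() plus one extra holder */
	s->reqs = malloc(64);

	ref_put(s, free_slice);		/* one holder left; nothing freed */
	ref_put(s, free_slice);		/* last put runs free_slice() */
	return 0;
}

Putting all teardown in the release callback lets every holder drop its reference without knowing whether it is the last one.
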
247 static int encode_reqs(struct qaic_device *qdev, struct bo_slice *slice, in encode_reqs() argument
259 if (!slice->no_xfer) in encode_reqs()
260 cmd |= (slice->dir == DMA_TO_DEVICE ? INBOUND_XFER : OUTBOUND_XFER); in encode_reqs()
290 * When we end up splitting up a single request (ie a buf slice) into in encode_reqs()
299 for_each_sgtable_sg(slice->sgt, sg, i) { in encode_reqs()
300 slice->reqs[i].cmd = cmd; in encode_reqs()
301 slice->reqs[i].src_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ? in encode_reqs()
303 slice->reqs[i].dest_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ? in encode_reqs()
311 slice->reqs[i].len = cpu_to_le32((u32)sg_dma_len(sg)); in encode_reqs()
314 slice->reqs[i].sem_cmd0 = cpu_to_le32(ENCODE_SEM(req->sem0.val, in encode_reqs()
321 slice->reqs[i].sem_cmd1 = cpu_to_le32(ENCODE_SEM(req->sem1.val, in encode_reqs()
328 slice->reqs[i].sem_cmd2 = cpu_to_le32(ENCODE_SEM(req->sem2.val, in encode_reqs()
335 slice->reqs[i].sem_cmd3 = cpu_to_le32(ENCODE_SEM(req->sem3.val, in encode_reqs()
346 slice->reqs[i].cmd |= GEN_COMPLETION; in encode_reqs()
347 slice->reqs[i].db_addr = db_addr; in encode_reqs()
348 slice->reqs[i].db_len = db_len; in encode_reqs()
349 slice->reqs[i].db_data = db_data; in encode_reqs()
364 req->sem0.flags |= (slice->dir == DMA_TO_DEVICE ? in encode_reqs()
366 slice->reqs[i].sem_cmd0 = cpu_to_le32(ENCODE_SEM(req->sem0.val, req->sem0.index, in encode_reqs()
369 slice->reqs[i].sem_cmd1 = cpu_to_le32(ENCODE_SEM(req->sem1.val, req->sem1.index, in encode_reqs()
372 slice->reqs[i].sem_cmd2 = cpu_to_le32(ENCODE_SEM(req->sem2.val, req->sem2.index, in encode_reqs()
375 slice->reqs[i].sem_cmd3 = cpu_to_le32(ENCODE_SEM(req->sem3.val, req->sem3.index, in encode_reqs()
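
encode_reqs() (lines 247-375) walks the slice's scatter-gather table and emits one dbc_req per DMA segment: the host-side segment address goes in src_addr for DMA_TO_DEVICE and in dest_addr otherwise, per-request semaphore commands are packed with ENCODE_SEM(), and the doorbell/GEN_COMPLETION fields are set only on the final request so the device signals once per slice. A simplified model of the direction swap and last-request completion follows; the flag values and field names here are made up, and the semaphore encoding is omitted.

#include <stdint.h>
#include <stdio.h>

enum dir { TO_DEVICE, FROM_DEVICE };

struct seg { uint64_t dma_addr; uint32_t len; };
struct req { uint32_t cmd; uint64_t src, dst; uint32_t len; };

#define INBOUND_XFER	0x1	/* hypothetical bit values */
#define OUTBOUND_XFER	0x2
#define GEN_COMPLETION	0x100

static void encode(struct req *reqs, const struct seg *sgs, int n,
		   enum dir dir, uint64_t dev_addr)
{
	uint32_t cmd = dir == TO_DEVICE ? INBOUND_XFER : OUTBOUND_XFER;

	for (int i = 0; i < n; i++) {
		reqs[i].cmd = cmd;
		/* Host segment on one side, device address on the other. */
		reqs[i].src = dir == TO_DEVICE ? sgs[i].dma_addr : dev_addr;
		reqs[i].dst = dir == TO_DEVICE ? dev_addr : sgs[i].dma_addr;
		reqs[i].len = sgs[i].len;
		dev_addr += sgs[i].len;
	}
	/* Only the final request rings the doorbell / signals completion. */
	reqs[n - 1].cmd |= GEN_COMPLETION;
}

int main(void)
{
	struct seg sgs[2] = { { 0x1000, 4096 }, { 0x3000, 2048 } };
	struct req reqs[2];

	encode(reqs, sgs, 2, TO_DEVICE, 0x80000000ULL);
	printf("last: cmd=0x%x len=%u\n", (unsigned)reqs[1].cmd,
	       (unsigned)reqs[1].len);
	return 0;
}
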
386 struct bo_slice *slice; in qaic_map_one_slice() local
393 slice = kmalloc(sizeof(*slice), GFP_KERNEL); in qaic_map_one_slice()
394 if (!slice) { in qaic_map_one_slice()
399 slice->reqs = kcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL); in qaic_map_one_slice()
400 if (!slice->reqs) { in qaic_map_one_slice()
405 slice->no_xfer = !slice_ent->size; in qaic_map_one_slice()
406 slice->sgt = sgt; in qaic_map_one_slice()
407 slice->nents = sgt->nents; in qaic_map_one_slice()
408 slice->dir = bo->dir; in qaic_map_one_slice()
409 slice->bo = bo; in qaic_map_one_slice()
410 slice->size = slice_ent->size; in qaic_map_one_slice()
411 slice->offset = slice_ent->offset; in qaic_map_one_slice()
413 ret = encode_reqs(qdev, slice, slice_ent); in qaic_map_one_slice()
418 kref_init(&slice->ref_count); in qaic_map_one_slice()
420 list_add_tail(&slice->slice, &bo->slices); in qaic_map_one_slice()
425 kfree(slice->reqs); in qaic_map_one_slice()
427 kfree(slice); in qaic_map_one_slice()
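
qaic_map_one_slice() (lines 386-427) allocates the slice, then a request array sized by sgt->nents, fills in the bookkeeping fields, encodes the requests, and only then takes a reference and links the slice onto the BO; the error path frees exactly what was allocated, in reverse order. A compilable sketch of that construct-or-unwind shape, with a stub standing in for encode_reqs():

#include <errno.h>
#include <stdlib.h>

struct req { unsigned int id; };

struct slice {
	struct req *reqs;
	unsigned int nents;
};

/* Stand-in for encode_reqs(); pretend it can fail. */
static int encode_reqs_stub(struct slice *s)
{
	return s->nents ? 0 : -EINVAL;
}

static int map_one_slice(struct slice **out, unsigned int nents)
{
	struct slice *s;
	int ret;

	s = malloc(sizeof(*s));
	if (!s)
		return -ENOMEM;

	s->reqs = calloc(nents, sizeof(*s->reqs));
	if (!s->reqs) {
		ret = -ENOMEM;
		goto free_slice;	/* only the slice exists so far */
	}
	s->nents = nents;

	ret = encode_reqs_stub(s);
	if (ret)
		goto free_reqs;		/* unwind in reverse allocation order */

	*out = s;
	return 0;

free_reqs:
	free(s->reqs);
free_slice:
	free(s);
	return ret;
}

int main(void)
{
	struct slice *s;

	if (map_one_slice(&s, 4))
		return 1;
	free(s->reqs);
	free(s);
	return 0;
}
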
901 struct bo_slice *slice, *temp; in qaic_free_slices_bo() local
903 list_for_each_entry_safe(slice, temp, &bo->slices, slice) in qaic_free_slices_bo()
904 kref_put(&slice->ref_count, free_slice); in qaic_free_slices_bo()
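
qaic_free_slices_bo() (lines 901-904) must use list_for_each_entry_safe() because free_slice(), invoked by the final kref_put(), does list_del() and kfree() on the current node. The safe variant caches the next pointer before the body runs, as in this minimal sketch:

#include <stdlib.h>

/* The _safe iteration pattern: cache the next node before the loop
 * body frees the current one. */
struct node { struct node *next; };

static void free_all(struct node *head)
{
	struct node *n, *tmp;

	for (n = head; n; n = tmp) {
		tmp = n->next;	/* saved first: free(n) invalidates n->next */
		free(n);
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			break;
		n->next = head;
		head = n;
	}
	free_all(head);
	return 0;
}
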
1070 static inline int copy_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice, u32 dbc_id, in copy_exec_reqs() argument
1074 struct dbc_req *reqs = slice->reqs; in copy_exec_reqs()
1079 if (avail < slice->nents) in copy_exec_reqs()
1082 if (tail + slice->nents > dbc->nelem) { in copy_exec_reqs()
1084 avail = min_t(u32, avail, slice->nents); in copy_exec_reqs()
1087 avail = slice->nents - avail; in copy_exec_reqs()
1091 memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * slice->nents); in copy_exec_reqs()
1094 *ptail = (tail + slice->nents) % dbc->nelem; in copy_exec_reqs()
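
copy_exec_reqs() (lines 1070-1094) publishes a slice's requests into the DBC request FIFO: it checks that enough slots are free between tail and head, splits the memcpy() in two when the batch would run past dbc->nelem, and advances the tail modulo the ring size. A self-contained model of the same wrap-around copy, with the slot count and types simplified:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NELEM 8U		/* ring size; the driver's dbc->nelem */

struct req { uint32_t id; };

static struct req ring[NELEM];

/* Copy n requests into the ring at *ptail, wrapping to slot 0 when the
 * batch crosses the end; fails if too few slots are free. */
static int copy_reqs(const struct req *reqs, uint32_t n,
		     uint32_t head, uint32_t *ptail)
{
	uint32_t tail = *ptail;
	uint32_t avail = head > tail ? head - tail : head + NELEM - tail;

	avail--;			/* reserve one slot: full != empty */
	if (avail < n)
		return -1;

	if (tail + n > NELEM) {		/* batch runs past the end: split it */
		uint32_t first = NELEM - tail;

		memcpy(&ring[tail], reqs, sizeof(*reqs) * first);
		memcpy(&ring[0], reqs + first, sizeof(*reqs) * (n - first));
	} else {
		memcpy(&ring[tail], reqs, sizeof(*reqs) * n);
	}
	*ptail = (tail + n) % NELEM;
	return 0;
}

int main(void)
{
	struct req batch[3] = { {1}, {2}, {3} };
	uint32_t tail = 6, head = 2;	/* forces a wrap at slot 8 */

	if (copy_reqs(batch, 3, head, &tail))
		return 1;
	printf("new tail=%u, ring[0].id=%u\n", (unsigned)tail,
	       (unsigned)ring[0].id);
	return 0;
}

Leaving one slot unused keeps a full ring distinguishable from an empty one without a separate element counter.
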
1099 static inline int copy_partial_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice, in copy_partial_exec_reqs() argument
1103 struct dbc_req *reqs = slice->reqs; in copy_partial_exec_reqs()
1114 * of the last DMA request of this slice that needs to be in copy_partial_exec_reqs()
1119 for (first_n = 0; first_n < slice->nents; first_n++) in copy_partial_exec_reqs()
1147 memcpy(last_req, reqs + slice->nents - 1, sizeof(*reqs)); in copy_partial_exec_reqs()
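
copy_partial_exec_reqs() (lines 1099-1147) handles a resized (partial) execute: the first_n loop at line 1119 counts how many whole DMA requests fit within the truncated length, those are copied as-is, and one shortened request covers the remainder; the memcpy() at line 1147 starts that last request from the slice's final one so the doorbell/completion fields carry over. The sketch below models only the byte counting and length trim, not the completion-field carry-over.

#include <stdint.h>
#include <stdio.h>

struct req { uint64_t addr; uint32_t len; };

/* Count the whole requests that fit in `size` bytes, then shrink a copy
 * of the next request so the total transferred is exactly `size`.
 * Returns 1 if a trimmed request is needed, 0 if the boundary falls
 * exactly between requests. */
static int trim_reqs(const struct req *reqs, uint32_t nents, uint64_t size,
		     uint32_t *whole, struct req *last)
{
	uint64_t total = 0;
	uint32_t first_n;

	for (first_n = 0; first_n < nents; first_n++) {
		if (total + reqs[first_n].len > size)
			break;
		total += reqs[first_n].len;
	}
	*whole = first_n;

	if (total == size || first_n == nents)
		return 0;		/* nothing to trim */

	*last = reqs[first_n];		/* copy, then shorten the length */
	last->len = (uint32_t)(size - total);
	return 1;
}

int main(void)
{
	struct req reqs[3] = { {0x1000, 4096}, {0x2000, 4096}, {0x3000, 4096} };
	struct req last;
	uint32_t whole;

	if (trim_reqs(reqs, 3, 6000, &whole, &last))
		printf("%u whole reqs, trimmed last len=%u\n",
		       (unsigned)whole, (unsigned)last.len);
	return 0;
}
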
1173 struct bo_slice *slice; in send_bo_list_to_device() local
1218 list_for_each_entry(slice, &bo->slices, slice) { in send_bo_list_to_device()
1219 for (j = 0; j < slice->nents; j++) in send_bo_list_to_device()
1220 slice->reqs[j].req_id = cpu_to_le16(bo->req_id); in send_bo_list_to_device()
1222 if (is_partial && (!pexec[i].resize || pexec[i].resize <= slice->offset)) in send_bo_list_to_device()
1223 /* Configure the slice for no DMA transfer */ in send_bo_list_to_device()
1224 ret = copy_partial_exec_reqs(qdev, slice, 0, dbc, head, tail); in send_bo_list_to_device()
1225 else if (is_partial && pexec[i].resize < slice->offset + slice->size) in send_bo_list_to_device()
1226 /* Configure the slice to be partially DMA transferred */ in send_bo_list_to_device()
1227 ret = copy_partial_exec_reqs(qdev, slice, in send_bo_list_to_device()
1228 pexec[i].resize - slice->offset, dbc, in send_bo_list_to_device()
1231 ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail); in send_bo_list_to_device()
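
The tail of send_bo_list_to_device() (lines 1218-1231) stamps every request in every slice with the BO's req_id, then picks one of three paths per slice: under a partial execute, a slice wholly beyond the resize point is queued with a zero-length transfer, a slice straddling the resize point is partially transferred, and everything else goes through the full copy_exec_reqs() path. The classification reduces to two comparisons, modeled here:

#include <stdint.h>
#include <stdio.h>

/* The three-way dispatch, reduced to two comparisons against the
 * resize point.  A zero resize means "transfer nothing". */
enum action { XFER_NONE, XFER_PARTIAL, XFER_FULL };

static enum action classify(uint64_t resize, uint64_t offset, uint64_t size)
{
	if (!resize || resize <= offset)
		return XFER_NONE;	/* slice lies entirely past the resize */
	if (resize < offset + size)
		return XFER_PARTIAL;	/* resize point falls inside the slice */
	return XFER_FULL;		/* slice lies entirely before it */
}

int main(void)
{
	static const char *names[] = { "none", "partial", "full" };

	/* Three 4 KiB slices of one BO, resized down to 6000 bytes. */
	for (uint64_t off = 0; off < 3 * 4096; off += 4096)
		printf("slice@%llu -> %s\n", (unsigned long long)off,
		       names[classify(6000, off, 4096)]);
	return 0;
}
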