/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

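/*
 * Each doorbell is 8 bytes wide; a process slice is the page-aligned
 * space needed for AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS of them.
 */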
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

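/*
 * Allocate a kernel doorbell from the MES doorbell bitmap.  SDMA queues
 * start scanning at the first SDMA engine's doorbell offset, all other
 * queue types at offset 0.  On success, *doorbell_index holds the
 * absolute dword offset of the doorbell on the BAR (each 8-byte
 * doorbell spans two dwords, hence the "* 2" below).
 */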
static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
					  struct amdgpu_mes_process *process,
					  int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;
	struct amdgpu_mes *mes = &adev->mes;

	if (ip_type == AMDGPU_RING_TYPE_SDMA)
		offset = adev->doorbell_index.sdma_engine[0];
	else
		offset = 0;

	found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
	if (found >= mes->num_mes_dbs) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, mes->doorbell_bitmap);

	/* Get the absolute doorbell index on BAR */
	*doorbell_index = mes->db_start_dw_offset + found * 2;
	return 0;
}

static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
					    struct amdgpu_mes_process *process,
					    uint32_t doorbell_index)
{
	unsigned int old, rel_index;
	struct amdgpu_mes *mes = &adev->mes;

	/* Find the relative index of the doorbell in this object */
	rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
	old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
	WARN_ON(!old);
}

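/*
 * Set up the doorbell bitmap and reserve the first
 * AMDGPU_MES_PRIORITY_NUM_LEVELS doorbells as aggregated doorbells,
 * one per priority level.
 */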
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_mes *mes = &adev->mes;

	/* Bitmap for dynamic allocation of kernel doorbells */
	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
	if (!mes->doorbell_bitmap) {
		DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
		return -ENOMEM;
	}

	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
		set_bit(i, mes->doorbell_bitmap);
	}

	return 0;
}

static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
	bitmap_free(adev->mes.doorbell_bitmap);
}

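/*
 * One-time MES bring-up: initialize the pasid/gang/queue ID allocators
 * and locks, program the default VMID and HQD masks, allocate writeback
 * slots for the scheduler context, the query-status fence and the
 * register read value, and set up the doorbell bitmap.  Any failure
 * unwinds everything allocated up to that point.
 */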
int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	spin_lock_init(&adev->mes.ring_lock);
	mutex_init(&adev->mes.mutex_hidden);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= 4)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		/* zero sdma_hqd_mask for non-existent engine */
		else if (adev->sdma.num_instances == 1)
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) sch_ctx_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.sch_ctx_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
	adev->mes.sch_ctx_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		dev_err(adev->dev,
			"(%d) query_status_fence_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.query_status_fence_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
	adev->mes.query_status_fence_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
		dev_err(adev->dev,
			"(%d) read_val_offs alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.read_val_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
	adev->mes.read_val_ptr =
		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	return 0;

error:
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
error_ids:
	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
	amdgpu_mes_doorbell_free(adev);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

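/*
 * Create a MES process for the given PASID: allocate and clear the
 * process context BO in GTT, then publish the process in the pasid IDR.
 * The context BO is allocated before taking the MES lock so that no
 * other locks are taken underneath it.
 */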
int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr entry for pasid=%d\n", pasid);
		goto clean_up_ctx;
	}

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process);
	return r;
}

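/*
 * Tear down a MES process: under the MES lock, unmap every hardware
 * queue of every gang and drop the process from the IDRs; then, with
 * the lock released, free the queue MQDs, the gang and process context
 * BOs and the bookkeeping structures.
 */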
void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process);
}

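/*
 * Add a gang to a process: allocate and clear the gang context BO, look
 * up the process by PASID under the MES lock, insert the gang into the
 * gang IDR and inherit scheduling parameters from the gang properties.
 */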
int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

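/*
 * Suspend/resume walk every gang of every process and ask the MES
 * firmware to suspend or resume it.  A per-gang failure is only logged;
 * the walk continues and both functions still return 0.
 */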
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_suspend_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to suspend pasid %d gangid %d",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_resume_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to resume pasid %d gangid %d",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

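/*
 * Allocate and clear the MQD backing BO for a queue.  On success the BO
 * is left reserved; amdgpu_mes_queue_init_mqd() fills it in and
 * unreserves it.
 */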
static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	amdgpu_bo_unreserve(q->mqd_obj);
}

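/*
 * Create a hardware queue on a gang: allocate the MQD, assign a queue
 * ID and a kernel doorbell, initialize the MQD from the queue
 * properties and hand the queue to the MES firmware via the
 * add_hw_queue callback.  Every step is unwound on failure.
 */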
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	memset(&queue_input, 0, sizeof(struct mes_add_queue_input));

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_kernel_doorbell_get(adev, gang->process,
					   qprops->queue_type,
					   &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_kernel_doorbell_free(adev, gang->process,
					qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}

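/*
 * Remove a hardware queue: drop it from the queue IDR, ask the MES
 * firmware to unmap it, then release its doorbell and MQD.
 */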
int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_kernel_doorbell_free(adev, gang->process,
					queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

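/*
 * Unmap a kernel ring's hardware queue that is not tracked in the MES
 * queue IDR ("legacy" queue).  The trailing fence address/value pair
 * lets the caller wait for the unmap to complete.
 */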
int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

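/*
 * Register access through MES misc ops.  amdgpu_mes_rreg() has the
 * firmware write the register value into the read_val writeback slot
 * and returns it (0 on failure); the write and wait variants below
 * follow the same misc-op pattern.
 */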
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	int r, val = 0;

	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(adev->mes.read_val_ptr);

error:
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_wait\n");

error:
	return r;
}

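/*
 * Configure the shader debugger through a MES misc op.  trap_en is only
 * passed through on MES API version 14 and newer; older firmware does
 * not take the flag here.
 */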
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				   uint64_t process_context_addr,
				   uint32_t spi_gdbg_per_vmid_cntl,
				   const uint32_t *tcp_watch_cntl,
				   uint32_t flags,
				   bool trap_en)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;
	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
	       sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
	     AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

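/*
 * Translate an (engine, ring index, offset id) triple into the byte
 * offset of the matching slot in struct amdgpu_mes_ctx_meta_data.  The
 * MES self-test rings use this to locate their writeback slots, ring
 * buffer and IB space inside the shared context metadata BO.
 */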
#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while (0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

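/*
 * Create a MES-managed software ring on an existing gang: set up an
 * amdgpu_ring backed by the context metadata BO, derive the queue
 * properties from it and add the hardware queue.  The VM and page-table
 * updates are waited on before the queue is handed to firmware.
 */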
int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		goto clean_up_memory;

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	/* MES lock was already dropped before add_hw_queue failed */
	amdgpu_ring_fini(ring);
	kfree(ring);
	return r;

clean_up_memory:
	kfree(ring);
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						  enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}

int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
				    sizeof(struct amdgpu_mes_ctx_meta_data),
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				    &ctx_data->meta_data_obj,
				    &ctx_data->meta_data_mc_addr,
				    &ctx_data->meta_data_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
		return r;
	}

	if (!ctx_data->meta_data_obj)
		return -ENOMEM;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

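/*
 * Map the context metadata BO into a VM at
 * ctx_data->meta_data_gpu_addr: lock the BO and the page directory with
 * drm_exec, create and map the bo_va, then flush the page-table updates
 * and wait for them before returning.
 */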
int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_sync sync;
	struct drm_exec exec;
	int r;

	amdgpu_sync_create(&sync);

	drm_exec_init(&exec, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		r = -ENOMEM;
		goto error_fini_exec;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);

	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error_del_bo_va;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	drm_exec_fini(&exec);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error_del_bo_va:
	amdgpu_vm_bo_del(adev, bo_va);

error_fini_exec:
	drm_exec_fini(&exec);
	amdgpu_sync_free(&sync);
	return r;
}

int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct dma_fence *fence;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				   &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	drm_exec_fini(&exec);

	return r;
}

static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
						  int pasid, int *gang_id,
						  int queue_type, int num_queue,
						  struct amdgpu_ring **added_rings,
						  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		} else
			DRM_INFO("ring %s ib test pass\n", ring->name);
	}

	return 0;
}

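/*
 * MES self-test: create a temporary VM and PASID, map context metadata,
 * create one gang with one queue for each supported ring type, then run
 * ring and IB tests on every queue before tearing everything down.
 */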
int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
				 { AMDGPU_RING_TYPE_COMPUTE, 1 },
				 { AMDGPU_RING_TYPE_SDMA, 1 } };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm, -1);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_fini;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_vm;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/* On GFX v10.3, the firmware does not yet support mapping SDMA queues. */
		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0) &&
		    adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

error_fini:
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}

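/*
 * Load the MES firmware for a pipe.  GC 11+ first requests the
 * versioned binary (mes_2.bin for the scheduler pipe, mes1.bin for the
 * KIQ pipe) and falls back to the legacy mes.bin name for the scheduler
 * pipe; older ASICs use mes.bin/mes1.bin directly.  With PSP front-door
 * loading, the ucode and data segments are also registered with the
 * firmware loader.
 */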
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
	const struct mes_firmware_header_v1_0 *mes_hdr;
	struct amdgpu_firmware_info *info;
	char ucode_prefix[30];
	char fw_name[40];
	bool need_retry = false;
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));
	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
		need_retry = true;
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
	}

	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
			 ucode_prefix);
		DRM_INFO("try to fall back to %s\n", fw_name);
		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
					 fw_name);
	}

	if (r)
		goto out;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;
	adev->mes.uc_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
	adev->mes.data_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		int ucode, ucode_data;

		if (pipe == AMDGPU_MES_SCHED_PIPE) {
			ucode = AMDGPU_UCODE_ID_CP_MES;
			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
		} else {
			ucode = AMDGPU_UCODE_ID_CP_MES1;
			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
		}

		info = &adev->firmware.ucode[ucode];
		info->ucode_id = ucode;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
			      PAGE_SIZE);

		info = &adev->firmware.ucode[ucode_data];
		info->ucode_id = ucode_data;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
			      PAGE_SIZE);
	}

	return 0;
out:
	amdgpu_ucode_release(&adev->mes.fw[pipe]);
	return r;
}