/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "soc15.h"
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"

#include "amdgpu_reset.h"

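/*
 * VF -> PF acknowledgement: writing 2 sets the RCV_MSG_ACK bit in the VF
 * copy of the mailbox control register, telling the host that the message
 * just read has been consumed.
 */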
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

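/*
 * Raise or drop TRN_MSG_VALID (bit 0 of the transmit control byte) to mark
 * the outgoing mailbox message as valid, or to retract it.
 */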
static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the
 * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL has
 * already been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg is not guaranteed to return
 * the correct value, since RCV_DW0 only holds a valid message while
 * RCV_MSG_VALID is set by the host.
 */
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}

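/*
 * Check whether the pending mailbox message matches @event. Returns -EINVAL
 * on an explicit IDH_FAIL from the host, -ENOENT (without acking) when some
 * other message is pending, and 0 on a match; the message is acked in the
 * IDH_FAIL and match cases.
 */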
static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	int r = 0;
	u32 reg;

	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
	if (reg == IDH_FAIL)
		r = -EINVAL;
	else if (reg != event)
		return -ENOENT;

	xgpu_nv_mailbox_send_ack(adev);

	return r;
}

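/* Non-blocking check of TRN_MSG_ACK (bit 1 of the transmit control byte). */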
static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

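/*
 * Busy-wait in 5 ms steps for the host to assert TRN_MSG_ACK, up to
 * NV_MAILBOX_POLL_ACK_TIMEDOUT ms. Returns 0 on ack, -ETIME on timeout.
 */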
static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	dev_err(adev->dev, "Didn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

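/*
 * Poll every 10 ms for the host to post @event, bounded by
 * NV_MAILBOX_POLL_MSG_TIMEDOUT ms of wall-clock time.
 */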
static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r;
	uint64_t timeout, now;

	now = (uint64_t)ktime_to_ms(ktime_get());
	timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_nv_mailbox_rcv_msg(adev, event);
		if (!r) {
			dev_dbg(adev->dev, "rcv_msg 0x%x after %llu ms\n", event, NV_MAILBOX_POLL_MSG_TIMEDOUT - timeout + now);
			return 0;
		}

		msleep(10);
		now = (uint64_t)ktime_to_ms(ktime_get());
	} while (timeout > now);

	dev_dbg(adev->dev, "nv_poll_msg timed out\n");

	return -ETIME;
}

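/*
 * Send a four-dword request to the host: wait for any stale ack to clear,
 * write req/data1..3 into the transmit dwords, raise TRN_MSG_VALID, then
 * poll for the host's ack before dropping the valid bit again.
 */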
static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID to clear the host's RCV_MSG_VALID; with the
	 * host's RCV_MSG_VALID cleared, hw automatically clears the host's
	 * RCV_MSG_ACK, which in turn clears the VF's TRN_MSG_ACK. Otherwise
	 * xgpu_nv_poll_ack() below would return immediately.
	 */
	do {
		xgpu_nv_mailbox_set_valid(adev, false);
		trn = xgpu_nv_peek_ack(adev);
		if (trn) {
			dev_err_ratelimited(adev->dev, "trn=%x ACK should not assert! wait again!\n", trn);
			msleep(1);
		}
	} while (trn);

	dev_dbg(adev->dev, "trans_msg req = 0x%x, data1 = 0x%x\n", req, data1);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
	xgpu_nv_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_nv_poll_ack(adev);
	if (r)
		dev_err(adev->dev, "Didn't get ack from pf, continuing anyway\n");

	xgpu_nv_mailbox_set_valid(adev, false);
}

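/*
 * Send @req and, for request types that have a defined completion message,
 * poll for that reply, retrying the whole request up to five times. Side
 * effects on success: caches the init-data version and the firmware-reserve
 * checksum key from the receive dwords.
 */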
static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev,
						   enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	int r, retry = 1;
	enum idh_event event = -1;

send_request:
	xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3);

	switch (req) {
	case IDH_REQ_GPU_INIT_ACCESS:
	case IDH_REQ_GPU_FINI_ACCESS:
	case IDH_REQ_GPU_RESET_ACCESS:
		event = IDH_READY_TO_ACCESS_GPU;
		break;
	case IDH_REQ_GPU_INIT_DATA:
		event = IDH_REQ_GPU_INIT_DATA_READY;
		break;
	case IDH_RAS_POISON:
		if (data1 != 0)
			event = IDH_RAS_POISON_READY;
		break;
	case IDH_REQ_RAS_ERROR_COUNT:
		event = IDH_RAS_ERROR_COUNT_READY;
		break;
	case IDH_REQ_RAS_CPER_DUMP:
		event = IDH_RAS_CPER_DUMP_READY;
		break;
	default:
		break;
	}

	if (event != -1) {
		r = xgpu_nv_poll_msg(adev, event);
		if (r) {
			if (retry++ < 5)
				goto send_request;

			if (req != IDH_REQ_GPU_INIT_DATA) {
				dev_err(adev->dev, "Didn't get msg %d from pf, error=%d\n", event, r);
				return r;
			} else /* host doesn't support REQ_GPU_INIT_DATA handshake */
				adev->virt.req_init_data_ver = 0;
		} else {
			if (req == IDH_REQ_GPU_INIT_DATA) {
				adev->virt.req_init_data_ver =
					RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);

				/* assume V1 in case host doesn't set version number */
				if (adev->virt.req_init_data_ver < 1)
					adev->virt.req_init_data_ver = 1;
			}
		}

		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
		}
	}

	return 0;
}

static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	return xgpu_nv_send_access_requests_with_param(adev,
						       req, 0, 0, 0);
}

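/*
 * Ask the host for a GPU reset, retrying the handshake up to
 * NV_MAILBOX_POLL_MSG_REP_MAX times before giving up.
 */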
static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
	int ret, i = 0;

	while (i < NV_MAILBOX_POLL_MSG_REP_MAX) {
		ret = xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
		if (!ret)
			break;
		i++;
	}

	return ret;
}

static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_nv_send_access_requests(adev, req);

	return r;
}

static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}

static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	dev_dbg(adev->dev, "get ack intr and do nothing.\n");
	return 0;
}

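/*
 * Judging by how the two .set handlers use MAILBOX_INT_CNTL, bit 1 appears
 * to gate the TRN_MSG_ACK interrupt and bit 0 the RCV_MSG_VALID interrupt
 * (see xgpu_nv_set_mailbox_rcv_irq() below).
 */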
static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 2;
	else
		tmp &= ~2;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

static void xgpu_nv_ready_to_reset(struct amdgpu_device *adev)
{
	xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
}

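/*
 * Block (up to NV_MAILBOX_POLL_FLR_TIMEDOUT ms, polling every 10 ms) until
 * the host posts IDH_FLR_NOTIFICATION_CMPL, i.e. the function-level reset
 * has completed.
 */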
static int xgpu_nv_wait_reset(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;

	do {
		if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) {
			dev_dbg(adev->dev, "Got NV IDH_FLR_NOTIFICATION_CMPL after %d ms\n", NV_MAILBOX_POLL_FLR_TIMEDOUT - timeout);
			return 0;
		}
		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	dev_dbg(adev->dev, "waiting for NV IDH_FLR_NOTIFICATION_CMPL timed out\n");
	return -ETIME;
}

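/*
 * Deferred handler for IDH_FLR_NOTIFICATION: tear down VF<->PF data
 * exchange and, when no TDR is expected to pick this up (no job running,
 * or an engine timeout is set to infinity), trigger recovery with the
 * AMDGPU_HOST_FLR flag so the reset path knows the host initiated it.
 */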
static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

	amdgpu_virt_fini_data_exchange(adev);

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
		&& (!amdgpu_device_has_job_running(adev) ||
		adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) {
		struct amdgpu_reset_context reset_context;

		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
		set_bit(AMDGPU_HOST_FLR, &reset_context.flags);

		amdgpu_device_gpu_recover(adev, NULL, &reset_context);
	}
}

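/*
 * Enable/disable the "message received" interrupt via bit 0 of
 * MAILBOX_INT_CNTL.
 */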
static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 1;
	else
		tmp &= ~1;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

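/*
 * Top half for mailbox receive interrupts: only FLR notifications are acted
 * on here, by scheduling flr_work on the reset domain when the VF is in
 * SR-IOV runtime mode.
 */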
static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
								&adev->virt.flr_work),
				  "Failed to queue work! at %s",
				  __func__);
		break;
	/* READY_TO_ACCESS_GPU is fetched by the kernel polling thread, so the
	 * IRQ can safely ignore it here since that polling thread will handle
	 * it; other messages, such as FLR complete, are not handled here either.
	 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
	.set = xgpu_nv_set_mailbox_ack_irq,
	.process = xgpu_nv_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
	.set = xgpu_nv_set_mailbox_rcv_irq,
	.process = xgpu_nv_mailbox_rcv_irq,
};

void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
}

int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);

	return 0;
}

void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

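/*
 * Forward a RAS poison-consumption event to the host. UMC IP versions
 * before 12.0.0 use the legacy parameterless form; newer ones stop data
 * exchange first and pass the affected block, and, when a non-zero block is
 * passed, wait for IDH_RAS_POISON_READY (see the request/event mapping in
 * xgpu_nv_send_access_requests_with_param() above).
 */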
static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev,
				       enum amdgpu_ras_block block)
{
	if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
		xgpu_nv_send_access_requests(adev, IDH_RAS_POISON);
	} else {
		amdgpu_virt_fini_data_exchange(adev);
		xgpu_nv_send_access_requests_with_param(adev,
							IDH_RAS_POISON, block, 0, 0);
	}
}

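/*
 * True when the host has flagged a RAS error. The 0xFFFFFFFF check
 * presumably catches an all-ones register read, i.e. the device has become
 * inaccessible mid-interrupt.
 */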
static bool xgpu_nv_rcvd_ras_intr(struct amdgpu_device *adev)
{
	enum idh_event msg = xgpu_nv_mailbox_peek_msg(adev);

	return (msg == IDH_RAS_ERROR_DETECTED || msg == 0xFFFFFFFF);
}

static int xgpu_nv_req_ras_err_count(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_ERROR_COUNT);
}

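/*
 * Request a CPER dump from the host, splitting the 64-bit VF read pointer
 * across the two 32-bit mailbox data dwords (high half in data1, low half
 * in data2).
 */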
static int xgpu_nv_req_ras_cper_dump(struct amdgpu_device *adev, u64 vf_rptr)
{
	uint32_t vf_rptr_hi, vf_rptr_lo;

	vf_rptr_hi = (uint32_t)(vf_rptr >> 32);
	vf_rptr_lo = (uint32_t)(vf_rptr & 0xFFFFFFFF);
	return xgpu_nv_send_access_requests_with_param(
		adev, IDH_REQ_RAS_CPER_DUMP, vf_rptr_hi, vf_rptr_lo, 0);
}

const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
	.req_full_gpu = xgpu_nv_request_full_gpu_access,
	.rel_full_gpu = xgpu_nv_release_full_gpu_access,
	.req_init_data = xgpu_nv_request_init_data,
	.reset_gpu = xgpu_nv_request_reset,
	.ready_to_reset = xgpu_nv_ready_to_reset,
	.wait_reset = xgpu_nv_wait_reset,
	.trans_msg = xgpu_nv_mailbox_trans_msg,
	.ras_poison_handler = xgpu_nv_ras_poison_handler,
	.rcvd_ras_intr = xgpu_nv_rcvd_ras_intr,
	.req_ras_err_count = xgpu_nv_req_ras_err_count,
	.req_ras_cper_dump = xgpu_nv_req_ras_cper_dump,
};