/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "soc15.h"
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"

#include "amdgpu_reset.h"
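/* Ack the currently pending host message by setting bit 1 of the mailbox
 * RCV control byte.
 */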
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}
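/* Set or clear TRN_MSG_VALID in the mailbox TRN control byte. */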
static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the IRQ
 * routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL has already
 * been set to 1 by the host.
 *
 * If not called from the IRQ routine, peek_msg cannot be guaranteed to return
 * the correct value, since it reads RCV_DW0 without checking that
 * RCV_MSG_VALID has been set by the host.
 */
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}
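/* Check whether the pending mailbox message matches the expected event; ack
 * the message and return 0 on success, or an error code when the host
 * reported a failure or a different message is pending.
 */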
static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	int r = 0;
	u32 reg;

	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
	if (reg == IDH_FAIL)
		r = -EINVAL;
	if (reg == IDH_UNRECOV_ERR_NOTIFICATION)
		r = -ENODEV;
	else if (reg != event)
		return -ENOENT;

	xgpu_nv_mailbox_send_ack(adev);

	return r;
}
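/* Return the TRN_MSG_ACK bit from the mailbox TRN control byte. */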
static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}
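/* Poll for TRN_MSG_ACK from the host, giving up after
 * NV_MAILBOX_POLL_ACK_TIMEDOUT msec.
 */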
static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	dev_err(adev->dev, "Didn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}
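/* Poll the mailbox for the expected message, up to
 * NV_MAILBOX_POLL_MSG_TIMEDOUT msec; bail out early and mark the device RMA
 * if the host reports an unrecoverable error.
 */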
static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r;
	uint64_t timeout, now;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	now = (uint64_t)ktime_to_ms(ktime_get());
	timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_nv_mailbox_rcv_msg(adev, event);
		if (!r) {
			dev_dbg(adev->dev, "rcv_msg 0x%x after %llu ms\n",
					event, NV_MAILBOX_POLL_MSG_TIMEDOUT - timeout + now);
			return 0;
		} else if (r == -ENODEV) {
			if (!amdgpu_ras_is_rma(adev)) {
				ras->is_rma = true;
				dev_err(adev->dev, "VF is in an unrecoverable state. "
						"Runtime Services are halted.\n");
			}
			return r;
		}

		msleep(10);
		now = (uint64_t)ktime_to_ms(ktime_get());
	} while (timeout > now);

	dev_dbg(adev->dev, "nv_poll_msg timed out\n");

	return -ETIME;
}
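/* Send a request plus up to three data words to the host through the mailbox
 * and wait for the host's ack.
 */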
static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
	      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID to clear the host's RCV_MSG_ACK; once the host's
	 * RCV_MSG_ACK is cleared, the hardware clears the VF's TRN_MSG_ACK as
	 * well. Otherwise the xgpu_nv_poll_ack() below would return
	 * immediately.
	 */
	do {
		xgpu_nv_mailbox_set_valid(adev, false);
		trn = xgpu_nv_peek_ack(adev);
		if (trn) {
			dev_err_ratelimited(adev->dev, "trn=%x ACK should not assert! wait again!\n", trn);
			msleep(1);
		}
	} while (trn);

	dev_dbg(adev->dev, "trans_msg req = 0x%x, data1 = 0x%x\n", req, data1);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
	xgpu_nv_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_nv_poll_ack(adev);
	if (r)
		dev_err(adev->dev, "Didn't get ack from pf, continue\n");

	xgpu_nv_mailbox_set_valid(adev, false);
}
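/* Send a request to the host and, for requests that expect a reply, poll for
 * the matching "ready" message, retrying on timeout. Also latches the
 * init-data version and checksum key from the reply registers where
 * applicable.
 */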
static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev,
			enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	int r, retry = 1;
	enum idh_event event = -1;

send_request:

	if (amdgpu_ras_is_rma(adev))
		return -ENODEV;

	xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3);

	switch (req) {
	case IDH_REQ_GPU_INIT_ACCESS:
	case IDH_REQ_GPU_FINI_ACCESS:
	case IDH_REQ_GPU_RESET_ACCESS:
		event = IDH_READY_TO_ACCESS_GPU;
		break;
	case IDH_REQ_GPU_INIT_DATA:
		event = IDH_REQ_GPU_INIT_DATA_READY;
		break;
	case IDH_RAS_POISON:
		if (data1 != 0)
			event = IDH_RAS_POISON_READY;
		break;
	case IDH_REQ_RAS_ERROR_COUNT:
		event = IDH_RAS_ERROR_COUNT_READY;
		break;
	case IDH_REQ_RAS_CPER_DUMP:
		event = IDH_RAS_CPER_DUMP_READY;
		break;
	case IDH_REQ_RAS_BAD_PAGES:
		event = IDH_RAS_BAD_PAGES_READY;
		break;
	default:
		break;
	}

	if (event != -1) {
		r = xgpu_nv_poll_msg(adev, event);
		if (r) {
			if (retry++ < 5)
				goto send_request;

			if (req != IDH_REQ_GPU_INIT_DATA) {
				dev_err(adev->dev, "Didn't get msg:%d from pf, error=%d\n", event, r);
				return r;
			} else /* host doesn't support REQ_GPU_INIT_DATA handshake */
				adev->virt.req_init_data_ver = 0;
		} else {
			if (req == IDH_REQ_GPU_INIT_DATA) {
				adev->virt.req_init_data_ver =
					RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);

				/* assume V1 in case host doesn't set version number */
				if (adev->virt.req_init_data_ver < 1)
					adev->virt.req_init_data_ver = 1;
			}
		}

		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
		}
	}

	return 0;
}

static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	return xgpu_nv_send_access_requests_with_param(adev,
						req, 0, 0, 0);
}
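/* Ask the host for a GPU reset, retrying up to NV_MAILBOX_POLL_MSG_REP_MAX
 * times.
 */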
static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
	int ret, i = 0;

	while (i < NV_MAILBOX_POLL_MSG_REP_MAX) {
		ret = xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
		if (!ret)
			break;
		i++;
	}

	return ret;
}

static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_nv_send_access_requests(adev, req);

	return r;
}

static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}

static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					struct amdgpu_iv_entry *entry)
{
	dev_dbg(adev->dev, "get ack intr and do nothing.\n");
	return 0;
}

static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 2;
	else
		tmp &= ~2;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

static void xgpu_nv_ready_to_reset(struct amdgpu_device *adev)
{
	xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
}
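/* Wait up to NV_MAILBOX_POLL_FLR_TIMEDOUT msec for the host to signal FLR
 * completion.
 */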
static int xgpu_nv_wait_reset(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;

	do {
		if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) {
			dev_dbg(adev->dev, "Got NV IDH_FLR_NOTIFICATION_CMPL after %d ms\n", NV_MAILBOX_POLL_FLR_TIMEDOUT - timeout);
			return 0;
		}
		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	dev_dbg(adev->dev, "waiting NV IDH_FLR_NOTIFICATION_CMPL timeout\n");
	return -ETIME;
}
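/* Worker scheduled from the mailbox IRQ on an FLR or unrecoverable-error
 * notification: stop VF/PF data exchange and trigger GPU recovery when
 * appropriate.
 */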
static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	struct amdgpu_reset_context reset_context = { 0 };

	amdgpu_virt_fini_data_exchange(adev);

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
		&& (!amdgpu_device_has_job_running(adev) ||
		adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) {

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
		set_bit(AMDGPU_HOST_FLR, &reset_context.flags);

		amdgpu_device_gpu_recover(adev, NULL, &reset_context);
	}
}
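/* Worker scheduled from the mailbox IRQ on a bad-pages notification: request
 * the updated bad page information from the host and restart data exchange,
 * skipped if a reset currently holds the reset domain lock.
 */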
static void xgpu_nv_mailbox_bad_pages_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

	if (down_read_trylock(&adev->reset_domain->sem)) {
		amdgpu_virt_fini_data_exchange(adev);
		amdgpu_virt_request_bad_pages(adev);
		amdgpu_virt_init_data_exchange(adev);
		up_read(&adev->reset_domain->sem);
	}
}

static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 1;
	else
		tmp &= ~1;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}
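/* Mailbox receive interrupt handler: dispatch host notifications such as FLR,
 * unrecoverable error and bad pages to the appropriate handlers/workers.
 */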
static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	switch (event) {
	case IDH_RAS_BAD_PAGES_NOTIFICATION:
		xgpu_nv_mailbox_send_ack(adev);
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.bad_pages_work);
		break;
	case IDH_UNRECOV_ERR_NOTIFICATION:
		xgpu_nv_mailbox_send_ack(adev);
		if (!amdgpu_ras_is_rma(adev)) {
			ras->is_rma = true;
			dev_err(adev->dev, "VF is in an unrecoverable state. Runtime Services are halted.\n");
		}

		if (amdgpu_sriov_runtime(adev))
			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
						&adev->virt.flr_work),
					"Failed to queue work! at %s",
					__func__);
		break;
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
				   &adev->virt.flr_work),
				  "Failed to queue work! at %s",
				  __func__);
		break;
		/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ
		 * can safely ignore it here since the polling thread will
		 * handle it; other messages such as FLR complete are not
		 * handled here either.
		 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
	.set = xgpu_nv_set_mailbox_ack_irq,
	.process = xgpu_nv_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
	.set = xgpu_nv_set_mailbox_rcv_irq,
	.process = xgpu_nv_mailbox_rcv_irq,
};

void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
}
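/* Register the BIF mailbox receive and ack interrupt sources. */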
int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}
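/* Enable the mailbox interrupts and set up the FLR and bad-pages workers. */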
int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);
	INIT_WORK(&adev->virt.bad_pages_work, xgpu_nv_mailbox_bad_pages_work);

	return 0;
}

void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}
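/* Notify the host that a RAS poison has been consumed; for newer UMC versions
 * the affected block is passed along and data exchange is stopped first.
 */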
static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev,
		enum amdgpu_ras_block block)
{
	if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
		xgpu_nv_send_access_requests(adev, IDH_RAS_POISON);
	} else {
		amdgpu_virt_fini_data_exchange(adev);
		xgpu_nv_send_access_requests_with_param(adev,
					IDH_RAS_POISON, block, 0, 0);
	}
}

static bool xgpu_nv_rcvd_ras_intr(struct amdgpu_device *adev)
{
	enum idh_event msg = xgpu_nv_mailbox_peek_msg(adev);

	return (msg == IDH_RAS_ERROR_DETECTED || msg == 0xFFFFFFFF);
}

static int xgpu_nv_req_ras_err_count(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_ERROR_COUNT);
}

static int xgpu_nv_req_ras_cper_dump(struct amdgpu_device *adev, u64 vf_rptr)
{
	uint32_t vf_rptr_hi, vf_rptr_lo;

	vf_rptr_hi = (uint32_t)(vf_rptr >> 32);
	vf_rptr_lo = (uint32_t)(vf_rptr & 0xFFFFFFFF);
	return xgpu_nv_send_access_requests_with_param(
		adev, IDH_REQ_RAS_CPER_DUMP, vf_rptr_hi, vf_rptr_lo, 0);
}

static int xgpu_nv_req_ras_bad_pages(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_BAD_PAGES);
}

const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
	.req_full_gpu	= xgpu_nv_request_full_gpu_access,
	.rel_full_gpu	= xgpu_nv_release_full_gpu_access,
	.req_init_data  = xgpu_nv_request_init_data,
	.reset_gpu = xgpu_nv_request_reset,
	.ready_to_reset = xgpu_nv_ready_to_reset,
	.wait_reset = xgpu_nv_wait_reset,
	.trans_msg = xgpu_nv_mailbox_trans_msg,
	.ras_poison_handler = xgpu_nv_ras_poison_handler,
	.rcvd_ras_intr = xgpu_nv_rcvd_ras_intr,
	.req_ras_err_count = xgpu_nv_req_ras_err_count,
	.req_ras_cper_dump = xgpu_nv_req_ras_cper_dump,
	.req_bad_pages = xgpu_nv_req_ras_bad_pages,
};