1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
4  * Copyright (C) 2013 Red Hat
5  * Author: Rob Clark <robdclark@gmail.com>
6  */
7 
8 #include <linux/aperture.h>
9 #include <linux/kthread.h>
10 #include <linux/sched/mm.h>
11 #include <uapi/linux/sched/types.h>
12 
13 #include <drm/drm_drv.h>
14 #include <drm/drm_mode_config.h>
15 #include <drm/drm_vblank.h>
16 
17 #include "disp/msm_disp_snapshot.h"
18 #include "msm_drv.h"
19 #include "msm_gem.h"
20 #include "msm_kms.h"
21 #include "msm_mmu.h"
22 
/* Mode-config entry points: msm-specific fb create/atomic check, stock commit */
static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.atomic_check = msm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};
28 
/* Use the msm commit-tail so KMS-specific flush/wait logic runs on commit */
static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
	.atomic_commit_tail = msm_atomic_commit_tail,
};
32 
/*
 * Top-level interrupt handler: forwards the hardware IRQ to the active
 * KMS implementation (MDP4/MDP5/DPU).  Registered via request_irq() with
 * the drm_device as the cookie.
 */
static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	/* An IRQ should never fire before KMS has been set up */
	BUG_ON(!kms);

	return kms->funcs->irq(kms);
}
43 
/*
 * Let the KMS backend mask/clear its interrupt sources before the IRQ
 * line is requested, so no stale interrupts fire into an unready handler.
 */
static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	kms->funcs->irq_preinstall(kms);
}
53 
msm_irq_postinstall(struct drm_device * dev)54 static int msm_irq_postinstall(struct drm_device *dev)
55 {
56 	struct msm_drm_private *priv = dev->dev_private;
57 	struct msm_kms *kms = priv->kms;
58 
59 	BUG_ON(!kms);
60 
61 	if (kms->funcs->irq_postinstall)
62 		return kms->funcs->irq_postinstall(kms);
63 
64 	return 0;
65 }
66 
msm_irq_install(struct drm_device * dev,unsigned int irq)67 static int msm_irq_install(struct drm_device *dev, unsigned int irq)
68 {
69 	struct msm_drm_private *priv = dev->dev_private;
70 	struct msm_kms *kms = priv->kms;
71 	int ret;
72 
73 	if (irq == IRQ_NOTCONNECTED)
74 		return -ENOTCONN;
75 
76 	msm_irq_preinstall(dev);
77 
78 	ret = request_irq(irq, msm_irq, 0, dev->driver->name, dev);
79 	if (ret)
80 		return ret;
81 
82 	kms->irq_requested = true;
83 
84 	ret = msm_irq_postinstall(dev);
85 	if (ret) {
86 		free_irq(irq, dev);
87 		return ret;
88 	}
89 
90 	return 0;
91 }
92 
/*
 * Tear down the display IRQ: let the backend quiesce its interrupt
 * sources, then release the line if it was actually requested (it may
 * not have been if msm_irq_install() bailed out early).
 */
static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	kms->funcs->irq_uninstall(kms);
	if (kms->irq_requested)
		free_irq(kms->irq, dev);
}
102 
/* Deferred vblank enable/disable request, processed on priv->wq */
struct msm_vblank_work {
	struct work_struct work;	/* workqueue item */
	struct drm_crtc *crtc;		/* CRTC whose vblank is toggled */
	bool enable;			/* true = enable, false = disable */
	struct msm_drm_private *priv;	/* owning device private */
};
109 
/*
 * Workqueue handler: performs the actual vblank enable/disable via the
 * KMS backend (which may sleep, hence the deferral from atomic context),
 * then frees the request.
 */
static void vblank_ctrl_worker(struct work_struct *work)
{
	struct msm_vblank_work *vbl_work = container_of(work,
						struct msm_vblank_work, work);
	struct msm_drm_private *priv = vbl_work->priv;
	struct msm_kms *kms = priv->kms;

	if (vbl_work->enable)
		kms->funcs->enable_vblank(kms, vbl_work->crtc);
	else
		kms->funcs->disable_vblank(kms,	vbl_work->crtc);

	/* One-shot request, allocated in vblank_ctrl_queue_work() */
	kfree(vbl_work);
}
124 
/*
 * Queue a deferred vblank enable/disable for @crtc.  Callable from
 * atomic context (GFP_ATOMIC); the work item is freed by the worker.
 * Returns 0 on success or -ENOMEM.
 */
static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
				  struct drm_crtc *crtc, bool enable)
{
	struct msm_vblank_work *req = kzalloc(sizeof(*req), GFP_ATOMIC);

	if (!req)
		return -ENOMEM;

	req->crtc = crtc;
	req->enable = enable;
	req->priv = priv;
	INIT_WORK(&req->work, vblank_ctrl_worker);

	queue_work(priv->wq, &req->work);

	return 0;
}
144 
msm_crtc_enable_vblank(struct drm_crtc * crtc)145 int msm_crtc_enable_vblank(struct drm_crtc *crtc)
146 {
147 	struct drm_device *dev = crtc->dev;
148 	struct msm_drm_private *priv = dev->dev_private;
149 	struct msm_kms *kms = priv->kms;
150 	if (!kms)
151 		return -ENXIO;
152 	drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);
153 	return vblank_ctrl_queue_work(priv, crtc, true);
154 }
155 
msm_crtc_disable_vblank(struct drm_crtc * crtc)156 void msm_crtc_disable_vblank(struct drm_crtc *crtc)
157 {
158 	struct drm_device *dev = crtc->dev;
159 	struct msm_drm_private *priv = dev->dev_private;
160 	struct msm_kms *kms = priv->kms;
161 	if (!kms)
162 		return;
163 	drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);
164 	vblank_ctrl_queue_work(priv, crtc, false);
165 }
166 
msm_kms_fault_handler(void * arg,unsigned long iova,int flags,void * data)167 static int msm_kms_fault_handler(void *arg, unsigned long iova, int flags, void *data)
168 {
169 	struct msm_kms *kms = arg;
170 
171 	if (atomic_read(&kms->fault_snapshot_capture) == 0) {
172 		msm_disp_snapshot_state(kms->dev);
173 		atomic_inc(&kms->fault_snapshot_capture);
174 	}
175 
176 	return -ENOSYS;
177 }
178 
/*
 * Create the GEM address space used for display scanout buffers.
 *
 * Picks the IOMMU from whichever device (MDP/DPU or its MDSS parent)
 * actually has one, wires up the fault handler, and carves out the
 * range [0x1000, 4GiB) for mappings.
 *
 * Returns the new address space, NULL when there is no IOMMU (caller
 * must fall back to physically contiguous buffers), or an ERR_PTR on
 * failure.
 */
struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
{
	struct msm_gem_address_space *aspace;
	struct msm_mmu *mmu;
	struct device *mdp_dev = dev->dev;
	struct device *mdss_dev = mdp_dev->parent;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct device *iommu_dev;

	/*
	 * IOMMUs can be a part of MDSS device tree binding, or the
	 * MDP/DPU device.
	 */
	if (device_iommu_mapped(mdp_dev))
		iommu_dev = mdp_dev;
	else
		iommu_dev = mdss_dev;

	mmu = msm_iommu_disp_new(iommu_dev, 0);
	if (IS_ERR(mmu))
		return ERR_CAST(mmu);

	/* NULL (not an error) means no IOMMU is available at all */
	if (!mmu) {
		drm_info(dev, "no IOMMU, fallback to phys contig buffers for scanout\n");
		return NULL;
	}

	/* Skip the first page so that iova 0 stays an invalid address */
	aspace = msm_gem_address_space_create(mmu, "mdp_kms",
		0x1000, 0x100000000 - 0x1000);
	if (IS_ERR(aspace)) {
		dev_err(mdp_dev, "aspace create, error %pe\n", aspace);
		/* aspace did not take ownership of the mmu; free it here */
		mmu->funcs->destroy(mmu);
		return aspace;
	}

	msm_mmu_set_fault_handler(aspace->mmu, kms, msm_kms_fault_handler);

	return aspace;
}
219 
/*
 * Tear down everything msm_drm_kms_init() set up, in reverse order:
 * event workers, poll helper, snapshot infrastructure, IRQ (with the
 * device powered so register writes reach the hardware), and finally
 * the KMS object itself.
 */
void msm_drm_kms_uninit(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *ddev = priv->dev;
	struct msm_kms *kms = priv->kms;
	int i;

	BUG_ON(!kms);

	/* clean up event worker threads */
	for (i = 0; i < priv->num_crtcs; i++) {
		if (priv->event_thread[i].worker)
			kthread_destroy_worker(priv->event_thread[i].worker);
	}

	drm_kms_helper_poll_fini(ddev);

	msm_disp_snapshot_destroy(ddev);

	/* keep the device powered while the backend quiesces its IRQs */
	pm_runtime_get_sync(dev);
	msm_irq_uninstall(ddev);
	pm_runtime_put_sync(dev);

	/* NOTE(review): kms is non-NULL per the BUG_ON above; the extra
	 * check here only guards against a missing funcs table. */
	if (kms && kms->funcs)
		kms->funcs->destroy(kms);
}
247 
/*
 * Bring up the KMS side of the driver: evict any firmware framebuffer,
 * run the hardware-specific kms_init, initialize mode config, per-CRTC
 * event workers, vblank support, and the display IRQ.
 *
 * Returns 0 on success or a negative errno.  On failure after kms_init,
 * partially initialized state is left for the caller's unwind path
 * (msm_drm_kms_uninit) -- NOTE(review): the err_msm_uninit label only
 * returns; confirm the caller performs the actual cleanup.
 */
int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev = priv->dev;
	struct msm_kms *kms = priv->kms;
	struct drm_crtc *crtc;
	int ret;

	/* the fw fb could be anywhere in memory */
	ret = aperture_remove_all_conflicting_devices(drv->name);
	if (ret)
		return ret;

	/* hardware-generation-specific KMS setup (MDP4/MDP5/DPU) */
	ret = priv->kms_init(ddev);
	if (ret) {
		DRM_DEV_ERROR(dev, "failed to load kms\n");
		return ret;
	}

	/* Enable normalization of plane zpos */
	ddev->mode_config.normalize_zpos = true;

	ddev->mode_config.funcs = &mode_config_funcs;
	ddev->mode_config.helper_private = &mode_config_helper_funcs;

	kms->dev = ddev;
	ret = kms->funcs->hw_init(kms);
	if (ret) {
		DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
		goto err_msm_uninit;
	}

	drm_helper_move_panel_connectors_to_head(ddev);

	/* one dedicated event worker per CRTC, running at FIFO priority */
	drm_for_each_crtc(crtc, ddev) {
		struct msm_drm_thread *ev_thread;

		/* initialize event thread */
		ev_thread = &priv->event_thread[drm_crtc_index(crtc)];
		ev_thread->dev = ddev;
		ev_thread->worker = kthread_run_worker(0, "crtc_event:%d", crtc->base.id);
		if (IS_ERR(ev_thread->worker)) {
			ret = PTR_ERR(ev_thread->worker);
			DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
			/* clear so msm_drm_kms_uninit() skips this slot */
			ev_thread->worker = NULL;
			goto err_msm_uninit;
		}

		sched_set_fifo(ev_thread->worker->task);
	}

	ret = drm_vblank_init(ddev, priv->num_crtcs);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
		goto err_msm_uninit;
	}

	/* power the device for IRQ setup register accesses */
	pm_runtime_get_sync(dev);
	ret = msm_irq_install(ddev, kms->irq);
	pm_runtime_put_sync(dev);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
		goto err_msm_uninit;
	}

	/* snapshot support is best-effort: log the failure but continue */
	ret = msm_disp_snapshot_init(ddev);
	if (ret)
		DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);

	drm_mode_config_reset(ddev);

	return 0;

err_msm_uninit:
	return ret;
}
324 
msm_kms_pm_prepare(struct device * dev)325 int msm_kms_pm_prepare(struct device *dev)
326 {
327 	struct msm_drm_private *priv = dev_get_drvdata(dev);
328 	struct drm_device *ddev = priv ? priv->dev : NULL;
329 
330 	if (!priv || !priv->kms)
331 		return 0;
332 
333 	return drm_mode_config_helper_suspend(ddev);
334 }
335 
msm_kms_pm_complete(struct device * dev)336 void msm_kms_pm_complete(struct device *dev)
337 {
338 	struct msm_drm_private *priv = dev_get_drvdata(dev);
339 	struct drm_device *ddev = priv ? priv->dev : NULL;
340 
341 	if (!priv || !priv->kms)
342 		return;
343 
344 	drm_mode_config_helper_resume(ddev);
345 }
346 
msm_kms_shutdown(struct platform_device * pdev)347 void msm_kms_shutdown(struct platform_device *pdev)
348 {
349 	struct msm_drm_private *priv = platform_get_drvdata(pdev);
350 	struct drm_device *drm = priv ? priv->dev : NULL;
351 
352 	/*
353 	 * Shutdown the hw if we're far enough along where things might be on.
354 	 * If we run this too early, we'll end up panicking in any variety of
355 	 * places. Since we don't register the drm device until late in
356 	 * msm_drm_init, drm_dev->registered is used as an indicator that the
357 	 * shutdown will be successful.
358 	 */
359 	if (drm && drm->registered && priv->kms)
360 		drm_atomic_helper_shutdown(drm);
361 }
362