/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#ifndef PVR_DEVICE_H
#define PVR_DEVICE_H

#include "pvr_ccb.h"
#include "pvr_device_info.h"
#include "pvr_fw.h"
#include "pvr_params.h"
#include "pvr_rogue_fwif_stream.h"
#include "pvr_stream.h"

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_mm.h>

#include <linux/bits.h>
#include <linux/compiler_attributes.h>
#include <linux/compiler_types.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/math.h>
#include <linux/mutex.h>
#include <linux/spinlock_types.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/xarray.h>

/* Forward declaration from <linux/clk.h>. */
struct clk;

/* Forward declaration from <linux/firmware.h>. */
struct firmware;

/* Forward declaration from <linux/reset.h>. */
struct reset_control;

/**
 * struct pvr_gpu_id - Hardware GPU ID information for a PowerVR device
 * @b: Branch ID.
 * @v: Version ID.
 * @n: Number of scalable units.
 * @c: Config ID.
 */
struct pvr_gpu_id {
	u16 b, v, n, c;
};

/**
 * struct pvr_fw_version - Firmware version information
 * @major: Major version number.
 * @minor: Minor version number.
 */
struct pvr_fw_version {
	u16 major, minor;
};

/**
 * struct pvr_device - powervr-specific wrapper for &struct drm_device
 */
struct pvr_device {
	/**
	 * @base: The underlying &struct drm_device.
	 *
	 * Do not access this member directly, instead call
	 * from_pvr_device().
	 */
	struct drm_device base;

	/** @gpu_id: GPU ID detected at runtime. */
	struct pvr_gpu_id gpu_id;

	/**
	 * @features: Hardware feature information.
	 *
	 * Do not access this member directly, instead use PVR_HAS_FEATURE()
	 * or PVR_FEATURE_VALUE() macros.
	 */
	struct pvr_device_features features;

	/**
	 * @quirks: Hardware quirk information.
	 *
	 * Do not access this member directly, instead use PVR_HAS_QUIRK().
	 */
	struct pvr_device_quirks quirks;

	/**
	 * @enhancements: Hardware enhancement information.
	 *
	 * Do not access this member directly, instead use
	 * PVR_HAS_ENHANCEMENT().
	 */
	struct pvr_device_enhancements enhancements;

	/** @fw_version: Firmware version detected at runtime. */
	struct pvr_fw_version fw_version;

	/** @regs_resource: Resource representing device control registers. */
	struct resource *regs_resource;

	/**
	 * @regs: Device control registers.
	 *
	 * These are mapped into memory when the device is initialized; that
	 * location is where this pointer points.
	 */
	void __iomem *regs;

	/**
	 * @core_clk: General core clock.
	 *
	 * This is the primary clock used by the entire GPU core.
	 */
	struct clk *core_clk;

	/**
	 * @sys_clk: Optional system bus clock.
	 *
	 * This may be used on some platforms to provide an independent clock to the SoC Interface
	 * (SOCIF). If present, this needs to be enabled/disabled together with @core_clk.
	 */
	struct clk *sys_clk;

	/**
	 * @mem_clk: Optional memory clock.
	 *
	 * This may be used on some platforms to provide an independent clock to the Memory
	 * Interface (MEMIF). If present, this needs to be enabled/disabled together with @core_clk.
	 */
	struct clk *mem_clk;

	/** @power: Power-domain bookkeeping for this device. */
	struct pvr_device_power {
		/** @power.domain_devs: Power domain devices attached to this GPU. */
		struct device **domain_devs;

		/** @power.domain_links: Device links to the attached power domains. */
		struct device_link **domain_links;

		/** @power.domain_count: Number of attached power domains. */
		u32 domain_count;
	} power;

	/**
	 * @reset: Optional reset line.
	 *
	 * This may be used on some platforms to provide a reset line that needs to be de-asserted
	 * after the power-up procedure. It would also need to be asserted after the power-down
	 * procedure.
	 */
	struct reset_control *reset;

	/** @irq: IRQ number. */
	int irq;

	/** @fwccb: Firmware CCB. */
	struct pvr_ccb fwccb;

	/**
	 * @kernel_vm_ctx: Virtual memory context used for kernel mappings.
	 *
	 * This is used for mappings in the firmware address region when a META firmware processor
	 * is in use.
	 *
	 * When a MIPS firmware processor is in use, this will be %NULL.
	 */
	struct pvr_vm_context *kernel_vm_ctx;

	/** @fw_dev: Firmware related data. */
	struct pvr_fw_device fw_dev;

	/**
	 * @params: Device-specific parameters.
	 *
	 * The values of these parameters are initialized from the
	 * defaults specified as module parameters. They may be
	 * modified at runtime via debugfs (if enabled).
	 */
	struct pvr_device_params params;

	/** @stream_musthave_quirks: Bit array of "must-have" quirks for stream commands. */
	u32 stream_musthave_quirks[PVR_STREAM_TYPE_MAX][PVR_STREAM_EXTHDR_TYPE_MAX];

	/**
	 * @mmu_flush_cache_flags: Records which MMU caches require flushing
	 * before submitting the next job.
	 */
	atomic_t mmu_flush_cache_flags;

	/**
	 * @ctx_ids: Array of contexts belonging to this device. Array members
	 * are of type "struct pvr_context *".
	 *
	 * This array is used to allocate IDs used by the firmware.
	 */
	struct xarray ctx_ids;

	/**
	 * @free_list_ids: Array of free lists belonging to this device. Array members
	 * are of type "struct pvr_free_list *".
	 *
	 * This array is used to allocate IDs used by the firmware.
	 */
	struct xarray free_list_ids;

	/**
	 * @job_ids: Array of jobs belonging to this device. Array members
	 * are of type "struct pvr_job *".
	 */
	struct xarray job_ids;

	/**
	 * @queues: Queue-related fields.
	 */
	struct {
		/** @queues.active: Active queue list. */
		struct list_head active;

		/** @queues.idle: Idle queue list. */
		struct list_head idle;

		/**
		 * @queues.lock: Lock protecting access to the active/idle
		 * lists.
		 */
		struct mutex lock;
	} queues;

	/**
	 * @watchdog: Watchdog for communications with firmware.
	 */
	struct {
		/** @watchdog.work: Work item for watchdog callback. */
		struct delayed_work work;

		/**
		 * @watchdog.old_kccb_cmds_executed: KCCB command execution
		 * count at last watchdog poll.
		 */
		u32 old_kccb_cmds_executed;

		/**
		 * @watchdog.kccb_stall_count: Number of watchdog polls
		 * KCCB has been stalled for.
		 */
		u32 kccb_stall_count;
	} watchdog;

	/**
	 * @kccb: Circular buffer for communications with firmware.
	 */
	struct {
		/** @kccb.ccb: Kernel CCB. */
		struct pvr_ccb ccb;

		/** @kccb.rtn_q: Waitqueue for KCCB command return waiters. */
		wait_queue_head_t rtn_q;

		/** @kccb.rtn_obj: Object representing KCCB return slots. */
		struct pvr_fw_object *rtn_obj;

		/**
		 * @kccb.rtn: Pointer to CPU mapping of KCCB return slots.
		 * Must be accessed by READ_ONCE()/WRITE_ONCE().
		 */
		u32 *rtn;

		/** @kccb.slot_count: Total number of KCCB slots available. */
		u32 slot_count;

		/**
		 * @kccb.reserved_count: Number of KCCB slots reserved for
		 * future use.
		 */
		u32 reserved_count;

		/**
		 * @kccb.waiters: List of KCCB slot waiters.
		 */
		struct list_head waiters;

		/** @kccb.fence_ctx: KCCB fence context. */
		struct {
			/**
			 * @kccb.fence_ctx.id: KCCB fence context ID
			 * allocated with dma_fence_context_alloc().
			 */
			u64 id;

			/**
			 * @kccb.fence_ctx.seqno: Sequence number incremented
			 * each time a fence is created.
			 */
			atomic_t seqno;

			/**
			 * @kccb.fence_ctx.lock: Lock used to synchronize
			 * access to fences allocated by this context.
			 */
			spinlock_t lock;
		} fence_ctx;
	} kccb;

	/**
	 * @lost: %true if the device has been lost.
	 *
	 * This variable is set if the device has become irretrievably unavailable, e.g. if the
	 * firmware processor has stopped responding and can not be revived via a hard reset.
	 */
	bool lost;

	/**
	 * @reset_sem: Reset semaphore.
	 *
	 * GPU reset code will lock this for writing. Any code that submits commands to the firmware
	 * that isn't in an IRQ handler or on the scheduler workqueue must lock this for reading.
	 * Once this has been successfully locked, &pvr_dev->lost _must_ be checked, and -%EIO must
	 * be returned if it is set.
	 */
	struct rw_semaphore reset_sem;
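
	/*
	 * Illustrative sketch (not taken from the driver) of the locking
	 * pattern described for @reset_sem above, using a hypothetical
	 * submission helper:
	 *
	 *	int pvr_example_submit(struct pvr_device *pvr_dev)
	 *	{
	 *		int err = 0;
	 *
	 *		down_read(&pvr_dev->reset_sem);
	 *
	 *		if (pvr_dev->lost) {
	 *			err = -EIO;
	 *			goto out_unlock;
	 *		}
	 *
	 *		// ... submit commands to the firmware ...
	 *
	 *	out_unlock:
	 *		up_read(&pvr_dev->reset_sem);
	 *		return err;
	 *	}
	 */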

	/** @sched_wq: Workqueue for schedulers. */
	struct workqueue_struct *sched_wq;

	/**
	 * @ctx_list_lock: Lock to be held when accessing the context list in
	 * struct pvr_file.
	 */
	spinlock_t ctx_list_lock;

	/** @has_safety_events: Whether this device can raise safety events. */
	bool has_safety_events;
};

/**
 * struct pvr_file - powervr-specific data to be assigned to &struct
 * drm_file.driver_priv
 */
struct pvr_file {
	/**
	 * @file: A reference to the parent &struct drm_file.
	 *
	 * Do not access this member directly, instead call from_pvr_file().
	 */
	struct drm_file *file;

	/**
	 * @pvr_dev: A reference to the powervr-specific wrapper for the
	 * associated device. Saves on repeated calls to to_pvr_device().
	 */
	struct pvr_device *pvr_dev;

	/**
	 * @ctx_handles: Array of contexts belonging to this file. Array members
	 * are of type "struct pvr_context *".
	 *
	 * This array is used to allocate handles returned to userspace.
	 */
	struct xarray ctx_handles;

	/**
	 * @free_list_handles: Array of free lists belonging to this file. Array
	 * members are of type "struct pvr_free_list *".
	 *
	 * This array is used to allocate handles returned to userspace.
	 */
	struct xarray free_list_handles;

	/**
	 * @hwrt_handles: Array of HWRT datasets belonging to this file. Array
	 * members are of type "struct pvr_hwrt_dataset *".
	 *
	 * This array is used to allocate handles returned to userspace.
	 */
	struct xarray hwrt_handles;

	/**
	 * @vm_ctx_handles: Array of VM contexts belonging to this file. Array
	 * members are of type "struct pvr_vm_context *".
	 *
	 * This array is used to allocate handles returned to userspace.
	 */
	struct xarray vm_ctx_handles;

	/** @contexts: PVR context list. */
	struct list_head contexts;
};

/**
 * PVR_HAS_FEATURE() - Tests whether a PowerVR device has a given feature
 * @pvr_dev: [IN] Target PowerVR device.
 * @feature: [IN] Hardware feature name.
 *
 * Feature names are derived from those found in &struct pvr_device_features by
 * dropping the 'has_' prefix, which is applied by this macro.
 *
 * Return:
 * * true if the named feature is present in the hardware
 * * false if the named feature is not present in the hardware
 */
#define PVR_HAS_FEATURE(pvr_dev, feature) ((pvr_dev)->features.has_##feature)
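
/*
 * Example usage (illustrative only), using a feature name that also appears in
 * pvr_round_up_to_cacheline_size() below:
 *
 *	if (PVR_HAS_FEATURE(pvr_dev, slc_cache_line_size_bits)) {
 *		// Expands to pvr_dev->features.has_slc_cache_line_size_bits.
 *	}
 */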

/**
 * PVR_FEATURE_VALUE() - Gets a PowerVR device feature value
 * @pvr_dev: [IN] Target PowerVR device.
 * @feature: [IN] Feature name.
 * @value_out: [OUT] Feature value.
 *
 * This macro will get a feature value for those features that have values.
 * If the feature is not present, nothing will be stored to @value_out.
 *
 * Feature names are derived from those found in &struct pvr_device_features by
 * dropping the 'has_' prefix.
 *
 * Return:
 * * 0 on success, or
 * * -%EINVAL if the named feature is not present in the hardware
 */
#define PVR_FEATURE_VALUE(pvr_dev, feature, value_out)             \
	({                                                         \
		struct pvr_device *_pvr_dev = pvr_dev;             \
		int _ret = -EINVAL;                                \
		if (_pvr_dev->features.has_##feature) {            \
			*(value_out) = _pvr_dev->features.feature; \
			_ret = 0;                                  \
		}                                                  \
		_ret;                                              \
	})
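
/*
 * Example usage (illustrative only); this mirrors the call made in
 * pvr_round_up_to_cacheline_size() below:
 *
 *	u16 slc_cacheline_size_bits;
 *
 *	if (PVR_FEATURE_VALUE(pvr_dev, slc_cache_line_size_bits,
 *			      &slc_cacheline_size_bits))
 *		return -EINVAL;	// Feature absent; @value_out left untouched.
 */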

/**
 * PVR_HAS_QUIRK() - Tests whether a physical device has a given quirk
 * @pvr_dev: [IN] Target PowerVR device.
 * @quirk: [IN] Hardware quirk name.
 *
 * Quirk numbers are derived from those found in &struct pvr_device_quirks by
 * dropping the 'has_brn' prefix, which is applied by this macro.
 *
 * Return:
 * * true if the quirk is present in the hardware, or
 * * false if the quirk is not present in the hardware.
 */
#define PVR_HAS_QUIRK(pvr_dev, quirk) ((pvr_dev)->quirks.has_brn##quirk)

/**
 * PVR_HAS_ENHANCEMENT() - Tests whether a physical device has a given
 * enhancement
 * @pvr_dev: [IN] Target PowerVR device.
 * @enhancement: [IN] Hardware enhancement name.
 *
 * Enhancement numbers are derived from those found in &struct
 * pvr_device_enhancements by dropping the 'has_ern' prefix, which is applied
 * by this macro.
 *
 * Return:
 * * true if the enhancement is present in the hardware, or
 * * false if the enhancement is not present in the hardware.
 */
#define PVR_HAS_ENHANCEMENT(pvr_dev, enhancement) ((pvr_dev)->enhancements.has_ern##enhancement)
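
/*
 * Example usage (illustrative only; the BRN/ERN numbers and helper functions
 * below are placeholders, not a statement of what exists for any given GPU):
 *
 *	if (PVR_HAS_QUIRK(pvr_dev, 12345))		// quirks.has_brn12345
 *		pvr_example_apply_workaround(pvr_dev);
 *
 *	if (PVR_HAS_ENHANCEMENT(pvr_dev, 67890))	// enhancements.has_ern67890
 *		pvr_example_use_fast_path(pvr_dev);
 */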

#define from_pvr_device(pvr_dev) (&(pvr_dev)->base)

#define to_pvr_device(drm_dev) container_of_const(drm_dev, struct pvr_device, base)

#define from_pvr_file(pvr_file) ((pvr_file)->file)

#define to_pvr_file(file) ((file)->driver_priv)
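
/*
 * Example usage (illustrative only): converting between the DRM core types and
 * the powervr-specific wrappers in a hypothetical ioctl handler:
 *
 *	static int pvr_example_ioctl(struct drm_device *drm_dev, void *raw_args,
 *				     struct drm_file *file)
 *	{
 *		struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
 *		struct pvr_file *pvr_file = to_pvr_file(file);
 *
 *		return pvr_example_handle(pvr_dev, pvr_file, raw_args);
 *	}
 */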

/**
 * PVR_PACKED_BVNC() - Packs B, V, N and C values into a 64-bit unsigned integer
 * @b: Branch ID.
 * @v: Version ID.
 * @n: Number of scalable units.
 * @c: Config ID.
 *
 * The packed layout is as follows:
 *
 *    +--------+--------+--------+-------+
 *    | 63..48 | 47..32 | 31..16 | 15..0 |
 *    +========+========+========+=======+
 *    | B      | V      | N      | C     |
 *    +--------+--------+--------+-------+
 *
 * pvr_gpu_id_to_packed_bvnc() should be used instead of this macro when a
 * &struct pvr_gpu_id is available in order to ensure proper type checking.
 *
 * Return: Packed BVNC.
 */
/* clang-format off */
#define PVR_PACKED_BVNC(b, v, n, c) \
	((((u64)(b) & GENMASK_ULL(15, 0)) << 48) | \
	 (((u64)(v) & GENMASK_ULL(15, 0)) << 32) | \
	 (((u64)(n) & GENMASK_ULL(15, 0)) << 16) | \
	 (((u64)(c) & GENMASK_ULL(15, 0)) << 0))
/* clang-format on */

/**
 * pvr_gpu_id_to_packed_bvnc() - Packs B, V, N and C values into a 64-bit
 * unsigned integer
 * @gpu_id: GPU ID.
 *
 * The packed layout is as follows:
 *
 *    +--------+--------+--------+-------+
 *    | 63..48 | 47..32 | 31..16 | 15..0 |
 *    +========+========+========+=======+
 *    | B      | V      | N      | C     |
 *    +--------+--------+--------+-------+
 *
 * This should be used in preference to PVR_PACKED_BVNC() when a &struct
 * pvr_gpu_id is available in order to ensure proper type checking.
 *
 * Return: Packed BVNC.
 */
static __always_inline u64
pvr_gpu_id_to_packed_bvnc(struct pvr_gpu_id *gpu_id)
{
	return PVR_PACKED_BVNC(gpu_id->b, gpu_id->v, gpu_id->n, gpu_id->c);
}

static __always_inline void
packed_bvnc_to_pvr_gpu_id(u64 bvnc, struct pvr_gpu_id *gpu_id)
{
	gpu_id->b = (bvnc & GENMASK_ULL(63, 48)) >> 48;
	gpu_id->v = (bvnc & GENMASK_ULL(47, 32)) >> 32;
	gpu_id->n = (bvnc & GENMASK_ULL(31, 16)) >> 16;
	gpu_id->c = bvnc & GENMASK_ULL(15, 0);
}
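
/*
 * Example usage (illustrative only): packing a &struct pvr_gpu_id into the
 * canonical 64-bit form (e.g. for lookups keyed on BVNC) and unpacking a raw
 * 64-bit value back into its components:
 *
 *	struct pvr_gpu_id gpu_id = { .b = 36, .v = 53, .n = 104, .c = 796 };
 *	u64 bvnc = pvr_gpu_id_to_packed_bvnc(&gpu_id);
 *
 *	packed_bvnc_to_pvr_gpu_id(bvnc, &gpu_id);
 */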

int pvr_device_init(struct pvr_device *pvr_dev);
void pvr_device_fini(struct pvr_device *pvr_dev);
void pvr_device_reset(struct pvr_device *pvr_dev);

bool
pvr_device_has_uapi_quirk(struct pvr_device *pvr_dev, u32 quirk);
bool
pvr_device_has_uapi_enhancement(struct pvr_device *pvr_dev, u32 enhancement);
bool
pvr_device_has_feature(struct pvr_device *pvr_dev, u32 feature);

/**
 * PVR_CR_FIELD_GET() - Extract a single field from a PowerVR control register
 * @val: Value of the target register.
 * @field: Field specifier, as defined in "pvr_rogue_cr_defs.h".
 *
 * Return: The extracted field.
 */
#define PVR_CR_FIELD_GET(val, field) FIELD_GET(~ROGUE_CR_##field##_CLRMSK, val)
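
/*
 * Example usage (illustrative; the register and field names below are
 * placeholders for definitions found in "pvr_rogue_cr_defs.h"):
 *
 *	u32 reg = pvr_cr_read32(pvr_dev, ROGUE_CR_EXAMPLE);
 *	u32 field = PVR_CR_FIELD_GET(reg, EXAMPLE_FIELD);
 *
 * The macro expands to FIELD_GET(~ROGUE_CR_EXAMPLE_FIELD_CLRMSK, reg), i.e. the
 * field mask is the complement of the corresponding "clear mask" definition.
 */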

/**
 * pvr_cr_read32() - Read a 32-bit register from a PowerVR device
 * @pvr_dev: Target PowerVR device.
 * @reg: Target register.
 *
 * Return: The value of the requested register.
 */
static __always_inline u32
pvr_cr_read32(struct pvr_device *pvr_dev, u32 reg)
{
	return ioread32(pvr_dev->regs + reg);
}

/**
 * pvr_cr_read64() - Read a 64-bit register from a PowerVR device
 * @pvr_dev: Target PowerVR device.
 * @reg: Target register.
 *
 * Return: The value of the requested register.
 */
static __always_inline u64
pvr_cr_read64(struct pvr_device *pvr_dev, u32 reg)
{
	return ioread64(pvr_dev->regs + reg);
}

/**
 * pvr_cr_write32() - Write to a 32-bit register in a PowerVR device
 * @pvr_dev: Target PowerVR device.
 * @reg: Target register.
 * @val: Value to write.
 */
static __always_inline void
pvr_cr_write32(struct pvr_device *pvr_dev, u32 reg, u32 val)
{
	iowrite32(val, pvr_dev->regs + reg);
}

/**
 * pvr_cr_write64() - Write to a 64-bit register in a PowerVR device
 * @pvr_dev: Target PowerVR device.
 * @reg: Target register.
 * @val: Value to write.
 */
static __always_inline void
pvr_cr_write64(struct pvr_device *pvr_dev, u32 reg, u64 val)
{
	iowrite64(val, pvr_dev->regs + reg);
}

/**
 * pvr_cr_poll_reg32() - Wait for a 32-bit register to match a given value by
 * polling
 * @pvr_dev: Target PowerVR device.
 * @reg_addr: Address of register.
 * @reg_value: Expected register value (after masking).
 * @reg_mask: Mask of bits valid for comparison with @reg_value.
 * @timeout_usec: Timeout length, in us.
 *
 * Return:
 * * 0 on success, or
 * * -%ETIMEDOUT on timeout.
 */
static __always_inline int
pvr_cr_poll_reg32(struct pvr_device *pvr_dev, u32 reg_addr, u32 reg_value,
		  u32 reg_mask, u64 timeout_usec)
{
	u32 value;

	return readl_poll_timeout(pvr_dev->regs + reg_addr, value,
				  (value & reg_mask) == reg_value, 0, timeout_usec);
}
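
/*
 * Example usage (illustrative; the register/field names and timeout constant
 * are placeholders):
 *
 *	err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_EXAMPLE_STATUS,
 *				ROGUE_CR_EXAMPLE_STATUS_IDLE_EN,
 *				ROGUE_CR_EXAMPLE_STATUS_IDLE_EN,
 *				EXAMPLE_POLL_TIMEOUT_USEC);
 *	if (err)
 *		return err;	// -ETIMEDOUT if the masked value never matched.
 */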

/**
 * pvr_cr_poll_reg64() - Wait for a 64-bit register to match a given value by
 * polling
 * @pvr_dev: Target PowerVR device.
 * @reg_addr: Address of register.
 * @reg_value: Expected register value (after masking).
 * @reg_mask: Mask of bits valid for comparison with @reg_value.
 * @timeout_usec: Timeout length, in us.
 *
 * Return:
 * * 0 on success, or
 * * -%ETIMEDOUT on timeout.
 */
static __always_inline int
pvr_cr_poll_reg64(struct pvr_device *pvr_dev, u32 reg_addr, u64 reg_value,
		  u64 reg_mask, u64 timeout_usec)
{
	u64 value;

	return readq_poll_timeout(pvr_dev->regs + reg_addr, value,
				  (value & reg_mask) == reg_value, 0, timeout_usec);
}

/**
 * pvr_round_up_to_cacheline_size() - Round up a provided size to be cacheline
 * aligned
 * @pvr_dev: Target PowerVR device.
 * @size: Initial size, in bytes.
 *
 * Return:
 * * Size aligned to cacheline size.
 */
static __always_inline size_t
pvr_round_up_to_cacheline_size(struct pvr_device *pvr_dev, size_t size)
{
	u16 slc_cacheline_size_bits = 0;
	u16 slc_cacheline_size_bytes;

	WARN_ON(!PVR_HAS_FEATURE(pvr_dev, slc_cache_line_size_bits));
	PVR_FEATURE_VALUE(pvr_dev, slc_cache_line_size_bits,
			  &slc_cacheline_size_bits);
	slc_cacheline_size_bytes = slc_cacheline_size_bits / 8;

	return round_up(size, slc_cacheline_size_bytes);
}
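
/*
 * Example (illustrative): on a device whose SLC cache line size is 512 bits
 * (64 bytes), the following rounds 100 bytes up to 128 bytes:
 *
 *	size_t aligned = pvr_round_up_to_cacheline_size(pvr_dev, 100);
 */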

/**
 * DOC: IOCTL validation helpers
 *
 * To validate the constraints imposed on IOCTL argument structs, a collection
 * of macros and helper functions exist in ``pvr_device.h``.
 *
 * Of the current helpers, it should only be necessary to call
 * PVR_IOCTL_UNION_PADDING_CHECK() directly. This macro should be used once in
 * every code path which extracts a union member from a struct passed from
 * userspace.
 */

/**
 * pvr_ioctl_union_padding_check() - Validate that the implicit padding between
 * the end of a union member and the end of the union itself is zeroed.
 * @instance: Pointer to the instance of the struct to validate.
 * @union_offset: Offset into the type of @instance of the target union. Must
 * be 64-bit aligned.
 * @union_size: Size of the target union in the type of @instance. Must be
 * 64-bit aligned.
 * @member_size: Size of the target member in the target union specified by
 * @union_offset and @union_size. It is assumed that the offset of the target
 * member is zero relative to @union_offset. Must be 64-bit aligned.
 *
 * You probably want to use PVR_IOCTL_UNION_PADDING_CHECK() instead of calling
 * this function directly, since that macro abstracts away much of the setup,
 * and also provides some static validation. See its docs for details.
 *
 * Return:
 * * %true if every byte between the end of the used member of the union and
 *   the end of that union is zeroed, or
 * * %false otherwise.
 */
static __always_inline bool
pvr_ioctl_union_padding_check(void *instance, size_t union_offset,
			      size_t union_size, size_t member_size)
{
	/*
	 * void pointer arithmetic is technically illegal - cast to a byte
	 * pointer so this addition works safely.
	 */
	void *padding_start = ((u8 *)instance) + union_offset + member_size;
	size_t padding_size = union_size - member_size;

	return mem_is_zero(padding_start, padding_size);
}

/**
 * PVR_STATIC_ASSERT_64BIT_ALIGNED() - Inline assertion for 64-bit alignment.
 * @static_expr_: Target expression to evaluate.
 *
 * If @static_expr_ does not evaluate to a constant integer which would be a
 * 64-bit aligned address (i.e. a multiple of 8), compilation will fail.
 *
 * Return:
 * The value of @static_expr_.
 */
#define PVR_STATIC_ASSERT_64BIT_ALIGNED(static_expr_)                     \
	({                                                                \
		static_assert(((static_expr_) & (sizeof(u64) - 1)) == 0); \
		(static_expr_);                                           \
	})

/**
 * PVR_IOCTL_UNION_PADDING_CHECK() - Validate that the implicit padding between
 * the end of a union member and the end of the union itself is zeroed.
 * @struct_instance_: An expression which evaluates to a pointer to a UAPI data
 * struct.
 * @union_: The name of the union member of @struct_instance_ to check. If the
 * union member is nested within the type of @struct_instance_, this may
 * contain the member access operator (".").
 * @member_: The name of the member of @union_ to assess.
 *
 * This is a wrapper around pvr_ioctl_union_padding_check() which performs
 * alignment checks and simplifies things for the caller.
 *
 * Return:
 * * %true if every byte in @struct_instance_ between the end of @member_ and
 *   the end of @union_ is zeroed, or
 * * %false otherwise.
 */
#define PVR_IOCTL_UNION_PADDING_CHECK(struct_instance_, union_, member_)   \
	({                                                                 \
		typeof(struct_instance_) __instance = (struct_instance_); \
		size_t __union_offset = PVR_STATIC_ASSERT_64BIT_ALIGNED(  \
			offsetof(typeof(*__instance), union_));            \
		size_t __union_size = PVR_STATIC_ASSERT_64BIT_ALIGNED(    \
			sizeof(__instance->union_));                       \
		size_t __member_size = PVR_STATIC_ASSERT_64BIT_ALIGNED(   \
			sizeof(__instance->union_.member_));               \
		pvr_ioctl_union_padding_check(__instance, __union_offset, \
					      __union_size, __member_size);\
	})
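
/*
 * Example usage (illustrative; the struct and member names below are
 * placeholders rather than real UAPI definitions):
 *
 *	struct drm_pvr_example_args *args = ...;
 *
 *	if (!PVR_IOCTL_UNION_PADDING_CHECK(args, data, compute_args))
 *		return -EINVAL;
 *
 * This verifies that every byte of args->data beyond the end of
 * args->data.compute_args is zero, and fails to compile if any of the
 * offsets/sizes involved are not 64-bit aligned.
 */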

/*
 * These utility functions should more properly be placed in pvr_fw.h, but that
 * would cause a dependency cycle between that header and this one. Since
 * they're primarily used in pvr_device.c, let's put them in here for now.
 */

static __always_inline bool
pvr_fw_irq_pending(struct pvr_device *pvr_dev)
{
	return pvr_dev->fw_dev.defs->irq_pending(pvr_dev);
}

static __always_inline void
pvr_fw_irq_clear(struct pvr_device *pvr_dev)
{
	pvr_dev->fw_dev.defs->irq_clear(pvr_dev);
}

#endif /* PVR_DEVICE_H */