/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_MAP_H_
#define _XE_MAP_H_

#include <linux/iosys-map.h>

#include <xe_device.h>

/**
 * DOC: Map layer
 *
 * All access to any memory shared with a device (both sysmem and vram) in the
 * XE driver should go through this layer (xe_map). This layer is built on top
 * of :ref:`driver-api/device-io:Generalizing Access to System and I/O Memory`
 * and with extra hooks into the XE driver that allows adding asserts to memory
 * accesses (e.g. for blocking runtime_pm D3Cold on Discrete Graphics).
 */

/**
 * xe_map_memcpy_to() - Copy a kernel buffer into device-shared memory
 * @xe: xe device the mapping belongs to (used for the mem-access assert)
 * @dst: destination &struct iosys_map (sysmem or vram)
 * @dst_offset: byte offset into @dst at which writing starts
 * @src: source buffer in plain kernel memory
 * @len: number of bytes to copy
 *
 * Asserts that device memory may currently be accessed, then forwards to
 * iosys_map_memcpy_to(), which picks the iomem or sysmem copy path itself.
 */
static inline void xe_map_memcpy_to(struct xe_device *xe, struct iosys_map *dst,
				    size_t dst_offset, const void *src,
				    size_t len)
{
	xe_device_assert_mem_access(xe);
	iosys_map_memcpy_to(dst, dst_offset, src, len);
}

/**
 * xe_map_memcpy_from() - Copy from device-shared memory into a kernel buffer
 * @xe: xe device the mapping belongs to (used for the mem-access assert)
 * @dst: destination buffer in plain kernel memory
 * @src: source &struct iosys_map (sysmem or vram)
 * @src_offset: byte offset into @src at which reading starts
 * @len: number of bytes to copy
 *
 * Asserts that device memory may currently be accessed, then forwards to
 * iosys_map_memcpy_from(), which picks the iomem or sysmem copy path itself.
 */
static inline void xe_map_memcpy_from(struct xe_device *xe, void *dst,
				      const struct iosys_map *src,
				      size_t src_offset, size_t len)
{
	xe_device_assert_mem_access(xe);
	iosys_map_memcpy_from(dst, src, src_offset, len);
}

/**
 * xe_map_memset() - Fill a range of device-shared memory with a byte value
 * @xe: xe device the mapping belongs to (used for the mem-access assert)
 * @dst: destination &struct iosys_map (sysmem or vram)
 * @offset: byte offset into @dst at which the fill starts
 * @value: byte value to write (as in memset())
 * @len: number of bytes to fill
 *
 * Asserts that device memory may currently be accessed, then forwards to
 * iosys_map_memset().
 */
static inline void xe_map_memset(struct xe_device *xe,
				 struct iosys_map *dst, size_t offset,
				 int value, size_t len)
{
	xe_device_assert_mem_access(xe);
	iosys_map_memset(dst, offset, value, len);
}

/* FIXME: We likely should kill these two functions sooner or later */
xe_map_read32(struct xe_device * xe,struct iosys_map * map)48 static inline u32 xe_map_read32(struct xe_device *xe, struct iosys_map *map)
49 {
50 xe_device_assert_mem_access(xe);
51
52 if (map->is_iomem)
53 return readl(map->vaddr_iomem);
54 else
55 return READ_ONCE(*(u32 *)map->vaddr);
56 }
57
/**
 * xe_map_write32() - Write one u32 into device-shared memory
 * @xe: xe device the mapping belongs to (used for the mem-access assert)
 * @map: &struct iosys_map to write to, at offset 0
 * @val: value to store
 *
 * Asserts that device memory may currently be accessed. Uses writel() for
 * iomem mappings and WRITE_ONCE() for sysmem mappings, pairing with the
 * READ_ONCE() in xe_map_read32() so the compiler cannot tear the store.
 */
static inline void xe_map_write32(struct xe_device *xe, struct iosys_map *map,
				  u32 val)
{
	xe_device_assert_mem_access(xe);

	if (map->is_iomem)
		writel(val, map->vaddr_iomem);
	else
		/* Pairs with READ_ONCE() in xe_map_read32(). */
		WRITE_ONCE(*(u32 *)map->vaddr, val);
}

/* Typed read at a byte offset; asserts device mem access first. */
#define xe_map_rd(xe__, map__, offset__, type__) ({ \
	struct xe_device *__xe = xe__; \
	xe_device_assert_mem_access(__xe); \
	iosys_map_rd(map__, offset__, type__); \
})

/* Typed write at a byte offset; asserts device mem access first. */
#define xe_map_wr(xe__, map__, offset__, type__, val__) ({ \
	struct xe_device *__xe = xe__; \
	xe_device_assert_mem_access(__xe); \
	iosys_map_wr(map__, offset__, type__, val__); \
})

/* Array element access: index scaled by sizeof(type__). */
#define xe_map_rd_array(xe__, map__, index__, type__) \
	xe_map_rd(xe__, map__, (index__) * sizeof(type__), type__)

#define xe_map_wr_array(xe__, map__, index__, type__, val__) \
	xe_map_wr(xe__, map__, (index__) * sizeof(type__), type__, val__)

/* u32 shorthands for the array accessors. */
#define xe_map_rd_array_u32(xe__, map__, index__) \
	xe_map_rd_array(xe__, map__, index__, u32)

#define xe_map_wr_array_u32(xe__, map__, index__, val__) \
	xe_map_wr_array(xe__, map__, index__, u32, val__)

/* Ring-buffer access: index wraps modulo size__ (element count). */
#define xe_map_rd_ring_u32(xe__, map__, index__, size__) \
	xe_map_rd_array_u32(xe__, map__, (index__) % (size__))

#define xe_map_wr_ring_u32(xe__, map__, index__, size__, val__) \
	xe_map_wr_array_u32(xe__, map__, (index__) % (size__), val__)

/* Read one struct field from a mapped struct_type__ at struct_offset__. */
#define xe_map_rd_field(xe__, map__, struct_offset__, struct_type__, field__) ({ \
	struct xe_device *__xe = xe__; \
	xe_device_assert_mem_access(__xe); \
	iosys_map_rd_field(map__, struct_offset__, struct_type__, field__); \
})

/* Write one struct field of a mapped struct_type__ at struct_offset__. */
#define xe_map_wr_field(xe__, map__, struct_offset__, struct_type__, field__, val__) ({ \
	struct xe_device *__xe = xe__; \
	xe_device_assert_mem_access(__xe); \
	iosys_map_wr_field(map__, struct_offset__, struct_type__, field__, val__); \
})

#endif