/*-
 * Copyright (c) 2017 Jason A. Harmening.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
26
27 #ifndef _X86_BUS_DMA_H_
28 #define _X86_BUS_DMA_H_
29
30 #define WANT_INLINE_DMAMAP
31 #include <sys/bus_dma.h>
32 #include <sys/_null.h>
33
34 #include <x86/busdma_impl.h>
35
/*
 * Is the DMA address space a 1:1 mapping of the physical address space?
 */
39 static inline bool
bus_dma_id_mapped(bus_dma_tag_t dmat,vm_paddr_t buf,bus_size_t buflen)40 bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
41 {
42 struct bus_dma_tag_common *tc;
43
44 tc = (struct bus_dma_tag_common *)dmat;
45 return (tc->impl->id_mapped(dmat, buf, buflen));
46 }
47
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
52 static inline int
bus_dmamap_create(bus_dma_tag_t dmat,int flags,bus_dmamap_t * mapp)53 bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
54 {
55 struct bus_dma_tag_common *tc;
56
57 tc = (struct bus_dma_tag_common *)dmat;
58 return (tc->impl->map_create(dmat, flags, mapp));
59 }
60
/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
65 static inline int
bus_dmamap_destroy(bus_dma_tag_t dmat,bus_dmamap_t map)66 bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
67 {
68 struct bus_dma_tag_common *tc;
69
70 tc = (struct bus_dma_tag_common *)dmat;
71 return (tc->impl->map_destroy(dmat, map));
72 }
73
/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
79 static inline int
bus_dmamem_alloc(bus_dma_tag_t dmat,void ** vaddr,int flags,bus_dmamap_t * mapp)80 bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
81 bus_dmamap_t *mapp)
82 {
83 struct bus_dma_tag_common *tc;
84
85 tc = (struct bus_dma_tag_common *)dmat;
86 return (tc->impl->mem_alloc(dmat, vaddr, flags, mapp));
87 }
88
/*
 * Free a piece of memory and its associated dmamap, that was allocated
 * via bus_dmamem_alloc.
 */
93 static inline void
bus_dmamem_free(bus_dma_tag_t dmat,void * vaddr,bus_dmamap_t map)94 bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
95 {
96 struct bus_dma_tag_common *tc;
97
98 tc = (struct bus_dma_tag_common *)dmat;
99 tc->impl->mem_free(dmat, vaddr, map);
100 }
101
/*
 * Release the mapping held by map.
 */
105 static inline void
bus_dmamap_unload(bus_dma_tag_t dmat,bus_dmamap_t map)106 bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
107 {
108 struct bus_dma_tag_common *tc;
109
110 if (map != NULL) {
111 tc = (struct bus_dma_tag_common *)dmat;
112 tc->impl->map_unload(dmat, map);
113 }
114 }
115
116 static inline void
bus_dmamap_sync(bus_dma_tag_t dmat,bus_dmamap_t map,bus_dmasync_op_t op)117 bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
118 {
119 struct bus_dma_tag_common *tc;
120
121 if (map != NULL) {
122 tc = (struct bus_dma_tag_common *)dmat;
123 tc->impl->map_sync(dmat, map, op);
124 }
125 }
126
/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
131 static inline int
_bus_dmamap_load_phys(bus_dma_tag_t dmat,bus_dmamap_t map,vm_paddr_t buf,bus_size_t buflen,int flags,bus_dma_segment_t * segs,int * segp)132 _bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
133 bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp)
134 {
135 struct bus_dma_tag_common *tc;
136
137 tc = (struct bus_dma_tag_common *)dmat;
138 return (tc->impl->load_phys(dmat, map, buf, buflen, flags, segs,
139 segp));
140 }
141
142 static inline int
_bus_dmamap_load_ma(bus_dma_tag_t dmat,bus_dmamap_t map,struct vm_page ** ma,bus_size_t tlen,int ma_offs,int flags,bus_dma_segment_t * segs,int * segp)143 _bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
144 bus_size_t tlen, int ma_offs, int flags, bus_dma_segment_t *segs,
145 int *segp)
146 {
147 struct bus_dma_tag_common *tc;
148
149 tc = (struct bus_dma_tag_common *)dmat;
150 return (tc->impl->load_ma(dmat, map, ma, tlen, ma_offs, flags,
151 segs, segp));
152 }
153
/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
158 static inline int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,bus_dmamap_t map,void * buf,bus_size_t buflen,struct pmap * pmap,int flags,bus_dma_segment_t * segs,int * segp)159 _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
160 bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs,
161 int *segp)
162 {
163 struct bus_dma_tag_common *tc;
164
165 tc = (struct bus_dma_tag_common *)dmat;
166 return (tc->impl->load_buffer(dmat, map, buf, buflen, pmap, flags, segs,
167 segp));
168 }
169
170 static inline void
_bus_dmamap_waitok(bus_dma_tag_t dmat,bus_dmamap_t map,struct memdesc * mem,bus_dmamap_callback_t * callback,void * callback_arg)171 _bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
172 struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
173 {
174 struct bus_dma_tag_common *tc;
175
176 if (map != NULL) {
177 tc = (struct bus_dma_tag_common *)dmat;
178 tc->impl->map_waitok(dmat, map, mem, callback, callback_arg);
179 }
180 }
181
182 static inline bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat,bus_dmamap_t map,bus_dma_segment_t * segs,int nsegs,int error)183 _bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
184 bus_dma_segment_t *segs, int nsegs, int error)
185 {
186 struct bus_dma_tag_common *tc;
187
188 tc = (struct bus_dma_tag_common *)dmat;
189 return (tc->impl->map_complete(dmat, map, segs, nsegs, error));
190 }
191
192 #ifdef KMSAN
193 static inline void
_bus_dmamap_load_kmsan(bus_dma_tag_t dmat,bus_dmamap_t map,struct memdesc * mem)194 _bus_dmamap_load_kmsan(bus_dma_tag_t dmat, bus_dmamap_t map,
195 struct memdesc *mem)
196 {
197 struct bus_dma_tag_common *tc;
198
199 tc = (struct bus_dma_tag_common *)dmat;
200 return (tc->impl->load_kmsan(map, mem));
201 }
202 #endif
203
204 #endif /* !_X86_BUS_DMA_H_ */
205