Lines matching "dma", "safe", "map" (excerpt from the ARM dmabounce bounce-buffer code, dmabounce.c)

// SPDX-License-Identifier: GPL-2.0-only
/*
 *  arch/arm/common/dmabounce.c
 *
 *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
 *  limited DMA windows. These functions utilize bounce buffers to
 *  copy data to/from buffers located outside the DMA region. This
 *  only works for systems in which DMA memory is at the bottom of
 *  RAM, the remainder of memory is at the top and the DMA memory
 *  can be marked as ZONE_DMA. Anything beyond that such as discontiguous
 *  DMA windows will require custom implementations that reserve memory
 *  areas at early bootup.
 *
 *  Re-written by Christopher Hoover <ch@murgatroid.com>
 */
#include <linux/page-flags.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <asm/dma-iommu.h>
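
Once a device has been registered with dmabounce (see dmabounce_register_dev() further down), a driver keeps using the ordinary streaming DMA API; the map/unmap routines excerpted below transparently substitute a bounce buffer whenever the target memory lies outside the device's DMA window. A minimal driver-side sketch, with 'dev', 'data' and 'len' as illustrative names:

        /* Bouncing is invisible to the driver: */
        dma_addr_t handle = dma_map_single(dev, data, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, handle))
                return -ENOMEM;
        /* ... run the transfer against 'handle' ... */
        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);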
        /* safe buffer info */
        void *safe;
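
Only two lines of the per-mapping bookkeeping record survive in this excerpt. Reconstructed from the fields used throughout the rest of the file (buf->node, buf->ptr, buf->size, buf->direction, buf->pool, buf->safe, buf->safe_dma_addr), the record looks roughly like the sketch below; the struct name and field order are assumptions:

        /* Assumed layout, reconstructed from usage elsewhere in this excerpt: */
        struct safe_buffer {
                struct list_head node;        /* entry in device_info->safe_buffers */
                /* original request */
                void *ptr;                    /* original ("unsafe") CPU address */
                size_t size;
                int direction;
                /* safe buffer info */
                struct dmabounce_pool *pool;  /* NULL when dma_alloc_coherent() was used */
                void *safe;                   /* bounce-buffer CPU address */
                dma_addr_t safe_dma_addr;     /* bounce-buffer DMA address */
        };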
/* in dmabounce_show() */
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
        /* ... */
                       device_info->small.allocs,
                       device_info->large.allocs,
                       device_info->total_allocs - device_info->small.allocs -
                       device_info->large.allocs,
                       device_info->total_allocs,
                       device_info->map_op_count,
                       device_info->bounce_count);
/* allocate a 'safe' buffer and keep track of it */
/* in alloc_safe_buffer() */
        struct device *dev = device_info->dev;
        /* ... */
        if (size <= device_info->small.size) {
                pool = &device_info->small;
        } else if (size <= device_info->large.size) {
                pool = &device_info->large;
        } else {
                pool = NULL;
        }
        /* ... */
        buf->ptr = ptr;
        buf->size = size;
        buf->direction = dir;
        buf->pool = pool;

        if (pool)
                buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
                                           &buf->safe_dma_addr);
        else
                buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
                                               GFP_ATOMIC);

        if (buf->safe == NULL) {
                dev_warn(dev,
                         "%s: could not alloc dma memory (size=%d)\n",
                         __func__, size);
                /* ... */
        }
        /* ... */
        pool->allocs++;
        device_info->total_allocs++;

        write_lock_irqsave(&device_info->lock, flags);
        list_add(&buf->node, &device_info->safe_buffers);
        write_unlock_irqrestore(&device_info->lock, flags);
/* determine if a buffer is from our "safe" pool */
/* in find_safe_buffer() */
        read_lock_irqsave(&device_info->lock, flags);
        list_for_each_entry(b, &device_info->safe_buffers, node)
                if (b->safe_dma_addr <= safe_dma_addr &&
                    b->safe_dma_addr + b->size > safe_dma_addr) {
                        /* ... */
                }
        read_unlock_irqrestore(&device_info->lock, flags);
/* in free_safe_buffer() */
        dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);
        write_lock_irqsave(&device_info->lock, flags);
        list_del(&buf->node);
        write_unlock_irqrestore(&device_info->lock, flags);

        if (buf->pool)
                dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
        else
                dma_free_coherent(device_info->dev, buf->size, buf->safe,
                                  buf->safe_dma_addr);
/* in find_safe_buffer_dev() */
        if (!dev || !dev->archdata.dmabounce)
                return NULL;
        return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
/* in needs_bounce() */
        if (!dev || !dev->archdata.dmabounce)
                return 0;
        if (dev->dma_mask) {
                unsigned long limit, mask = *dev->dma_mask;

                limit = (mask + 1) & ~mask;
                if (limit && size > limit) {
                        dev_err(dev, "DMA mapping too big (requested %#x "
                                "mask %#Lx)\n", size, *dev->dma_mask);
                        return -E2BIG;
                }

                /* Figure out if we need to bounce from the DMA mask. */
                if ((dma_addr | (dma_addr + size - 1)) & ~mask)
                        return 1;
        }
        return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
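
The final line defers to a per-device callback installed at registration time, so a host bridge can add its own constraints on top of the generic DMA-mask check. A hypothetical callback, assuming a host whose window wraps every 1 MiB (the window size and the function name are made up for illustration):

        /* Hypothetical needs_bounce callback: bounce any mapping that would
         * cross out of the current 1 MiB window. */
        static int example_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
        {
                return (addr & (SZ_1M - 1)) + size > SZ_1M;
        }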
/* in map_single() */
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
        /* ... */
        DO_STATS ( device_info->map_op_count++ );
        /* ... */
                dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
                        __func__, ptr);
        /* ... */
        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
                __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
                buf->safe, buf->safe_dma_addr);
        /* ... */
                dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
                        __func__, ptr, buf->safe, size);
                memcpy(buf->safe, ptr, size);
        /* ... */
        return buf->safe_dma_addr;
/* in unmap_single() */
        BUG_ON(buf->size != size);
        BUG_ON(buf->direction != dir);

        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
                __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
                buf->safe, buf->safe_dma_addr);

        DO_STATS(dev->archdata.dmabounce->bounce_count++);
        /* ... */
                void *ptr = buf->ptr;

                dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
                        __func__, buf->safe, ptr, size);
                memcpy(ptr, buf->safe, size);
        /* ... */
        free_safe_buffer(dev->archdata.dmabounce, buf);
/*
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
/* in dmabounce_map_page() */
                dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer. (basically return things back to the way they should be)
 */
/* in dmabounce_unmap_page() */
        dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
                __func__, dma_addr, size, dir);
/* in __dmabounce_sync_for_cpu() */
        dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
                __func__, addr, sz, dir);
        /* ... */
        off = addr - buf->safe_dma_addr;

        BUG_ON(buf->direction != dir);

        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
                __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
                buf->safe, buf->safe_dma_addr);

        DO_STATS(dev->archdata.dmabounce->bounce_count++);
        /* ... */
                dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
                        __func__, buf->safe + off, buf->ptr + off, sz);
                memcpy(buf->ptr + off, buf->safe + off, sz);
/* in __dmabounce_sync_for_device() */
        dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
                __func__, addr, sz, dir);
        /* ... */
        off = addr - buf->safe_dma_addr;

        BUG_ON(buf->direction != dir);

        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
                __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
                buf->safe, buf->safe_dma_addr);

        DO_STATS(dev->archdata.dmabounce->bounce_count++);
        /* ... */
                dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
                        __func__, buf->ptr + off, buf->safe + off, sz);
                memcpy(buf->safe + off, buf->ptr + off, sz);
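
Both sync helpers operate on exactly the sub-range the driver asks about: the offset into the bounce buffer is recomputed from the DMA handle and only 'sz' bytes are copied. A driver-side sketch (the handle, buffer size and offset are illustrative; assume 'handle' came from a 4 KiB DMA_FROM_DEVICE mapping):

        /* Sync only the last 512 bytes of the bounced mapping; dmabounce
         * computes off = (handle + 3584) - safe_dma_addr and copies just
         * that window from the safe buffer back to the original buffer. */
        dma_sync_single_for_cpu(dev, handle + 3584, 512, DMA_FROM_DEVICE);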
        if (dev->archdata.dmabounce)            /* in dmabounce_dma_supported() */
                return 1;
/* in dmabounce_init_pool() */
        pool->size = size;
        DO_STATS(pool->allocs = 0);
        pool->pool = dma_pool_create(name, dev, size,
                                     0 /* byte alignment */,
                                     0 /* no page-crossing issues */);

        return pool->pool ? 0 : -ENOMEM;
/* in dmabounce_register_dev() */
        /* ... */
                return -ENOMEM;
        ret = dmabounce_init_pool(&device_info->small, dev,
                                  "small_dmabounce_pool", small_buffer_size);
        if (ret) {
                dev_err(dev,
                        "dmabounce: could not allocate DMA pool for %ld byte objects\n",
                        small_buffer_size);
        }
        /* ... */
        ret = dmabounce_init_pool(&device_info->large, dev,
                                  "large_dmabounce_pool", large_buffer_size);
        if (ret) {
                dev_err(dev,
                        "dmabounce: could not allocate DMA pool for %ld byte objects\n",
                        large_buffer_size);
        }
        device_info->dev = dev;
        INIT_LIST_HEAD(&device_info->safe_buffers);
        rwlock_init(&device_info->lock);
        device_info->needs_bounce = needs_bounce_fn;
        /* ... */
        device_info->total_allocs = 0;
        device_info->map_op_count = 0;
        device_info->bounce_count = 0;
        device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
        dev->archdata.dmabounce = device_info;
        /* ... */
        dma_pool_destroy(device_info->small.pool);
/* in dmabounce_unregister_dev() */
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
        dev->archdata.dmabounce = NULL;
        /* ... */
        if (!list_empty(&device_info->safe_buffers)) {
                /* ... */
                BUG();
        }

        if (device_info->small.pool)
                dma_pool_destroy(device_info->small.pool);
        if (device_info->large.pool)
                dma_pool_destroy(device_info->large.pool);
        /* ... */
        if (device_info->attr_res == 0)
                device_remove_file(dev, &dev_attr_dmabounce_stats);
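
Registration and teardown are driven by the host or bus driver rather than by this file. A hedged sketch of the expected call pattern, assuming the usual prototype dmabounce_register_dev(dev, small_size, large_size, needs_bounce_fn); the probe/remove functions, pool sizes and callback name are illustrative:

        static int example_probe(struct device *dev)
        {
                /* 512-byte and 4 KiB pool sizes are assumptions; size them
                 * for the device's typical transfers. */
                int ret = dmabounce_register_dev(dev, 512, 4096,
                                                 example_needs_bounce);

                if (ret)
                        return ret;
                /* Streaming mappings on 'dev' now bounce automatically
                 * whenever example_needs_bounce() returns nonzero. */
                return 0;
        }

        static void example_remove(struct device *dev)
        {
                /* All mappings must be unmapped first; unregistering with
                 * safe buffers still outstanding trips the BUG() above. */
                dmabounce_unregister_dev(dev);
        }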
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");