#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#include <asm/swiotlb.h>
#include <linux/dma-debug.h>

#define ARCH_HAS_DMA_GET_REQUIRED_MASK

#define DMA_ERROR_CODE 0

extern struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);

extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
				    enum dma_data_direction);
extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
				enum dma_data_direction);

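/*
 * dma_alloc_coherent - allocate consistent DMA memory through the
 * machine-vector dma_map_ops.  Returns a CPU virtual address for the
 * buffer and stores the device-visible bus address in *daddr; the
 * allocation is also reported to the DMA-debug infrastructure.
 */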
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *daddr, gfp_t gfp)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	void *caddr;

	caddr = ops->alloc_coherent(dev, size, daddr, gfp);
	debug_dma_alloc_coherent(dev, size, *daddr, caddr);
	return caddr;
}

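/*
 * dma_free_coherent - release a buffer obtained from dma_alloc_coherent().
 * The buffer is unregistered from DMA-debug before the underlying
 * free_coherent op runs, mirroring the order used on allocation.
 */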
static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *caddr, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	debug_dma_free_coherent(dev, size, caddr, daddr);
	ops->free_coherent(dev, size, caddr, daddr);
}
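
/*
 * A minimal usage sketch for the pair above (illustrative only; the
 * device pointer, size, and error handling are hypothetical, and
 * GFP_KERNEL assumes process context):
 *
 *	dma_addr_t bus_addr;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &bus_addr, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	... program bus_addr into the device, access cpu_addr from the CPU ...
 *	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, bus_addr);
 */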

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

#define get_dma_ops(dev) platform_dma_get_ops(dev)

#include <asm-generic/dma-mapping-common.h>

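/*
 * dma_mapping_error - test a dma_addr_t returned by a map operation
 * (e.g. dma_map_single() from the generic header above) and report
 * whether it denotes a failed mapping.  Dispatches to the mapping_error
 * op of the device's dma_map_ops.
 */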
static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->mapping_error(dev, daddr);
}

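/*
 * dma_supported - ask the device's dma_map_ops whether the device can
 * reach memory under the given addressing mask.
 */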
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->dma_supported(dev, mask);
}

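/*
 * dma_set_mask - validate @mask with dma_supported() and, if accepted,
 * store it as the device's DMA addressing mask.  Returns -EIO when the
 * device has no mask pointer or the mask cannot be supported.
 */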
static inline int
dma_set_mask (struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
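
/*
 * Typical driver usage (a sketch, not mandated by this header; the
 * 64-then-32-bit fallback policy is the caller's choice, and
 * DMA_BIT_MASK() comes from <linux/dma-mapping.h>):
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */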
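/*
 * dma_capable - report whether the byte range [addr, addr + size - 1]
 * lies entirely below the device's DMA mask, i.e. whether the device
 * could address it directly, without bounce buffering.
 */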
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return 0;

	return addr + size - 1 <= *dev->dma_mask;
}

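/*
 * phys_to_dma - convert a CPU physical address to a bus address.  An
 * identity mapping on ia64; any IOMMU translation is performed inside
 * the dma_map_ops implementations rather than at this layer.
 */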
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}

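/*
 * dma_to_phys - the inverse of phys_to_dma(); likewise an identity
 * mapping here.
 */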
static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}

static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However,
	 * we do need to ensure that dma_cache_sync() enforces order,
	 * hence the mb().
	 */
	mb();
}

#endif /* _ASM_IA64_DMA_MAPPING_H */