1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Version from mach-generic modified to support PowerTV port
7  * Portions Copyright (C) 2009  Cisco Systems, Inc.
8  * Copyright (C) 2006  Ralf Baechle <ralf@linux-mips.org>
9  *
10  */
11 
12 #ifndef __ASM_MACH_POWERTV_DMA_COHERENCE_H
13 #define __ASM_MACH_POWERTV_DMA_COHERENCE_H
14 
15 #include <linux/sched.h>
16 #include <linux/device.h>
17 #include <asm/mach-powertv/asic.h>
18 
is_kseg2(void * addr)19 static inline bool is_kseg2(void *addr)
20 {
21 	return (unsigned long)addr >= KSEG2;
22 }
23 
virt_to_phys_from_pte(void * addr)24 static inline unsigned long virt_to_phys_from_pte(void *addr)
25 {
26 	pgd_t *pgd;
27 	pud_t *pud;
28 	pmd_t *pmd;
29 	pte_t *ptep, pte;
30 
31 	unsigned long virt_addr = (unsigned long)addr;
32 	unsigned long phys_addr = 0UL;
33 
34 	/* get the page global directory. */
35 	pgd = pgd_offset_k(virt_addr);
36 
37 	if (!pgd_none(*pgd)) {
38 		/* get the page upper directory */
39 		pud = pud_offset(pgd, virt_addr);
40 		if (!pud_none(*pud)) {
41 			/* get the page middle directory */
42 			pmd = pmd_offset(pud, virt_addr);
43 			if (!pmd_none(*pmd)) {
44 				/* get a pointer to the page table entry */
45 				ptep = pte_offset(pmd, virt_addr);
46 				pte = *ptep;
47 				/* check for a valid page */
48 				if (pte_present(pte)) {
49 					/* get the physical address the page is
50 					 * referring to */
51 					phys_addr = (unsigned long)
52 						page_to_phys(pte_page(pte));
53 					/* add the offset within the page */
54 					phys_addr |= (virt_addr & ~PAGE_MASK);
55 				}
56 			}
57 		}
58 	}
59 
60 	return phys_addr;
61 }
62 
plat_map_dma_mem(struct device * dev,void * addr,size_t size)63 static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
64 	size_t size)
65 {
66 	if (is_kseg2(addr))
67 		return phys_to_dma(virt_to_phys_from_pte(addr));
68 	else
69 		return phys_to_dma(virt_to_phys(addr));
70 }
71 
/*
 * Translate a struct page into a device (DMA) address by converting
 * its physical address with the platform's phys_to_dma().
 */
static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
	struct page *page)
{
	return phys_to_dma(page_to_phys(page));
}
77 
/*
 * Inverse of the mapping above: convert a device (DMA) address back
 * into a CPU physical address via the platform's dma_to_phys().
 */
static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
	dma_addr_t dma_addr)
{
	return dma_to_phys(dma_addr);
}
83 
/*
 * Unmapping a DMA buffer requires no work on this platform; the hook
 * is intentionally a no-op.
 */
static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
	size_t size, enum dma_data_direction direction)
{
}
88 
/*
 * Report whether DMA with the given address mask can be supported.
 * Returns 1 when supported, 0 otherwise.
 */
static inline int plat_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA..
	 */
	return mask >= DMA_BIT_MASK(24);
}
101 
/*
 * No extra per-device synchronization is needed on this platform;
 * this hook is intentionally a no-op.
 */
static inline void plat_extra_sync_for_device(struct device *dev)
{
}
105 
/*
 * DMA mappings never fail on this platform, so always report success
 * (0 == no mapping error).
 */
static inline int plat_dma_mapping_error(struct device *dev,
					 dma_addr_t dma_addr)
{
	return 0;
}
111 
/*
 * Devices on this platform are never DMA-coherent with the CPU
 * caches (always returns 0), so explicit cache maintenance is
 * required around DMA transfers.
 */
static inline int plat_device_is_coherent(struct device *dev)
{
	return 0;
}
116 
117 #endif /* __ASM_MACH_POWERTV_DMA_COHERENCE_H */
118