#ifndef _ASM_IA64_PCI_H
#define _ASM_IA64_PCI_H

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/io.h>
#include <asm/scatterlist.h>
#include <asm/hw_irq.h>

/*
 * Can be used to override the logic in pci_scan_bus for skipping already-configured bus
 * numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the
 * loader.
 */
#define pcibios_assign_all_busses()     0

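/*
 * Lowest I/O port and MMIO addresses that the generic PCI resource
 * allocator will hand out to devices on this architecture.
 */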
#define PCIBIOS_MIN_IO          0x1000
#define PCIBIOS_MIN_MEM         0x10000000

void pcibios_config_init(void);

struct pci_dev;

/*
 * PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct
 * correspondence between device bus addresses and CPU physical addresses.
 * Platforms with a hardware I/O MMU _must_ turn this off to suppress the
 * bounce buffer handling code in the block and network device layers.
 * Platforms with separate bus address spaces _must_ turn this off and provide
 * a device DMA mapping implementation that takes care of the necessary
 * address translation.
 *
 * For now, the ia64 platforms which may have separate/multiple bus address
 * spaces all have I/O MMUs which support the merging of physically
 * discontiguous buffers, so we can use that as the sole factor to determine
 * the setting of PCI_DMA_BUS_IS_PHYS.
 */
extern unsigned long ia64_max_iommu_merge_mask;
#define PCI_DMA_BUS_IS_PHYS     (ia64_max_iommu_merge_mask == ~0UL)

static inline void
pcibios_penalize_isa_irq (int irq, int active)
{
        /* We don't do dynamic PCI IRQ allocation */
}

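/*
 * Provides the legacy pci_map_*()/pci_dma_*() wrappers in terms of the
 * generic DMA mapping API.
 */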
#include <asm-generic/pci-dma-compat.h>

#ifdef CONFIG_PCI
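/*
 * Advise drivers on DMA burst sizes: burst in multiples of the cacheline
 * size programmed into the device's PCI_CACHE_LINE_SIZE register.  The
 * register counts 32-bit dwords, hence the multiply by 4; a value of 0
 * falls back to a 1 KiB default.
 */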
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
                                        enum pci_dma_burst_strategy *strat,
                                        unsigned long *strategy_parameter)
{
        unsigned long cacheline_size;
        u8 byte;

        pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
        if (byte == 0)
                cacheline_size = 1024;
        else
                cacheline_size = (int) byte * 4;

        *strat = PCI_DMA_BURST_MULTIPLE;
        *strategy_parameter = cacheline_size;
}
#endif

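/*
 * HAVE_PCI_MMAP tells the generic PCI layer that this architecture supplies
 * pci_mmap_page_range() for mmap()ing device resources from user space;
 * HAVE_PCI_LEGACY does the same for the legacy I/O and memory spaces.
 */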
#define HAVE_PCI_MMAP
extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
                                enum pci_mmap_state mmap_state, int write_combine);
#define HAVE_PCI_LEGACY
extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
                                      struct vm_area_struct *vma,
                                      enum pci_mmap_state mmap_state);

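/*
 * Legacy I/O and memory space accesses are routed through the ia64 machine
 * vector so that individual platforms can override them.
 */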
#define pci_get_legacy_mem      platform_pci_get_legacy_mem
#define pci_legacy_read         platform_pci_legacy_read
#define pci_legacy_write        platform_pci_legacy_write

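/*
 * A host bridge address window: the CPU-side resource it forwards plus the
 * offset between CPU physical addresses and PCI bus addresses within it.
 */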
struct pci_window {
        struct resource resource;
        u64 offset;
};

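/*
 * Per host bridge state: the ACPI handle and PCI segment of the bridge, an
 * optional IOMMU attached to it, and the set of address windows it forwards
 * onto the bus.
 */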
struct pci_controller {
        void *acpi_handle;
        void *iommu;
        int segment;
        int node;               /* nearest node with memory or -1 for global allocation */

        unsigned int windows;
        struct pci_window *window;

        void *platform_data;
};

#define PCI_CONTROLLER(busdev) ((struct pci_controller *) (busdev)->sysdata)
#define pci_domain_nr(busdev)    (PCI_CONTROLLER(busdev)->segment)

extern struct pci_ops pci_root_ops;

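/*
 * /proc/bus/pci entries only need a domain prefix when the bus lives in a
 * nonzero PCI segment.
 */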
static inline int pci_proc_domain(struct pci_bus *bus)
{
        return (pci_domain_nr(bus) != 0);
}

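/*
 * Translate resource addresses between the CPU view and the PCI bus view
 * using the offsets of the enclosing host bridge window.
 */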
extern void pcibios_resource_to_bus(struct pci_dev *dev,
                struct pci_bus_region *region, struct resource *res);

extern void pcibios_bus_to_resource(struct pci_dev *dev,
                struct resource *res, struct pci_bus_region *region);

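/*
 * Pick the root resource (I/O port or MMIO space) that a device resource
 * should be claimed against.
 */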
static inline struct resource *
pcibios_select_root(struct pci_dev *pdev, struct resource *res)
{
        struct resource *root = NULL;

        if (res->flags & IORESOURCE_IO)
                root = &ioport_resource;
        if (res->flags & IORESOURCE_MEM)
                root = &iomem_resource;

        return root;
}

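/*
 * Legacy IDE uses ISA IRQ 14 for the primary channel and IRQ 15 for the
 * secondary; map them to the corresponding ia64 interrupt vectors.
 */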
#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
        return channel ? isa_irq_to_vector(15) : isa_irq_to_vector(14);
}

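/*
 * With the Intel IOMMU (VT-d) driver enabled, pci_iommu_alloc() is called
 * during early boot to set up the IOMMU-based DMA mapping code.
 */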
#ifdef CONFIG_INTEL_IOMMU
extern void pci_iommu_alloc(void);
#endif
#endif /* _ASM_IA64_PCI_H */