xref: /linux/drivers/char/agp/intel-gtt.c (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
1 /*
2  * Intel GTT (Graphics Translation Table) routines
3  *
4  * Caveat: This driver implements the linux agp interface, but this is far from
5  * an agp driver! GTT support ended up here for purely historical reasons: The
6  * old userspace intel graphics drivers needed an interface to map memory into
7  * the GTT. And the drm provides a default interface for graphics devices sitting
8  * on an agp port. So it made sense to fake the GTT support as an agp port to
9  * avoid having to create a new api.
10  *
11  * With gem this does not make much sense anymore; it just needlessly
12  * complicates the code. But as long as the old graphics stack is still
13  * supported, it's stuck here.
14  *
15  * /fairy-tale-mode off
16  */
17 
18 #include <linux/module.h>
19 #include <linux/pci.h>
20 #include <linux/kernel.h>
21 #include <linux/pagemap.h>
22 #include <linux/agp_backend.h>
23 #include <linux/iommu.h>
24 #include <linux/delay.h>
25 #include <asm/smp.h>
26 #include "agp.h"
27 #include "intel-agp.h"
28 #include <drm/intel/intel-gtt.h>
29 #include <asm/set_memory.h>
30 
31 /*
32  * If we have Intel graphics, we're not going to have anything other than
33  * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
34  * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
35  * Only newer chipsets need to bother with this, of course.
36  */
37 #ifdef CONFIG_INTEL_IOMMU
38 #define USE_PCI_DMA_API 1
39 #else
40 #define USE_PCI_DMA_API 0
41 #endif
42 
43 struct intel_gtt_driver {
44 	unsigned int gen : 8;
45 	unsigned int is_g33 : 1;
46 	unsigned int is_pineview : 1;
47 	unsigned int is_ironlake : 1;
48 	unsigned int has_pgtbl_enable : 1;
49 	unsigned int dma_mask_size : 8;
50 	/* Chipset specific GTT setup */
51 	int (*setup)(void);
52 	/* This should undo anything done in ->setup(), save the unmapping
53 	 * of the mmio register file; that's done in the generic code. */
54 	void (*cleanup)(void);
55 	void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
56 	dma_addr_t (*read_entry)(unsigned int entry, bool *is_present, bool *is_local);
57 	/* Flags is a more or less chipset specific opaque value.
58 	 * For chipsets that need to support old ums (non-gem) code, this
59 	 * needs to be identical to the various supported agp memory types! */
60 	bool (*check_flags)(unsigned int flags);
61 	void (*chipset_flush)(void);
62 };
63 
64 static struct _intel_private {
65 	const struct intel_gtt_driver *driver;
66 	struct pci_dev *pcidev;	/* device one */
67 	struct pci_dev *bridge_dev;
68 	u8 __iomem *registers;
69 	phys_addr_t gtt_phys_addr;
70 	u32 PGETBL_save;
71 	u32 __iomem *gtt;		/* I915G */
72 	bool clear_fake_agp; /* on first access via agp, fill with scratch */
73 	int num_dcache_entries;
74 	void __iomem *i9xx_flush_page;
75 	char *i81x_gtt_table;
76 	struct resource ifp_resource;
77 	int resource_valid;
78 	struct page *scratch_page;
79 	phys_addr_t scratch_page_dma;
80 	int refcount;
81 	/* Whether i915 needs to use the dmar apis or not. */
82 	unsigned int needs_dmar : 1;
83 	phys_addr_t gma_bus_addr;
84 	/*  Size of memory reserved for graphics by the BIOS */
85 	resource_size_t stolen_size;
86 	/* Total number of gtt entries. */
87 	unsigned int gtt_total_entries;
88 	/* Part of the gtt that is mappable by the cpu, for those chips where
89 	 * this is not the full gtt. */
90 	unsigned int gtt_mappable_entries;
91 } intel_private;
92 
93 #define INTEL_GTT_GEN	intel_private.driver->gen
94 #define IS_G33		intel_private.driver->is_g33
95 #define IS_PINEVIEW	intel_private.driver->is_pineview
96 #define IS_IRONLAKE	intel_private.driver->is_ironlake
97 #define HAS_PGTBL_EN	intel_private.driver->has_pgtbl_enable
98 
99 #if IS_ENABLED(CONFIG_AGP_INTEL)
100 static int intel_gtt_map_memory(struct page **pages,
101 				unsigned int num_entries,
102 				struct sg_table *st)
103 {
104 	struct scatterlist *sg;
105 	int i;
106 
107 	DBG("try mapping %lu pages\n", (unsigned long)num_entries);
108 
109 	if (sg_alloc_table(st, num_entries, GFP_KERNEL))
110 		goto err;
111 
112 	for_each_sg(st->sgl, sg, num_entries, i)
113 		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
114 
115 	if (!dma_map_sg(&intel_private.pcidev->dev, st->sgl, st->nents,
116 			DMA_BIDIRECTIONAL))
117 		goto err;
118 
119 	return 0;
120 
121 err:
122 	sg_free_table(st);
123 	return -ENOMEM;
124 }
125 
126 static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
127 {
128 	struct sg_table st;
129 	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
130 
131 	dma_unmap_sg(&intel_private.pcidev->dev, sg_list, num_sg,
132 		     DMA_BIDIRECTIONAL);
133 
134 	st.sgl = sg_list;
135 	st.orig_nents = st.nents = num_sg;
136 
137 	sg_free_table(&st);
138 }
139 
140 static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
141 {
142 	return;
143 }
144 
145 /* Exists to support ARGB cursors */
146 static struct page *i8xx_alloc_pages(void)
147 {
148 	struct page *page;
149 
150 	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
151 	if (page == NULL)
152 		return NULL;
153 
154 	if (set_pages_uc(page, 4) < 0) {
155 		set_pages_wb(page, 4);
156 		__free_pages(page, 2);
157 		return NULL;
158 	}
159 	atomic_inc(&agp_bridge->current_memory_agp);
160 	return page;
161 }
162 
163 static void i8xx_destroy_pages(struct page *page)
164 {
165 	if (page == NULL)
166 		return;
167 
168 	set_pages_wb(page, 4);
169 	__free_pages(page, 2);
170 	atomic_dec(&agp_bridge->current_memory_agp);
171 }
172 #endif
173 
174 #define I810_GTT_ORDER 4
175 static int i810_setup(void)
176 {
177 	phys_addr_t reg_addr;
178 	char *gtt_table;
179 
180 	/* i81x does not preallocate the gtt. It's always 64kb in size. */
181 	gtt_table = alloc_gatt_pages(I810_GTT_ORDER);
182 	if (gtt_table == NULL)
183 		return -ENOMEM;
184 	intel_private.i81x_gtt_table = gtt_table;
185 
186 	reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR);
187 
188 	intel_private.registers = ioremap(reg_addr, KB(64));
189 	if (!intel_private.registers)
190 		return -ENOMEM;
191 
192 	writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED,
193 	       intel_private.registers+I810_PGETBL_CTL);
194 
195 	intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE;
196 
197 	if ((readl(intel_private.registers+I810_DRAM_CTL)
198 		& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
199 		dev_info(&intel_private.pcidev->dev,
200 			 "detected 4MB dedicated video ram\n");
201 		intel_private.num_dcache_entries = 1024;
202 	}
203 
204 	return 0;
205 }
206 
207 static void i810_cleanup(void)
208 {
209 	writel(0, intel_private.registers+I810_PGETBL_CTL);
210 	free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
211 }
212 
213 #if IS_ENABLED(CONFIG_AGP_INTEL)
214 static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
215 				      int type)
216 {
217 	int i;
218 
219 	if ((pg_start + mem->page_count)
220 			> intel_private.num_dcache_entries)
221 		return -EINVAL;
222 
223 	if (!mem->is_flushed)
224 		global_cache_flush();
225 
226 	for (i = pg_start; i < (pg_start + mem->page_count); i++) {
227 		dma_addr_t addr = i << PAGE_SHIFT;
228 		intel_private.driver->write_entry(addr,
229 						  i, type);
230 	}
231 	wmb();
232 
233 	return 0;
234 }
235 
236 /*
237  * The i810/i830 requires a physical address to program its mouse
238  * pointer into hardware.
239  * However the Xserver still writes to it through the agp aperture.
240  */
241 static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
242 {
243 	struct agp_memory *new;
244 	struct page *page;
245 
246 	switch (pg_count) {
247 	case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
248 		break;
249 	case 4:
250 		/* kludge to get 4 physical pages for ARGB cursor */
251 		page = i8xx_alloc_pages();
252 		break;
253 	default:
254 		return NULL;
255 	}
256 
257 	if (page == NULL)
258 		return NULL;
259 
260 	new = agp_create_memory(pg_count);
261 	if (new == NULL)
262 		return NULL;
263 
264 	new->pages[0] = page;
265 	if (pg_count == 4) {
266 		/* kludge to get 4 physical pages for ARGB cursor */
267 		new->pages[1] = new->pages[0] + 1;
268 		new->pages[2] = new->pages[1] + 1;
269 		new->pages[3] = new->pages[2] + 1;
270 	}
271 	new->page_count = pg_count;
272 	new->num_scratch_pages = pg_count;
273 	new->type = AGP_PHYS_MEMORY;
274 	new->physical = page_to_phys(new->pages[0]);
275 	return new;
276 }
277 
278 static void intel_i810_free_by_type(struct agp_memory *curr)
279 {
280 	agp_free_key(curr->key);
281 	if (curr->type == AGP_PHYS_MEMORY) {
282 		if (curr->page_count == 4)
283 			i8xx_destroy_pages(curr->pages[0]);
284 		else {
285 			agp_bridge->driver->agp_destroy_page(curr->pages[0],
286 							     AGP_PAGE_DESTROY_UNMAP);
287 			agp_bridge->driver->agp_destroy_page(curr->pages[0],
288 							     AGP_PAGE_DESTROY_FREE);
289 		}
290 		agp_free_page_array(curr);
291 	}
292 	kfree(curr);
293 }
294 #endif
295 
296 static int intel_gtt_setup_scratch_page(void)
297 {
298 	struct page *page;
299 	dma_addr_t dma_addr;
300 
301 	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
302 	if (page == NULL)
303 		return -ENOMEM;
304 	set_pages_uc(page, 1);
305 
306 	if (intel_private.needs_dmar) {
307 		dma_addr = dma_map_page(&intel_private.pcidev->dev, page, 0,
308 					PAGE_SIZE, DMA_BIDIRECTIONAL);
309 		if (dma_mapping_error(&intel_private.pcidev->dev, dma_addr)) {
310 			__free_page(page);
311 			return -EINVAL;
312 		}
313 
314 		intel_private.scratch_page_dma = dma_addr;
315 	} else
316 		intel_private.scratch_page_dma = page_to_phys(page);
317 
318 	intel_private.scratch_page = page;
319 
320 	return 0;
321 }
322 
323 static void i810_write_entry(dma_addr_t addr, unsigned int entry,
324 			     unsigned int flags)
325 {
326 	u32 pte_flags = I810_PTE_VALID;
327 
328 	switch (flags) {
329 	case AGP_DCACHE_MEMORY:
330 		pte_flags |= I810_PTE_LOCAL;
331 		break;
332 	case AGP_USER_CACHED_MEMORY:
333 		pte_flags |= I830_PTE_SYSTEM_CACHED;
334 		break;
335 	}
336 
337 	writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
338 }
339 
340 static dma_addr_t i810_read_entry(unsigned int entry,
341 				  bool *is_present, bool *is_local)
342 {
343 	u32 val;
344 
345 	val = readl(intel_private.gtt + entry);
346 
347 	*is_present = val & I810_PTE_VALID;
348 	*is_local = val & I810_PTE_LOCAL;
349 
350 	return val & ~0xfff;
351 }
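
/*
 * Editorial sketch, not part of the original file: a worked example of the
 * i810 PTE decode above, assuming the I810_PTE_* values from intel-agp.h
 * (valid = bit 0, local/dcache = bit 1, page frame address in the upper
 * bits):
 *
 *	val = 0x00123003;
 *	val & I810_PTE_VALID	-> 0x1, so *is_present = true
 *	val & I810_PTE_LOCAL	-> 0x2, so *is_local = true (dcache memory)
 *	val & ~0xfff		-> 0x00123000, the page frame address
 */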
352 
353 static resource_size_t intel_gtt_stolen_size(void)
354 {
355 	u16 gmch_ctrl;
356 	u8 rdct;
357 	int local = 0;
358 	static const int ddt[4] = { 0, 16, 32, 64 };
359 	resource_size_t stolen_size = 0;
360 
361 	if (INTEL_GTT_GEN == 1)
362 		return 0; /* no stolen mem on i81x */
363 
364 	pci_read_config_word(intel_private.bridge_dev,
365 			     I830_GMCH_CTRL, &gmch_ctrl);
366 
367 	if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
368 	    intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
369 		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
370 		case I830_GMCH_GMS_STOLEN_512:
371 			stolen_size = KB(512);
372 			break;
373 		case I830_GMCH_GMS_STOLEN_1024:
374 			stolen_size = MB(1);
375 			break;
376 		case I830_GMCH_GMS_STOLEN_8192:
377 			stolen_size = MB(8);
378 			break;
379 		case I830_GMCH_GMS_LOCAL:
380 			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
381 			stolen_size = (I830_RDRAM_ND(rdct) + 1) *
382 					MB(ddt[I830_RDRAM_DDT(rdct)]);
383 			local = 1;
384 			break;
385 		default:
386 			stolen_size = 0;
387 			break;
388 		}
389 	} else {
390 		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
391 		case I855_GMCH_GMS_STOLEN_1M:
392 			stolen_size = MB(1);
393 			break;
394 		case I855_GMCH_GMS_STOLEN_4M:
395 			stolen_size = MB(4);
396 			break;
397 		case I855_GMCH_GMS_STOLEN_8M:
398 			stolen_size = MB(8);
399 			break;
400 		case I855_GMCH_GMS_STOLEN_16M:
401 			stolen_size = MB(16);
402 			break;
403 		case I855_GMCH_GMS_STOLEN_32M:
404 			stolen_size = MB(32);
405 			break;
406 		case I915_GMCH_GMS_STOLEN_48M:
407 			stolen_size = MB(48);
408 			break;
409 		case I915_GMCH_GMS_STOLEN_64M:
410 			stolen_size = MB(64);
411 			break;
412 		case G33_GMCH_GMS_STOLEN_128M:
413 			stolen_size = MB(128);
414 			break;
415 		case G33_GMCH_GMS_STOLEN_256M:
416 			stolen_size = MB(256);
417 			break;
418 		case INTEL_GMCH_GMS_STOLEN_96M:
419 			stolen_size = MB(96);
420 			break;
421 		case INTEL_GMCH_GMS_STOLEN_160M:
422 			stolen_size = MB(160);
423 			break;
424 		case INTEL_GMCH_GMS_STOLEN_224M:
425 			stolen_size = MB(224);
426 			break;
427 		case INTEL_GMCH_GMS_STOLEN_352M:
428 			stolen_size = MB(352);
429 			break;
430 		default:
431 			stolen_size = 0;
432 			break;
433 		}
434 	}
435 
436 	if (stolen_size > 0) {
437 		dev_info(&intel_private.bridge_dev->dev, "detected %lluK %s memory\n",
438 		       (u64)stolen_size / KB(1), local ? "local" : "stolen");
439 	} else {
440 		dev_info(&intel_private.bridge_dev->dev,
441 		       "no pre-allocated video memory detected\n");
442 		stolen_size = 0;
443 	}
444 
445 	return stolen_size;
446 }
447 
448 static void i965_adjust_pgetbl_size(unsigned int size_flag)
449 {
450 	u32 pgetbl_ctl, pgetbl_ctl2;
451 
452 	/* ensure that ppgtt is disabled */
453 	pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
454 	pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
455 	writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);
456 
457 	/* write the new ggtt size */
458 	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
459 	pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
460 	pgetbl_ctl |= size_flag;
461 	writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
462 }
463 
464 static unsigned int i965_gtt_total_entries(void)
465 {
466 	int size;
467 	u32 pgetbl_ctl;
468 	u16 gmch_ctl;
469 
470 	pci_read_config_word(intel_private.bridge_dev,
471 			     I830_GMCH_CTRL, &gmch_ctl);
472 
473 	if (INTEL_GTT_GEN == 5) {
474 		switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
475 		case G4x_GMCH_SIZE_1M:
476 		case G4x_GMCH_SIZE_VT_1M:
477 			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
478 			break;
479 		case G4x_GMCH_SIZE_VT_1_5M:
480 			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
481 			break;
482 		case G4x_GMCH_SIZE_2M:
483 		case G4x_GMCH_SIZE_VT_2M:
484 			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
485 			break;
486 		}
487 	}
488 
489 	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
490 
491 	switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
492 	case I965_PGETBL_SIZE_128KB:
493 		size = KB(128);
494 		break;
495 	case I965_PGETBL_SIZE_256KB:
496 		size = KB(256);
497 		break;
498 	case I965_PGETBL_SIZE_512KB:
499 		size = KB(512);
500 		break;
501 	/* GTT pagetable sizes bigger than 512KB are not possible on G33! */
502 	case I965_PGETBL_SIZE_1MB:
503 		size = KB(1024);
504 		break;
505 	case I965_PGETBL_SIZE_2MB:
506 		size = KB(2048);
507 		break;
508 	case I965_PGETBL_SIZE_1_5MB:
509 		size = KB(1024 + 512);
510 		break;
511 	default:
512 		dev_info(&intel_private.pcidev->dev,
513 			 "unknown page table size, assuming 512KB\n");
514 		size = KB(512);
515 	}
516 
517 	return size/4;
518 }
519 
520 static unsigned int intel_gtt_total_entries(void)
521 {
522 	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
523 		return i965_gtt_total_entries();
524 	else {
525 		/* On previous hardware, the GTT size was just what was
526 		 * required to map the aperture.
527 		 */
528 		return intel_private.gtt_mappable_entries;
529 	}
530 }
531 
532 static unsigned int intel_gtt_mappable_entries(void)
533 {
534 	unsigned int aperture_size;
535 
536 	if (INTEL_GTT_GEN == 1) {
537 		u32 smram_miscc;
538 
539 		pci_read_config_dword(intel_private.bridge_dev,
540 				      I810_SMRAM_MISCC, &smram_miscc);
541 
542 		if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
543 				== I810_GFX_MEM_WIN_32M)
544 			aperture_size = MB(32);
545 		else
546 			aperture_size = MB(64);
547 	} else if (INTEL_GTT_GEN == 2) {
548 		u16 gmch_ctrl;
549 
550 		pci_read_config_word(intel_private.bridge_dev,
551 				     I830_GMCH_CTRL, &gmch_ctrl);
552 
553 		if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
554 			aperture_size = MB(64);
555 		else
556 			aperture_size = MB(128);
557 	} else {
558 		/* 9xx supports large sizes, just look at the length */
559 		aperture_size = pci_resource_len(intel_private.pcidev, 2);
560 	}
561 
562 	return aperture_size >> PAGE_SHIFT;
563 }
564 
565 static void intel_gtt_teardown_scratch_page(void)
566 {
567 	set_pages_wb(intel_private.scratch_page, 1);
568 	if (intel_private.needs_dmar)
569 		dma_unmap_page(&intel_private.pcidev->dev,
570 			       intel_private.scratch_page_dma, PAGE_SIZE,
571 			       DMA_BIDIRECTIONAL);
572 	__free_page(intel_private.scratch_page);
573 }
574 
575 static void intel_gtt_cleanup(void)
576 {
577 	intel_private.driver->cleanup();
578 
579 	iounmap(intel_private.gtt);
580 	iounmap(intel_private.registers);
581 
582 	intel_gtt_teardown_scratch_page();
583 }
584 
585 /* Certain Gen5 chipsets require idling the GPU before
586  * unmapping anything from the GTT when VT-d is enabled.
587  */
588 static inline int needs_ilk_vtd_wa(void)
589 {
590 	const unsigned short gpu_devid = intel_private.pcidev->device;
591 
592 	/*
593 	 * Query iommu subsystem to see if we need the workaround. Presumably
594 	 * that was loaded first.
595 	 */
596 	return ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
597 		 gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
598 		device_iommu_mapped(&intel_private.pcidev->dev));
599 }
600 
601 static bool intel_gtt_can_wc(void)
602 {
603 	if (INTEL_GTT_GEN <= 2)
604 		return false;
605 
606 	if (INTEL_GTT_GEN >= 6)
607 		return false;
608 
609 	/* Reports of major corruption with ILK vt'd enabled */
610 	if (needs_ilk_vtd_wa())
611 		return false;
612 
613 	return true;
614 }
615 
616 static int intel_gtt_init(void)
617 {
618 	u32 gtt_map_size;
619 	int ret, bar;
620 
621 	ret = intel_private.driver->setup();
622 	if (ret != 0)
623 		return ret;
624 
625 	intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
626 	intel_private.gtt_total_entries = intel_gtt_total_entries();
627 
628 	/* save the PGETBL reg for resume */
629 	intel_private.PGETBL_save =
630 		readl(intel_private.registers+I810_PGETBL_CTL)
631 			& ~I810_PGETBL_ENABLED;
632 	/* we only ever restore the register when enabling the PGTBL... */
633 	if (HAS_PGTBL_EN)
634 		intel_private.PGETBL_save |= I810_PGETBL_ENABLED;
635 
636 	dev_info(&intel_private.bridge_dev->dev,
637 			"detected gtt size: %dK total, %dK mappable\n",
638 			intel_private.gtt_total_entries * 4,
639 			intel_private.gtt_mappable_entries * 4);
640 
641 	gtt_map_size = intel_private.gtt_total_entries * 4;
642 
643 	intel_private.gtt = NULL;
644 	if (intel_gtt_can_wc())
645 		intel_private.gtt = ioremap_wc(intel_private.gtt_phys_addr,
646 					       gtt_map_size);
647 	if (intel_private.gtt == NULL)
648 		intel_private.gtt = ioremap(intel_private.gtt_phys_addr,
649 					    gtt_map_size);
650 	if (intel_private.gtt == NULL) {
651 		intel_private.driver->cleanup();
652 		iounmap(intel_private.registers);
653 		return -ENOMEM;
654 	}
655 
656 #if IS_ENABLED(CONFIG_AGP_INTEL)
657 	global_cache_flush();   /* FIXME: ? */
658 #endif
659 
660 	intel_private.stolen_size = intel_gtt_stolen_size();
661 
662 	intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
663 
664 	ret = intel_gtt_setup_scratch_page();
665 	if (ret != 0) {
666 		intel_gtt_cleanup();
667 		return ret;
668 	}
669 
670 	if (INTEL_GTT_GEN <= 2)
671 		bar = I810_GMADR_BAR;
672 	else
673 		bar = I915_GMADR_BAR;
674 
675 	intel_private.gma_bus_addr = pci_bus_address(intel_private.pcidev, bar);
676 	return 0;
677 }
678 
679 #if IS_ENABLED(CONFIG_AGP_INTEL)
680 static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
681 	{32, 8192, 3},
682 	{64, 16384, 4},
683 	{128, 32768, 5},
684 	{256, 65536, 6},
685 	{512, 131072, 7},
686 };
687 
688 static int intel_fake_agp_fetch_size(void)
689 {
690 	int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
691 	unsigned int aper_size;
692 	int i;
693 
694 	aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1);
695 
696 	for (i = 0; i < num_sizes; i++) {
697 		if (aper_size == intel_fake_agp_sizes[i].size) {
698 			agp_bridge->current_size =
699 				(void *) (intel_fake_agp_sizes + i);
700 			return aper_size;
701 		}
702 	}
703 
704 	return 0;
705 }
706 #endif
707 
708 static void i830_cleanup(void)
709 {
710 }
711 
712 /* The chipset_flush interface needs to get data that has already been
713  * flushed out of the CPU all the way out to main memory, because the GPU
714  * doesn't snoop those buffers.
715  *
716  * The 8xx series doesn't have the same lovely interface for flushing the
717  * chipset write buffers that the later chips do. According to the 865
718  * specs, it's 64 octwords, or 1KB.  So, to flush the previous contents of
719  * that buffer, we just fill 1KB of our own data and clflush it, assuming
720  * that this pushes out whatever was in there before.  It appears to work.
721  */
722 static void i830_chipset_flush(void)
723 {
724 	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
725 
726 	/* Forcibly evict everything from the CPU write buffers.
727 	 * clflush appears to be insufficient.
728 	 */
729 	wbinvd_on_all_cpus();
730 
731 	/* So far we've only seen documentation for this magic bit on the
732 	 * 855GM; we hope it exists for the other gen2 chipsets...
733 	 *
734 	 * Also works as advertised on my 845G.
735 	 */
736 	writel(readl(intel_private.registers+I830_HIC) | (1<<31),
737 	       intel_private.registers+I830_HIC);
738 
739 	while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
740 		if (time_after(jiffies, timeout))
741 			break;
742 
743 		udelay(50);
744 	}
745 }
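
/*
 * Editorial sketch, not part of the original file: the comment above
 * i830_chipset_flush() describes the alternative "fill 1KB and clflush it"
 * approach to draining the 8xx write buffer. A minimal version, assuming a
 * dedicated 1KB scratch buffer (flush_buf is a hypothetical name), might
 * look like:
 *
 *	static char flush_buf[1024];
 *
 *	static void i8xx_fill_flush(void)
 *	{
 *		int i;
 *
 *		memset(flush_buf, 0, sizeof(flush_buf));
 *		mb();
 *		for (i = 0; i < sizeof(flush_buf); i += boot_cpu_data.x86_clflush_size)
 *			clflush(flush_buf + i);
 *		mb();
 *	}
 *
 * The driver instead uses wbinvd_on_all_cpus() plus the I830_HIC bit,
 * since clflush alone proved insufficient.
 */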
746 
747 static void i830_write_entry(dma_addr_t addr, unsigned int entry,
748 			     unsigned int flags)
749 {
750 	u32 pte_flags = I810_PTE_VALID;
751 
752 	if (flags ==  AGP_USER_CACHED_MEMORY)
753 		pte_flags |= I830_PTE_SYSTEM_CACHED;
754 
755 	writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
756 }
757 
758 static dma_addr_t i830_read_entry(unsigned int entry,
759 				  bool *is_present, bool *is_local)
760 {
761 	u32 val;
762 
763 	val = readl(intel_private.gtt + entry);
764 
765 	*is_present = val & I810_PTE_VALID;
766 	*is_local = false;
767 
768 	return val & ~0xfff;
769 }
770 
771 bool intel_gmch_enable_gtt(void)
772 {
773 	u8 __iomem *reg;
774 
775 	if (INTEL_GTT_GEN == 2) {
776 		u16 gmch_ctrl;
777 
778 		pci_read_config_word(intel_private.bridge_dev,
779 				     I830_GMCH_CTRL, &gmch_ctrl);
780 		gmch_ctrl |= I830_GMCH_ENABLED;
781 		pci_write_config_word(intel_private.bridge_dev,
782 				      I830_GMCH_CTRL, gmch_ctrl);
783 
784 		pci_read_config_word(intel_private.bridge_dev,
785 				     I830_GMCH_CTRL, &gmch_ctrl);
786 		if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
787 			dev_err(&intel_private.pcidev->dev,
788 				"failed to enable the GTT: GMCH_CTRL=%x\n",
789 				gmch_ctrl);
790 			return false;
791 		}
792 	}
793 
794 	/* On the resume path we may be adjusting the PGTBL value, so
795 	 * be paranoid and flush all chipset write buffers...
796 	 */
797 	if (INTEL_GTT_GEN >= 3)
798 		writel(0, intel_private.registers+GFX_FLSH_CNTL);
799 
800 	reg = intel_private.registers+I810_PGETBL_CTL;
801 	writel(intel_private.PGETBL_save, reg);
802 	if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
803 		dev_err(&intel_private.pcidev->dev,
804 			"failed to enable the GTT: PGETBL=%x [expected %x]\n",
805 			readl(reg), intel_private.PGETBL_save);
806 		return false;
807 	}
808 
809 	if (INTEL_GTT_GEN >= 3)
810 		writel(0, intel_private.registers+GFX_FLSH_CNTL);
811 
812 	return true;
813 }
814 EXPORT_SYMBOL(intel_gmch_enable_gtt);
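
/*
 * Editorial sketch, not part of the original file: on the resume path a
 * caller such as drm/i915 would re-enable the GTT and treat a refusal by
 * the chipset as fatal, roughly:
 *
 *	if (!intel_gmch_enable_gtt())
 *		return -EIO;
 */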
815 
816 static int i830_setup(void)
817 {
818 	phys_addr_t reg_addr;
819 
820 	reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR);
821 
822 	intel_private.registers = ioremap(reg_addr, KB(64));
823 	if (!intel_private.registers)
824 		return -ENOMEM;
825 
826 	intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE;
827 
828 	return 0;
829 }
830 
831 #if IS_ENABLED(CONFIG_AGP_INTEL)
832 static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
833 {
834 	agp_bridge->gatt_table_real = NULL;
835 	agp_bridge->gatt_table = NULL;
836 	agp_bridge->gatt_bus_addr = 0;
837 
838 	return 0;
839 }
840 
841 static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
842 {
843 	return 0;
844 }
845 
846 static int intel_fake_agp_configure(void)
847 {
848 	if (!intel_gmch_enable_gtt())
849 		return -EIO;
850 
851 	intel_private.clear_fake_agp = true;
852 	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
853 
854 	return 0;
855 }
856 #endif
857 
858 static bool i830_check_flags(unsigned int flags)
859 {
860 	switch (flags) {
861 	case 0:
862 	case AGP_PHYS_MEMORY:
863 	case AGP_USER_CACHED_MEMORY:
864 	case AGP_USER_MEMORY:
865 		return true;
866 	}
867 
868 	return false;
869 }
870 
871 void intel_gmch_gtt_insert_page(dma_addr_t addr,
872 				unsigned int pg,
873 				unsigned int flags)
874 {
875 	intel_private.driver->write_entry(addr, pg, flags);
876 	readl(intel_private.gtt + pg);
877 	if (intel_private.driver->chipset_flush)
878 		intel_private.driver->chipset_flush();
879 }
880 EXPORT_SYMBOL(intel_gmch_gtt_insert_page);
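
/*
 * Editorial sketch, not part of the original file: pinning a single page
 * at GTT page 'pg' with user-memory flags, assuming a chipset where
 * needs_dmar is not set so the physical address can be used directly:
 *
 *	intel_gmch_gtt_insert_page(page_to_phys(page), pg, AGP_USER_MEMORY);
 */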
881 
882 void intel_gmch_gtt_insert_sg_entries(struct sg_table *st,
883 				      unsigned int pg_start,
884 				      unsigned int flags)
885 {
886 	struct scatterlist *sg;
887 	unsigned int len, m;
888 	int i, j;
889 
890 	j = pg_start;
891 
892 	/* sg may merge pages, but we have to write a separate
893 	 * per-page address into the GTT */
894 	for_each_sg(st->sgl, sg, st->nents, i) {
895 		len = sg_dma_len(sg) >> PAGE_SHIFT;
896 		for (m = 0; m < len; m++) {
897 			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
898 			intel_private.driver->write_entry(addr, j, flags);
899 			j++;
900 		}
901 	}
902 	readl(intel_private.gtt + j - 1);
903 	if (intel_private.driver->chipset_flush)
904 		intel_private.driver->chipset_flush();
905 }
906 EXPORT_SYMBOL(intel_gmch_gtt_insert_sg_entries);
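
/*
 * Editorial sketch, not part of the original file: a GEM-style caller on a
 * chipset with needs_dmar set would first build a dma-mapped sg_table and
 * then point a GTT range at it, mirroring intel_fake_agp_insert_entries()
 * below and pairing up with the map/unmap helpers above (which are only
 * built with CONFIG_AGP_INTEL). first_pte and num_pages are hypothetical:
 *
 *	struct sg_table st;
 *
 *	if (intel_gtt_map_memory(pages, num_pages, &st) == 0) {
 *		intel_gmch_gtt_insert_sg_entries(&st, first_pte, AGP_USER_MEMORY);
 *		...
 *		intel_gmch_gtt_clear_range(first_pte, num_pages);
 *		intel_gtt_unmap_memory(st.sgl, st.nents);
 *	}
 */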
907 
908 dma_addr_t intel_gmch_gtt_read_entry(unsigned int pg,
909 				     bool *is_present, bool *is_local)
910 {
911 	return intel_private.driver->read_entry(pg, is_present, is_local);
912 }
913 EXPORT_SYMBOL(intel_gmch_gtt_read_entry);
914 
915 #if IS_ENABLED(CONFIG_AGP_INTEL)
916 static void intel_gmch_gtt_insert_pages(unsigned int first_entry,
917 					unsigned int num_entries,
918 					struct page **pages,
919 					unsigned int flags)
920 {
921 	int i, j;
922 
923 	for (i = 0, j = first_entry; i < num_entries; i++, j++) {
924 		dma_addr_t addr = page_to_phys(pages[i]);
925 		intel_private.driver->write_entry(addr,
926 						  j, flags);
927 	}
928 	wmb();
929 }
930 
931 static int intel_fake_agp_insert_entries(struct agp_memory *mem,
932 					 off_t pg_start, int type)
933 {
934 	int ret = -EINVAL;
935 
936 	if (intel_private.clear_fake_agp) {
937 		int start = intel_private.stolen_size / PAGE_SIZE;
938 		int end = intel_private.gtt_mappable_entries;
939 		intel_gmch_gtt_clear_range(start, end - start);
940 		intel_private.clear_fake_agp = false;
941 	}
942 
943 	if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
944 		return i810_insert_dcache_entries(mem, pg_start, type);
945 
946 	if (mem->page_count == 0)
947 		goto out;
948 
949 	if (pg_start + mem->page_count > intel_private.gtt_total_entries)
950 		goto out_err;
951 
952 	if (type != mem->type)
953 		goto out_err;
954 
955 	if (!intel_private.driver->check_flags(type))
956 		goto out_err;
957 
958 	if (!mem->is_flushed)
959 		global_cache_flush();
960 
961 	if (intel_private.needs_dmar) {
962 		struct sg_table st;
963 
964 		ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
965 		if (ret != 0)
966 			return ret;
967 
968 		intel_gmch_gtt_insert_sg_entries(&st, pg_start, type);
969 		mem->sg_list = st.sgl;
970 		mem->num_sg = st.nents;
971 	} else
972 		intel_gmch_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
973 					    type);
974 
975 out:
976 	ret = 0;
977 out_err:
978 	mem->is_flushed = true;
979 	return ret;
980 }
981 #endif
982 
983 void intel_gmch_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
984 {
985 	unsigned int i;
986 
987 	for (i = first_entry; i < (first_entry + num_entries); i++) {
988 		intel_private.driver->write_entry(intel_private.scratch_page_dma,
989 						  i, 0);
990 	}
991 	wmb();
992 }
993 EXPORT_SYMBOL(intel_gmch_gtt_clear_range);
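
/*
 * Editorial sketch, not part of the original file: unbinding always means
 * pointing the PTEs back at the shared scratch page rather than leaving
 * them dangling, e.g. to unbind pages previously inserted at pg_start
 * (both names hypothetical):
 *
 *	intel_gmch_gtt_clear_range(pg_start, num_pages);
 */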
994 
995 #if IS_ENABLED(CONFIG_AGP_INTEL)
996 static int intel_fake_agp_remove_entries(struct agp_memory *mem,
997 					 off_t pg_start, int type)
998 {
999 	if (mem->page_count == 0)
1000 		return 0;
1001 
1002 	intel_gmch_gtt_clear_range(pg_start, mem->page_count);
1003 
1004 	if (intel_private.needs_dmar) {
1005 		intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
1006 		mem->sg_list = NULL;
1007 		mem->num_sg = 0;
1008 	}
1009 
1010 	return 0;
1011 }
1012 
1013 static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
1014 						       int type)
1015 {
1016 	struct agp_memory *new;
1017 
1018 	if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) {
1019 		if (pg_count != intel_private.num_dcache_entries)
1020 			return NULL;
1021 
1022 		new = agp_create_memory(1);
1023 		if (new == NULL)
1024 			return NULL;
1025 
1026 		new->type = AGP_DCACHE_MEMORY;
1027 		new->page_count = pg_count;
1028 		new->num_scratch_pages = 0;
1029 		agp_free_page_array(new);
1030 		return new;
1031 	}
1032 	if (type == AGP_PHYS_MEMORY)
1033 		return alloc_agpphysmem_i8xx(pg_count, type);
1034 	/* always return NULL for other allocation types for now */
1035 	return NULL;
1036 }
1037 #endif
1038 
1039 static int intel_alloc_chipset_flush_resource(void)
1040 {
1041 	int ret;
1042 	ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
1043 				     PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
1044 				     pcibios_align_resource, intel_private.bridge_dev);
1045 
1046 	return ret;
1047 }
1048 
1049 static void intel_i915_setup_chipset_flush(void)
1050 {
1051 	int ret;
1052 	u32 temp;
1053 
1054 	pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
1055 	if (!(temp & 0x1)) {
1056 		intel_alloc_chipset_flush_resource();
1057 		intel_private.resource_valid = 1;
1058 		pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1059 	} else {
1060 		temp &= ~1;
1061 
1062 		intel_private.resource_valid = 1;
1063 		intel_private.ifp_resource.start = temp;
1064 		intel_private.ifp_resource.end = temp + PAGE_SIZE;
1065 		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1066 		/* some BIOSes reserve this area in a PnP resource, some don't */
1067 		if (ret)
1068 			intel_private.resource_valid = 0;
1069 	}
1070 }
1071 
1072 static void intel_i965_g33_setup_chipset_flush(void)
1073 {
1074 	u32 temp_hi, temp_lo;
1075 	int ret;
1076 
1077 	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
1078 	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);
1079 
1080 	if (!(temp_lo & 0x1)) {
1081 
1082 		intel_alloc_chipset_flush_resource();
1083 
1084 		intel_private.resource_valid = 1;
1085 		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
1086 			upper_32_bits(intel_private.ifp_resource.start));
1087 		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1088 	} else {
1089 		u64 l64;
1090 
1091 		temp_lo &= ~0x1;
1092 		l64 = ((u64)temp_hi << 32) | temp_lo;
1093 
1094 		intel_private.resource_valid = 1;
1095 		intel_private.ifp_resource.start = l64;
1096 		intel_private.ifp_resource.end = l64 + PAGE_SIZE;
1097 		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1098 		/* some BIOSes reserve this area in a PnP resource, some don't */
1099 		if (ret)
1100 			intel_private.resource_valid = 0;
1101 	}
1102 }
1103 
1104 static void intel_i9xx_setup_flush(void)
1105 {
1106 	/* return if already configured */
1107 	if (intel_private.ifp_resource.start)
1108 		return;
1109 
1110 	if (INTEL_GTT_GEN == 6)
1111 		return;
1112 
1113 	/* setup a resource for this object */
1114 	intel_private.ifp_resource.name = "Intel Flush Page";
1115 	intel_private.ifp_resource.flags = IORESOURCE_MEM;
1116 
1117 	/* Setup chipset flush for 915 */
1118 	if (IS_G33 || INTEL_GTT_GEN >= 4) {
1119 		intel_i965_g33_setup_chipset_flush();
1120 	} else {
1121 		intel_i915_setup_chipset_flush();
1122 	}
1123 
1124 	if (intel_private.ifp_resource.start)
1125 		intel_private.i9xx_flush_page = ioremap(intel_private.ifp_resource.start, PAGE_SIZE);
1126 	if (!intel_private.i9xx_flush_page)
1127 		dev_err(&intel_private.pcidev->dev,
1128 			"can't ioremap flush page - no chipset flushing\n");
1129 }
1130 
1131 static void i9xx_cleanup(void)
1132 {
1133 	if (intel_private.i9xx_flush_page)
1134 		iounmap(intel_private.i9xx_flush_page);
1135 	if (intel_private.resource_valid)
1136 		release_resource(&intel_private.ifp_resource);
1137 	intel_private.ifp_resource.start = 0;
1138 	intel_private.resource_valid = 0;
1139 }
1140 
1141 static void i9xx_chipset_flush(void)
1142 {
1143 	wmb();
1144 	if (intel_private.i9xx_flush_page)
1145 		writel(1, intel_private.i9xx_flush_page);
1146 }
1147 
1148 static void i965_write_entry(dma_addr_t addr,
1149 			     unsigned int entry,
1150 			     unsigned int flags)
1151 {
1152 	u32 pte_flags;
1153 
1154 	pte_flags = I810_PTE_VALID;
1155 	if (flags == AGP_USER_CACHED_MEMORY)
1156 		pte_flags |= I830_PTE_SYSTEM_CACHED;
1157 
1158 	/* Shift high bits down */
1159 	addr |= (addr >> 28) & 0xf0;
1160 	writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
1161 }
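
/*
 * Editorial sketch, not part of the original file: a worked example of the
 * gen4+ address swizzle above, which stores physical address bits 35:32 in
 * PTE bits 7:4. With addr = 0x3_2345_6000:
 *
 *	(addr >> 28) & 0xf0	-> 0x30
 *	addr | 0x30		-> PTE low dword 0x23456030 (plus pte_flags)
 *
 * and i965_read_entry() below reverses it:
 *
 *	((0x23456030 & 0xf0) << 28) | (0x23456030 & ~0xfff) -> 0x3_2345_6000
 */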
1162 
1163 static dma_addr_t i965_read_entry(unsigned int entry,
1164 				  bool *is_present, bool *is_local)
1165 {
1166 	u64 val;
1167 
1168 	val = readl(intel_private.gtt + entry);
1169 
1170 	*is_present = val & I810_PTE_VALID;
1171 	*is_local = false;
1172 
1173 	return ((val & 0xf0) << 28) | (val & ~0xfff);
1174 }
1175 
1176 static int i9xx_setup(void)
1177 {
1178 	phys_addr_t reg_addr;
1179 	int size = KB(512);
1180 
1181 	reg_addr = pci_resource_start(intel_private.pcidev, I915_MMADR_BAR);
1182 
1183 	intel_private.registers = ioremap(reg_addr, size);
1184 	if (!intel_private.registers)
1185 		return -ENOMEM;
1186 
1187 	switch (INTEL_GTT_GEN) {
1188 	case 3:
1189 		intel_private.gtt_phys_addr =
1190 			pci_resource_start(intel_private.pcidev, I915_PTE_BAR);
1191 		break;
1192 	case 5:
1193 		intel_private.gtt_phys_addr = reg_addr + MB(2);
1194 		break;
1195 	default:
1196 		intel_private.gtt_phys_addr = reg_addr + KB(512);
1197 		break;
1198 	}
1199 
1200 	intel_i9xx_setup_flush();
1201 
1202 	return 0;
1203 }
1204 
1205 #if IS_ENABLED(CONFIG_AGP_INTEL)
1206 static const struct agp_bridge_driver intel_fake_agp_driver = {
1207 	.owner			= THIS_MODULE,
1208 	.size_type		= FIXED_APER_SIZE,
1209 	.aperture_sizes		= intel_fake_agp_sizes,
1210 	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
1211 	.configure		= intel_fake_agp_configure,
1212 	.fetch_size		= intel_fake_agp_fetch_size,
1213 	.cleanup		= intel_gtt_cleanup,
1214 	.agp_enable		= intel_fake_agp_enable,
1215 	.cache_flush		= global_cache_flush,
1216 	.create_gatt_table	= intel_fake_agp_create_gatt_table,
1217 	.free_gatt_table	= intel_fake_agp_free_gatt_table,
1218 	.insert_memory		= intel_fake_agp_insert_entries,
1219 	.remove_memory		= intel_fake_agp_remove_entries,
1220 	.alloc_by_type		= intel_fake_agp_alloc_by_type,
1221 	.free_by_type		= intel_i810_free_by_type,
1222 	.agp_alloc_page		= agp_generic_alloc_page,
1223 	.agp_alloc_pages        = agp_generic_alloc_pages,
1224 	.agp_destroy_page	= agp_generic_destroy_page,
1225 	.agp_destroy_pages      = agp_generic_destroy_pages,
1226 };
1227 #endif
1228 
1229 static const struct intel_gtt_driver i81x_gtt_driver = {
1230 	.gen = 1,
1231 	.has_pgtbl_enable = 1,
1232 	.dma_mask_size = 32,
1233 	.setup = i810_setup,
1234 	.cleanup = i810_cleanup,
1235 	.check_flags = i830_check_flags,
1236 	.write_entry = i810_write_entry,
1237 	.read_entry = i810_read_entry,
1238 };
1239 static const struct intel_gtt_driver i8xx_gtt_driver = {
1240 	.gen = 2,
1241 	.has_pgtbl_enable = 1,
1242 	.setup = i830_setup,
1243 	.cleanup = i830_cleanup,
1244 	.write_entry = i830_write_entry,
1245 	.read_entry = i830_read_entry,
1246 	.dma_mask_size = 32,
1247 	.check_flags = i830_check_flags,
1248 	.chipset_flush = i830_chipset_flush,
1249 };
1250 static const struct intel_gtt_driver i915_gtt_driver = {
1251 	.gen = 3,
1252 	.has_pgtbl_enable = 1,
1253 	.setup = i9xx_setup,
1254 	.cleanup = i9xx_cleanup,
1255 	/* i945 is the last gpu to need phys mem (for overlay and cursors). */
1256 	.write_entry = i830_write_entry,
1257 	.read_entry = i830_read_entry,
1258 	.dma_mask_size = 32,
1259 	.check_flags = i830_check_flags,
1260 	.chipset_flush = i9xx_chipset_flush,
1261 };
1262 static const struct intel_gtt_driver g33_gtt_driver = {
1263 	.gen = 3,
1264 	.is_g33 = 1,
1265 	.setup = i9xx_setup,
1266 	.cleanup = i9xx_cleanup,
1267 	.write_entry = i965_write_entry,
1268 	.read_entry = i965_read_entry,
1269 	.dma_mask_size = 36,
1270 	.check_flags = i830_check_flags,
1271 	.chipset_flush = i9xx_chipset_flush,
1272 };
1273 static const struct intel_gtt_driver pineview_gtt_driver = {
1274 	.gen = 3,
1275 	.is_pineview = 1, .is_g33 = 1,
1276 	.setup = i9xx_setup,
1277 	.cleanup = i9xx_cleanup,
1278 	.write_entry = i965_write_entry,
1279 	.read_entry = i965_read_entry,
1280 	.dma_mask_size = 36,
1281 	.check_flags = i830_check_flags,
1282 	.chipset_flush = i9xx_chipset_flush,
1283 };
1284 static const struct intel_gtt_driver i965_gtt_driver = {
1285 	.gen = 4,
1286 	.has_pgtbl_enable = 1,
1287 	.setup = i9xx_setup,
1288 	.cleanup = i9xx_cleanup,
1289 	.write_entry = i965_write_entry,
1290 	.read_entry = i965_read_entry,
1291 	.dma_mask_size = 36,
1292 	.check_flags = i830_check_flags,
1293 	.chipset_flush = i9xx_chipset_flush,
1294 };
1295 static const struct intel_gtt_driver g4x_gtt_driver = {
1296 	.gen = 5,
1297 	.setup = i9xx_setup,
1298 	.cleanup = i9xx_cleanup,
1299 	.write_entry = i965_write_entry,
1300 	.read_entry = i965_read_entry,
1301 	.dma_mask_size = 36,
1302 	.check_flags = i830_check_flags,
1303 	.chipset_flush = i9xx_chipset_flush,
1304 };
1305 static const struct intel_gtt_driver ironlake_gtt_driver = {
1306 	.gen = 5,
1307 	.is_ironlake = 1,
1308 	.setup = i9xx_setup,
1309 	.cleanup = i9xx_cleanup,
1310 	.write_entry = i965_write_entry,
1311 	.read_entry = i965_read_entry,
1312 	.dma_mask_size = 36,
1313 	.check_flags = i830_check_flags,
1314 	.chipset_flush = i9xx_chipset_flush,
1315 };
1316 
1317 /* Table to describe the Intel GMCH GTT drivers.  Each entry pairs a
1318  * gmch_chip_id with the gtt_driver to use for it; find_gmch() checks
1319  * whether the device for a given entry is actually present.
1320  */
1321 static const struct intel_gtt_driver_description {
1322 	unsigned int gmch_chip_id;
1323 	char *name;
1324 	const struct intel_gtt_driver *gtt_driver;
1325 } intel_gtt_chipsets[] = {
1326 	{ PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
1327 		&i81x_gtt_driver},
1328 	{ PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
1329 		&i81x_gtt_driver},
1330 	{ PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
1331 		&i81x_gtt_driver},
1332 	{ PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
1333 		&i81x_gtt_driver},
1334 	{ PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
1335 		&i8xx_gtt_driver},
1336 	{ PCI_DEVICE_ID_INTEL_82845G_IG, "845G",
1337 		&i8xx_gtt_driver},
1338 	{ PCI_DEVICE_ID_INTEL_82854_IG, "854",
1339 		&i8xx_gtt_driver},
1340 	{ PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
1341 		&i8xx_gtt_driver},
1342 	{ PCI_DEVICE_ID_INTEL_82865_IG, "865",
1343 		&i8xx_gtt_driver},
1344 	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
1345 		&i915_gtt_driver },
1346 	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
1347 		&i915_gtt_driver },
1348 	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
1349 		&i915_gtt_driver },
1350 	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
1351 		&i915_gtt_driver },
1352 	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
1353 		&i915_gtt_driver },
1354 	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
1355 		&i915_gtt_driver },
1356 	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
1357 		&i965_gtt_driver },
1358 	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
1359 		&i965_gtt_driver },
1360 	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
1361 		&i965_gtt_driver },
1362 	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
1363 		&i965_gtt_driver },
1364 	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
1365 		&i965_gtt_driver },
1366 	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
1367 		&i965_gtt_driver },
1368 	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
1369 		&g33_gtt_driver },
1370 	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
1371 		&g33_gtt_driver },
1372 	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
1373 		&g33_gtt_driver },
1374 	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
1375 		&pineview_gtt_driver },
1376 	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
1377 		&pineview_gtt_driver },
1378 	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
1379 		&g4x_gtt_driver },
1380 	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
1381 		&g4x_gtt_driver },
1382 	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
1383 		&g4x_gtt_driver },
1384 	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
1385 		&g4x_gtt_driver },
1386 	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
1387 		&g4x_gtt_driver },
1388 	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
1389 		&g4x_gtt_driver },
1390 	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
1391 		&g4x_gtt_driver },
1392 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
1393 	    "HD Graphics", &ironlake_gtt_driver },
1394 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
1395 	    "HD Graphics", &ironlake_gtt_driver },
1396 	{ 0, NULL, NULL }
1397 };
1398 
1399 static int find_gmch(u16 device)
1400 {
1401 	struct pci_dev *gmch_device;
1402 
1403 	gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
1404 	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
1405 		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
1406 					     device, gmch_device);
1407 	}
1408 
1409 	if (!gmch_device)
1410 		return 0;
1411 
1412 	intel_private.pcidev = gmch_device;
1413 	return 1;
1414 }
1415 
1416 int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
1417 		     struct agp_bridge_data *bridge)
1418 {
1419 	int i, mask;
1420 
1421 	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
1422 		if (gpu_pdev) {
1423 			if (gpu_pdev->device ==
1424 			    intel_gtt_chipsets[i].gmch_chip_id) {
1425 				intel_private.pcidev = pci_dev_get(gpu_pdev);
1426 				intel_private.driver =
1427 					intel_gtt_chipsets[i].gtt_driver;
1428 
1429 				break;
1430 			}
1431 		} else if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
1432 			intel_private.driver =
1433 				intel_gtt_chipsets[i].gtt_driver;
1434 			break;
1435 		}
1436 	}
1437 
1438 	if (!intel_private.driver)
1439 		return 0;
1440 
1441 #if IS_ENABLED(CONFIG_AGP_INTEL)
1442 	if (bridge) {
1443 		if (INTEL_GTT_GEN > 1)
1444 			return 0;
1445 
1446 		bridge->driver = &intel_fake_agp_driver;
1447 		bridge->dev_private_data = &intel_private;
1448 		bridge->dev = bridge_pdev;
1449 	}
1450 #endif
1451 
1452 
1453 	/*
1454 	 * Can be called from the fake agp driver but also directly from
1455 	 * drm/i915.ko. Hence we need to check whether everything is set up
1456 	 * already.
1457 	 */
1458 	if (intel_private.refcount++)
1459 		return 1;
1460 
1461 	intel_private.bridge_dev = pci_dev_get(bridge_pdev);
1462 
1463 	dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
1464 
1465 	if (bridge) {
1466 		mask = intel_private.driver->dma_mask_size;
1467 		if (dma_set_mask(&intel_private.pcidev->dev, DMA_BIT_MASK(mask)))
1468 			dev_err(&intel_private.pcidev->dev,
1469 				"set gfx device dma mask %d-bit failed!\n",
1470 				mask);
1471 		else
1472 			dma_set_coherent_mask(&intel_private.pcidev->dev,
1473 					      DMA_BIT_MASK(mask));
1474 	}
1475 
1476 	if (intel_gtt_init() != 0) {
1477 		intel_gmch_remove();
1478 
1479 		return 0;
1480 	}
1481 
1482 	return 1;
1483 }
1484 EXPORT_SYMBOL(intel_gmch_probe);
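
/*
 * Editorial sketch, not part of the original file: drm/i915 probes the
 * GMCH directly, without a fake agp bridge, roughly:
 *
 *	if (!intel_gmch_probe(bridge_pdev, gpu_pdev, NULL))
 *		return -EIO;
 *
 * while the intel-agp module instead passes a bridge and a NULL gpu_pdev.
 */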
1485 
1486 void intel_gmch_gtt_get(u64 *gtt_total,
1487 			phys_addr_t *mappable_base,
1488 			resource_size_t *mappable_end)
1489 {
1490 	*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
1491 	*mappable_base = intel_private.gma_bus_addr;
1492 	*mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
1493 }
1494 EXPORT_SYMBOL(intel_gmch_gtt_get);
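
/*
 * Editorial sketch, not part of the original file: querying the GTT
 * geometry from a caller such as drm/i915, roughly:
 *
 *	u64 gtt_total;
 *	phys_addr_t mappable_base;
 *	resource_size_t mappable_end;
 *
 *	intel_gmch_gtt_get(&gtt_total, &mappable_base, &mappable_end);
 */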
1495 
1496 void intel_gmch_gtt_flush(void)
1497 {
1498 	if (intel_private.driver->chipset_flush)
1499 		intel_private.driver->chipset_flush();
1500 }
1501 EXPORT_SYMBOL(intel_gmch_gtt_flush);
1502 
1503 void intel_gmch_remove(void)
1504 {
1505 	if (--intel_private.refcount)
1506 		return;
1507 
1508 	if (intel_private.scratch_page)
1509 		intel_gtt_teardown_scratch_page();
1510 	if (intel_private.pcidev)
1511 		pci_dev_put(intel_private.pcidev);
1512 	if (intel_private.bridge_dev)
1513 		pci_dev_put(intel_private.bridge_dev);
1514 	intel_private.driver = NULL;
1515 }
1516 EXPORT_SYMBOL(intel_gmch_remove);
1517 
1518 MODULE_AUTHOR("Dave Jones, Various @Intel");
1519 MODULE_DESCRIPTION("Intel GTT (Graphics Translation Table) routines");
1520 MODULE_LICENSE("GPL and additional rights");
1521