/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphic devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's stuck
 * here.
 *
 * /fairy-tale-mode off
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
#include <linux/delay.h>
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
#include <drm/intel-gtt.h>
#include <asm/set_memory.h>
/*
 * If we have Intel graphics, we're not going to have anything other than
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
 * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
 * Only newer chipsets need to bother with this, of course.
 */
#ifdef CONFIG_INTEL_IOMMU
#define USE_PCI_DMA_API 1
#else
#define USE_PCI_DMA_API 0
#endif
struct intel_gtt_driver {
	unsigned int gen : 8;
	unsigned int is_g33 : 1;
	unsigned int is_pineview : 1;
	unsigned int is_ironlake : 1;
	unsigned int has_pgtbl_enable : 1;
	unsigned int dma_mask_size : 8;
	/* Chipset specific GTT setup */
	int (*setup)(void);
	/* This should undo anything done in ->setup() save the unmapping
	 * of the mmio register file, that's done in the generic code. */
	void (*cleanup)(void);
	void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
	/* Flags is a more or less chipset specific opaque value.
	 * For chipsets that need to support old ums (non-gem) code, this
	 * needs to be identical to the various supported agp memory types! */
	bool (*check_flags)(unsigned int flags);
	void (*chipset_flush)(void);
};
static struct _intel_private {
	const struct intel_gtt_driver *driver;
	struct pci_dev *pcidev;	/* device one */
	struct pci_dev *bridge_dev;
	u8 __iomem *registers;
	phys_addr_t gtt_phys_addr;
	u32 PGETBL_save;
	u32 __iomem *gtt;	/* I915G */
	bool clear_fake_agp; /* on first access via agp, fill with scratch */
	int num_dcache_entries;
	void __iomem *i9xx_flush_page;
	char *i81x_gtt_table;
	struct resource ifp_resource;
	int resource_valid;
	struct page *scratch_page;
	phys_addr_t scratch_page_dma;
	int refcount;
	/* Whether i915 needs to use the dmar apis or not. */
	unsigned int needs_dmar : 1;
	phys_addr_t gma_bus_addr;
	/* Size of memory reserved for graphics by the BIOS */
	unsigned int stolen_size;
	/* Total number of gtt entries. */
	unsigned int gtt_total_entries;
	/* Part of the gtt that is mappable by the cpu, for those chips where
	 * this is not the full gtt. */
	unsigned int gtt_mappable_entries;
} intel_private;
#define INTEL_GTT_GEN	intel_private.driver->gen
#define IS_G33		intel_private.driver->is_g33
#define IS_PINEVIEW	intel_private.driver->is_pineview
#define IS_IRONLAKE	intel_private.driver->is_ironlake
#define HAS_PGTBL_EN	intel_private.driver->has_pgtbl_enable
#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_gtt_map_memory(struct page **pages,
				unsigned int num_entries,
				struct sg_table *st)
{
	struct scatterlist *sg;
	int i;

	DBG("try mapping %lu pages\n", (unsigned long)num_entries);

	if (sg_alloc_table(st, num_entries, GFP_KERNEL))
		goto err;

	for_each_sg(st->sgl, sg, num_entries, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	if (!pci_map_sg(intel_private.pcidev,
			st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL))
		goto err;

	return 0;

err:
	sg_free_table(st);
	return -ENOMEM;
}

static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
{
	struct sg_table st;

	DBG("try unmapping %lu pages\n", (unsigned long)num_sg);

	pci_unmap_sg(intel_private.pcidev, sg_list,
		     num_sg, PCI_DMA_BIDIRECTIONAL);

	st.orig_nents = st.nents = num_sg;
	st.sgl = sg_list;

	sg_free_table(&st);
}
static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	return;
}
/* Exists to support ARGB cursors */
static struct page *i8xx_alloc_pages(void)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
	if (page == NULL)
		return NULL;

	if (set_pages_uc(page, 4) < 0) {
		set_pages_wb(page, 4);
		__free_pages(page, 2);
		return NULL;
	}
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}

static void i8xx_destroy_pages(struct page *page)
{
	if (page == NULL)
		return;

	set_pages_wb(page, 4);
	__free_pages(page, 2);
	atomic_dec(&agp_bridge->current_memory_agp);
}
#define I810_GTT_ORDER 4
static int i810_setup(void)
{
	phys_addr_t reg_addr;
	char *gtt_table;

	/* i81x does not preallocate the gtt. It's always 64kb in size. */
	gtt_table = alloc_gatt_pages(I810_GTT_ORDER);
	if (gtt_table == NULL)
		return -ENOMEM;
	intel_private.i81x_gtt_table = gtt_table;

	reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR);

	intel_private.registers = ioremap(reg_addr, KB(64));
	if (!intel_private.registers)
		return -ENOMEM;

	writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED,
	       intel_private.registers+I810_PGETBL_CTL);

	intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE;

	if ((readl(intel_private.registers+I810_DRAM_CTL)
	    & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
		dev_info(&intel_private.pcidev->dev,
			 "detected 4MB dedicated video ram\n");
		intel_private.num_dcache_entries = 1024;
	}

	return 0;
}

static void i810_cleanup(void)
{
	writel(0, intel_private.registers+I810_PGETBL_CTL);
	free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
}
#if IS_ENABLED(CONFIG_AGP_INTEL)
static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
				      int type)
{
	int i;

	if ((pg_start + mem->page_count)
			> intel_private.num_dcache_entries)
		return -EINVAL;

	if (!mem->is_flushed)
		global_cache_flush();

	for (i = pg_start; i < (pg_start + mem->page_count); i++) {
		dma_addr_t addr = i << PAGE_SHIFT;
		intel_private.driver->write_entry(addr,
						  i, type);
	}
	wmb();

	return 0;
}
/*
 * The i810/i830 requires a physical address to program its mouse
 * pointer into hardware.
 * However the Xserver still writes to it through the agp aperture.
 */
static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
{
	struct agp_memory *new;
	struct page *page;

	switch (pg_count) {
	case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
		break;
	case 4:
		/* kludge to get 4 physical pages for ARGB cursor */
		page = i8xx_alloc_pages();
		break;
	default:
		return NULL;
	}

	if (page == NULL)
		return NULL;

	new = agp_create_memory(pg_count);
	if (new == NULL)
		return NULL;

	new->pages[0] = page;
	if (pg_count == 4) {
		/* kludge to get 4 physical pages for ARGB cursor */
		new->pages[1] = new->pages[0] + 1;
		new->pages[2] = new->pages[1] + 1;
		new->pages[3] = new->pages[2] + 1;
	}
	new->page_count = pg_count;
	new->num_scratch_pages = pg_count;
	new->type = AGP_PHYS_MEMORY;
	new->physical = page_to_phys(new->pages[0]);
	return new;
}
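/*
 * Illustrative sketch (hypothetical caller, not part of this file): old
 * userspace drivers reach this allocator through the generic agp layer by
 * requesting AGP_PHYS_MEMORY; a 4-page request yields the contiguous ARGB
 * cursor pages, and new->physical is what gets programmed into hardware:
 *
 *	struct agp_memory *cursor;
 *
 *	cursor = agp_allocate_memory(agp_bridge, 4, AGP_PHYS_MEMORY);
 *	if (cursor != NULL)
 *		write_cursor_base_register(cursor->physical);
 *
 * write_cursor_base_register() is made up for the example.
 */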
static void intel_i810_free_by_type(struct agp_memory *curr)
{
	agp_free_key(curr->key);
	if (curr->type == AGP_PHYS_MEMORY) {
		if (curr->page_count == 4)
			i8xx_destroy_pages(curr->pages[0]);
		else {
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_UNMAP);
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_FREE);
		}
		agp_free_page_array(curr);
	}
	kfree(curr);
}
#endif
static int intel_gtt_setup_scratch_page(void)
{
	struct page *page;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return -ENOMEM;
	set_pages_uc(page, 1);

	if (intel_private.needs_dmar) {
		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
					PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
			return -EINVAL;

		intel_private.scratch_page_dma = dma_addr;
	} else
		intel_private.scratch_page_dma = page_to_phys(page);

	intel_private.scratch_page = page;

	return 0;
}
static void i810_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	u32 pte_flags = I810_PTE_VALID;

	switch (flags) {
	case AGP_DCACHE_MEMORY:
		pte_flags |= I810_PTE_LOCAL;
		break;
	case AGP_USER_CACHED_MEMORY:
		pte_flags |= I830_PTE_SYSTEM_CACHED;
		break;
	}

	writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
static unsigned int intel_gtt_stolen_size(void)
{
	u16 gmch_ctrl;
	u8 rdct;
	int local = 0;
	static const int ddt[4] = { 0, 16, 32, 64 };
	unsigned int stolen_size = 0;

	if (INTEL_GTT_GEN == 1)
		return 0; /* no stolen mem on i81x */

	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctrl);

	if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
	    intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
		case I830_GMCH_GMS_STOLEN_512:
			stolen_size = KB(512);
			break;
		case I830_GMCH_GMS_STOLEN_1024:
			stolen_size = MB(1);
			break;
		case I830_GMCH_GMS_STOLEN_8192:
			stolen_size = MB(8);
			break;
		case I830_GMCH_GMS_LOCAL:
			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
			stolen_size = (I830_RDRAM_ND(rdct) + 1) *
					MB(ddt[I830_RDRAM_DDT(rdct)]);
			local = 1;
			break;
		default:
			stolen_size = 0;
			break;
		}
	} else {
		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
		case I855_GMCH_GMS_STOLEN_1M:
			stolen_size = MB(1);
			break;
		case I855_GMCH_GMS_STOLEN_4M:
			stolen_size = MB(4);
			break;
		case I855_GMCH_GMS_STOLEN_8M:
			stolen_size = MB(8);
			break;
		case I855_GMCH_GMS_STOLEN_16M:
			stolen_size = MB(16);
			break;
		case I855_GMCH_GMS_STOLEN_32M:
			stolen_size = MB(32);
			break;
		case I915_GMCH_GMS_STOLEN_48M:
			stolen_size = MB(48);
			break;
		case I915_GMCH_GMS_STOLEN_64M:
			stolen_size = MB(64);
			break;
		case G33_GMCH_GMS_STOLEN_128M:
			stolen_size = MB(128);
			break;
		case G33_GMCH_GMS_STOLEN_256M:
			stolen_size = MB(256);
			break;
		case INTEL_GMCH_GMS_STOLEN_96M:
			stolen_size = MB(96);
			break;
		case INTEL_GMCH_GMS_STOLEN_160M:
			stolen_size = MB(160);
			break;
		case INTEL_GMCH_GMS_STOLEN_224M:
			stolen_size = MB(224);
			break;
		case INTEL_GMCH_GMS_STOLEN_352M:
			stolen_size = MB(352);
			break;
		default:
			stolen_size = 0;
			break;
		}
	}

	if (stolen_size > 0) {
		dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
			 stolen_size / KB(1), local ? "local" : "stolen");
	} else {
		dev_info(&intel_private.bridge_dev->dev,
			 "no pre-allocated video memory detected\n");
		stolen_size = 0;
	}

	return stolen_size;
}
static void i965_adjust_pgetbl_size(unsigned int size_flag)
{
	u32 pgetbl_ctl, pgetbl_ctl2;

	/* ensure that ppgtt is disabled */
	pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
	pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
	writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);

	/* write the new ggtt size */
	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
	pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
	pgetbl_ctl |= size_flag;
	writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
}
static unsigned int i965_gtt_total_entries(void)
{
	int size;
	u32 pgetbl_ctl;
	u16 gmch_ctl;

	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctl);

	if (INTEL_GTT_GEN == 5) {
		switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
		case G4x_GMCH_SIZE_1M:
		case G4x_GMCH_SIZE_VT_1M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
			break;
		case G4x_GMCH_SIZE_VT_1_5M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
			break;
		case G4x_GMCH_SIZE_2M:
		case G4x_GMCH_SIZE_VT_2M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
			break;
		}
	}

	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

	switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
	case I965_PGETBL_SIZE_128KB:
		size = KB(128);
		break;
	case I965_PGETBL_SIZE_256KB:
		size = KB(256);
		break;
	case I965_PGETBL_SIZE_512KB:
		size = KB(512);
		break;
	/* GTT pagetable sizes bigger than 512KB are not possible on G33! */
	case I965_PGETBL_SIZE_1MB:
		size = KB(1024);
		break;
	case I965_PGETBL_SIZE_2MB:
		size = KB(2048);
		break;
	case I965_PGETBL_SIZE_1_5MB:
		size = KB(1024 + 512);
		break;
	default:
		dev_info(&intel_private.pcidev->dev,
			 "unknown page table size, assuming 512KB\n");
		size = KB(512);
	}

	return size / 4;
}
static unsigned int intel_gtt_total_entries(void)
{
	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
		return i965_gtt_total_entries();
	else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
		 */
		return intel_private.gtt_mappable_entries;
	}
}
static unsigned int intel_gtt_mappable_entries(void)
{
	unsigned int aperture_size;

	if (INTEL_GTT_GEN == 1) {
		u32 smram_miscc;

		pci_read_config_dword(intel_private.bridge_dev,
				      I810_SMRAM_MISCC, &smram_miscc);

		if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
				== I810_GFX_MEM_WIN_32M)
			aperture_size = MB(32);
		else
			aperture_size = MB(64);
	} else if (INTEL_GTT_GEN == 2) {
		u16 gmch_ctrl;

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);

		if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
			aperture_size = MB(64);
		else
			aperture_size = MB(128);
	} else {
		/* 9xx supports large sizes, just look at the length */
		aperture_size = pci_resource_len(intel_private.pcidev, 2);
	}

	return aperture_size >> PAGE_SHIFT;
}
static void intel_gtt_teardown_scratch_page(void)
{
	set_pages_wb(intel_private.scratch_page, 1);
	if (intel_private.needs_dmar)
		pci_unmap_page(intel_private.pcidev,
			       intel_private.scratch_page_dma,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(intel_private.scratch_page);
}

static void intel_gtt_cleanup(void)
{
	intel_private.driver->cleanup();

	iounmap(intel_private.gtt);
	iounmap(intel_private.registers);

	intel_gtt_teardown_scratch_page();
}
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline int needs_ilk_vtd_wa(void)
{
#ifdef CONFIG_INTEL_IOMMU
	const unsigned short gpu_devid = intel_private.pcidev->device;

	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
	     gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
	     intel_iommu_gfx_mapped)
		return 1;
#endif
	return 0;
}

static bool intel_gtt_can_wc(void)
{
	if (INTEL_GTT_GEN <= 2)
		return false;

	if (INTEL_GTT_GEN >= 6)
		return false;

	/* Reports of major corruption with ILK vt'd enabled */
	if (needs_ilk_vtd_wa())
		return false;

	return true;
}
static int intel_gtt_init(void)
{
	u32 gtt_map_size;
	int ret, bar;

	ret = intel_private.driver->setup();
	if (ret != 0)
		return ret;

	intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
	intel_private.gtt_total_entries = intel_gtt_total_entries();

	/* save the PGETBL reg for resume */
	intel_private.PGETBL_save =
		readl(intel_private.registers+I810_PGETBL_CTL)
			& ~I810_PGETBL_ENABLED;
	/* we only ever restore the register when enabling the PGTBL... */
	if (HAS_PGTBL_EN)
		intel_private.PGETBL_save |= I810_PGETBL_ENABLED;

	dev_info(&intel_private.bridge_dev->dev,
		 "detected gtt size: %dK total, %dK mappable\n",
		 intel_private.gtt_total_entries * 4,
		 intel_private.gtt_mappable_entries * 4);

	gtt_map_size = intel_private.gtt_total_entries * 4;

	intel_private.gtt = NULL;
	if (intel_gtt_can_wc())
		intel_private.gtt = ioremap_wc(intel_private.gtt_phys_addr,
					       gtt_map_size);
	if (intel_private.gtt == NULL)
		intel_private.gtt = ioremap(intel_private.gtt_phys_addr,
					    gtt_map_size);
	if (intel_private.gtt == NULL) {
		intel_private.driver->cleanup();
		iounmap(intel_private.registers);
		return -ENOMEM;
	}

#if IS_ENABLED(CONFIG_AGP_INTEL)
	global_cache_flush();   /* FIXME: ? */
#endif

	intel_private.stolen_size = intel_gtt_stolen_size();

	intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;

	ret = intel_gtt_setup_scratch_page();
	if (ret != 0) {
		intel_gtt_cleanup();
		return ret;
	}

	if (INTEL_GTT_GEN <= 2)
		bar = I810_GMADR_BAR;
	else
		bar = I915_GMADR_BAR;

	intel_private.gma_bus_addr = pci_bus_address(intel_private.pcidev, bar);

	return 0;
}
#if IS_ENABLED(CONFIG_AGP_INTEL)
static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
	{32, 8192, 3},
	{64, 16384, 4},
	{128, 32768, 5},
	{256, 65536, 6},
	{512, 131072, 7},
};

static int intel_fake_agp_fetch_size(void)
{
	int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
	unsigned int aper_size;
	int i;

	aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1);

	for (i = 0; i < num_sizes; i++) {
		if (aper_size == intel_fake_agp_sizes[i].size) {
			agp_bridge->current_size =
				(void *) (intel_fake_agp_sizes + i);
			return aper_size;
		}
	}

	return 0;
}
#endif
static void i830_cleanup(void)
{
}

/* The chipset_flush interface needs to get data that has already been
 * flushed out of the CPU all the way out to main memory, because the GPU
 * doesn't snoop those buffers.
 *
 * The 8xx series doesn't have the same lovely interface for flushing the
 * chipset write buffers that the later chips do. According to the 865
 * specs, it's 64 octwords, or 1KB. So, to get those previous things in
 * that buffer out, we just fill 1KB and clflush it out, on the assumption
 * that it'll push whatever was in there out. It appears to work.
 */
static void i830_chipset_flush(void)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

	/* Forcibly evict everything from the CPU write buffers.
	 * clflush appears to be insufficient.
	 */
	wbinvd_on_all_cpus();

	/* So far we've only seen documentation for this magic bit on the
	 * 855GM; we hope it exists for the other gen2 chipsets...
	 *
	 * Also works as advertised on my 845G.
	 */
	writel(readl(intel_private.registers+I830_HIC) | (1<<31),
	       intel_private.registers+I830_HIC);

	while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
		if (time_after(jiffies, timeout))
			break;

		udelay(50);
	}
}
static void i830_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	u32 pte_flags = I810_PTE_VALID;

	if (flags == AGP_USER_CACHED_MEMORY)
		pte_flags |= I830_PTE_SYSTEM_CACHED;

	writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
bool intel_enable_gtt(void)
{
	u8 __iomem *reg;

	if (INTEL_GTT_GEN == 2) {
		u16 gmch_ctrl;

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
		gmch_ctrl |= I830_GMCH_ENABLED;
		pci_write_config_word(intel_private.bridge_dev,
				      I830_GMCH_CTRL, gmch_ctrl);

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
		if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
			dev_err(&intel_private.pcidev->dev,
				"failed to enable the GTT: GMCH_CTRL=%x\n",
				gmch_ctrl);
			return false;
		}
	}

	/* On the resume path we may be adjusting the PGTBL value, so
	 * be paranoid and flush all chipset write buffers...
	 */
	if (INTEL_GTT_GEN >= 3)
		writel(0, intel_private.registers+GFX_FLSH_CNTL);

	reg = intel_private.registers+I810_PGETBL_CTL;
	writel(intel_private.PGETBL_save, reg);
	if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
		dev_err(&intel_private.pcidev->dev,
			"failed to enable the GTT: PGETBL=%x [expected %x]\n",
			readl(reg), intel_private.PGETBL_save);
		return false;
	}

	if (INTEL_GTT_GEN >= 3)
		writel(0, intel_private.registers+GFX_FLSH_CNTL);

	return true;
}
EXPORT_SYMBOL(intel_enable_gtt);
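/*
 * Illustrative sketch (hypothetical caller, not from this file): a client
 * such as drm/i915 re-runs this on the resume path, since the PGTBL and
 * GMCH enable bits may have been lost over suspend:
 *
 *	if (!intel_enable_gtt())
 *		return -EIO;
 */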
static int i830_setup(void)
{
	phys_addr_t reg_addr;

	reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR);

	intel_private.registers = ioremap(reg_addr, KB(64));
	if (!intel_private.registers)
		return -ENOMEM;

	intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE;

	return 0;
}
#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
{
	agp_bridge->gatt_table_real = NULL;
	agp_bridge->gatt_table = NULL;
	agp_bridge->gatt_bus_addr = 0;

	return 0;
}

static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}

static int intel_fake_agp_configure(void)
{
	if (!intel_enable_gtt())
		return -EIO;

	intel_private.clear_fake_agp = true;
	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;

	return 0;
}
#endif
static bool i830_check_flags(unsigned int flags)
{
	switch (flags) {
	case 0:
	case AGP_PHYS_MEMORY:
	case AGP_USER_CACHED_MEMORY:
	case AGP_USER_MEMORY:
		return true;
	}

	return false;
}
void intel_gtt_insert_page(dma_addr_t addr,
			   unsigned int pg,
			   unsigned int flags)
{
	intel_private.driver->write_entry(addr, pg, flags);
	if (intel_private.driver->chipset_flush)
		intel_private.driver->chipset_flush();
}
EXPORT_SYMBOL(intel_gtt_insert_page);
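/*
 * Illustrative sketch (hypothetical caller): bind one DMA-mapped page at
 * GTT entry `pg` and make it visible to the GPU; `my_dma_addr` stands in
 * for an address the caller has already mapped for the device:
 *
 *	intel_gtt_insert_page(my_dma_addr, pg, AGP_USER_MEMORY);
 */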
void intel_gtt_insert_sg_entries(struct sg_table *st,
				 unsigned int pg_start,
				 unsigned int flags)
{
	struct scatterlist *sg;
	unsigned int len, m;
	int i, j;

	j = pg_start;

	/* sg may merge pages, but we have to separate
	 * per-page addr for GTT */
	for_each_sg(st->sgl, sg, st->nents, i) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		for (m = 0; m < len; m++) {
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
			intel_private.driver->write_entry(addr, j, flags);
			j++;
		}
	}
	wmb();
}
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
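/*
 * Illustrative sketch (hypothetical caller): after DMA-mapping an object's
 * backing pages into an sg_table `st`, bind them starting at entry `first`;
 * the loop above splits merged sg segments back into page-sized entries:
 *
 *	intel_gtt_insert_sg_entries(&st, first, AGP_USER_CACHED_MEMORY);
 */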
#if IS_ENABLED(CONFIG_AGP_INTEL)
static void intel_gtt_insert_pages(unsigned int first_entry,
				   unsigned int num_entries,
				   struct page **pages,
				   unsigned int flags)
{
	int i, j;

	for (i = 0, j = first_entry; i < num_entries; i++, j++) {
		dma_addr_t addr = page_to_phys(pages[i]);
		intel_private.driver->write_entry(addr,
						  j, flags);
	}
	wmb();
}
static int intel_fake_agp_insert_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	int ret = -EINVAL;

	if (intel_private.clear_fake_agp) {
		int start = intel_private.stolen_size / PAGE_SIZE;
		int end = intel_private.gtt_mappable_entries;
		intel_gtt_clear_range(start, end - start);
		intel_private.clear_fake_agp = false;
	}

	if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
		return i810_insert_dcache_entries(mem, pg_start, type);

	if (mem->page_count == 0)
		goto out;

	if (pg_start + mem->page_count > intel_private.gtt_total_entries)
		goto out_err;

	if (type != mem->type)
		goto out_err;

	if (!intel_private.driver->check_flags(type))
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	if (intel_private.needs_dmar) {
		struct sg_table st;

		ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
		if (ret != 0)
			return ret;

		intel_gtt_insert_sg_entries(&st, pg_start, type);
		mem->sg_list = st.sgl;
		mem->num_sg = st.nents;
	} else
		intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
				       type);

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
{
	unsigned int i;

	for (i = first_entry; i < (first_entry + num_entries); i++) {
		intel_private.driver->write_entry(intel_private.scratch_page_dma,
						  i, 0);
	}
	wmb();
}
EXPORT_SYMBOL(intel_gtt_clear_range);
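/*
 * Illustrative sketch (hypothetical caller): on unbind, point the freed
 * entries back at the scratch page instead of leaving stale PTEs behind:
 *
 *	intel_gtt_clear_range(obj_first_entry, obj_num_entries);
 *	intel_gtt_chipset_flush();
 */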
#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_fake_agp_remove_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	if (mem->page_count == 0)
		return 0;

	intel_gtt_clear_range(pg_start, mem->page_count);

	if (intel_private.needs_dmar) {
		intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
		mem->sg_list = NULL;
		mem->num_sg = 0;
	}

	return 0;
}
static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
							int type)
{
	struct agp_memory *new;

	if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) {
		if (pg_count != intel_private.num_dcache_entries)
			return NULL;

		new = agp_create_memory(1);
		if (new == NULL)
			return NULL;

		new->type = AGP_DCACHE_MEMORY;
		new->page_count = pg_count;
		new->num_scratch_pages = 0;
		agp_free_page_array(new);
		return new;
	}
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);
	/* always return NULL for other allocation types for now */
	return NULL;
}
static int intel_alloc_chipset_flush_resource(void)
{
	int ret;

	ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
				     PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
				     pcibios_align_resource, intel_private.bridge_dev);

	return ret;
}
static void intel_i915_setup_chipset_flush(void)
{
	int ret;
	u32 temp;

	pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
	if (!(temp & 0x1)) {
		intel_alloc_chipset_flush_resource();
		intel_private.resource_valid = 1;
		pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		temp &= ~1;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = temp;
		intel_private.ifp_resource.end = temp + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}
static void intel_i965_g33_setup_chipset_flush(void)
{
	u32 temp_hi, temp_lo;
	u64 l64;
	int ret;

	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);

	if (!(temp_lo & 0x1)) {

		intel_alloc_chipset_flush_resource();

		intel_private.resource_valid = 1;
		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
			upper_32_bits(intel_private.ifp_resource.start));
		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		temp_lo &= ~0x1;
		l64 = ((u64)temp_hi << 32) | temp_lo;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = l64;
		intel_private.ifp_resource.end = l64 + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}
static void intel_i9xx_setup_flush(void)
{
	/* return if already configured */
	if (intel_private.ifp_resource.start)
		return;

	if (INTEL_GTT_GEN == 6)
		return;

	/* setup a resource for this object */
	intel_private.ifp_resource.name = "Intel Flush Page";
	intel_private.ifp_resource.flags = IORESOURCE_MEM;

	/* Setup chipset flush for 915 */
	if (IS_G33 || INTEL_GTT_GEN >= 4) {
		intel_i965_g33_setup_chipset_flush();
	} else {
		intel_i915_setup_chipset_flush();
	}

	if (intel_private.ifp_resource.start)
		intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
	if (!intel_private.i9xx_flush_page)
		dev_err(&intel_private.pcidev->dev,
			"can't ioremap flush page - no chipset flushing\n");
}
static void i9xx_cleanup(void)
{
	if (intel_private.i9xx_flush_page)
		iounmap(intel_private.i9xx_flush_page);
	if (intel_private.resource_valid)
		release_resource(&intel_private.ifp_resource);
	intel_private.ifp_resource.start = 0;
	intel_private.resource_valid = 0;
}

static void i9xx_chipset_flush(void)
{
	if (intel_private.i9xx_flush_page)
		writel(1, intel_private.i9xx_flush_page);
}
static void i965_write_entry(dma_addr_t addr,
			     unsigned int entry,
			     unsigned int flags)
{
	u32 pte_flags;

	pte_flags = I810_PTE_VALID;
	if (flags == AGP_USER_CACHED_MEMORY)
		pte_flags |= I830_PTE_SYSTEM_CACHED;

	/* Shift high bits down */
	addr |= (addr >> 28) & 0xf0;
	writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
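/*
 * Worked example of the "shift high bits down" trick above: gen4+ PTEs are
 * 32 bits wide, but these chipsets can address 36 bits; address bits 35:32
 * are stored in PTE bits 7:4 (the low 12 bits of a page-aligned address are
 * free anyway). For addr = 0x340000000, (addr >> 28) & 0xf0 == 0x30, so the
 * written PTE is 0x40000030 | pte_flags.
 */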
static int i9xx_setup(void)
{
	phys_addr_t reg_addr;
	int size = KB(512);

	reg_addr = pci_resource_start(intel_private.pcidev, I915_MMADR_BAR);

	intel_private.registers = ioremap(reg_addr, size);
	if (!intel_private.registers)
		return -ENOMEM;

	switch (INTEL_GTT_GEN) {
	case 3:
		intel_private.gtt_phys_addr =
			pci_resource_start(intel_private.pcidev, I915_PTE_BAR);
		break;
	case 5:
		intel_private.gtt_phys_addr = reg_addr + MB(2);
		break;
	default:
		intel_private.gtt_phys_addr = reg_addr + KB(512);
		break;
	}

	intel_i9xx_setup_flush();

	return 0;
}
#if IS_ENABLED(CONFIG_AGP_INTEL)
static const struct agp_bridge_driver intel_fake_agp_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.aperture_sizes		= intel_fake_agp_sizes,
	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
	.configure		= intel_fake_agp_configure,
	.fetch_size		= intel_fake_agp_fetch_size,
	.cleanup		= intel_gtt_cleanup,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_fake_agp_create_gatt_table,
	.free_gatt_table	= intel_fake_agp_free_gatt_table,
	.insert_memory		= intel_fake_agp_insert_entries,
	.remove_memory		= intel_fake_agp_remove_entries,
	.alloc_by_type		= intel_fake_agp_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
};
#endif
static const struct intel_gtt_driver i81x_gtt_driver = {
	.gen = 1,
	.has_pgtbl_enable = 1,
	.dma_mask_size = 32,
	.setup = i810_setup,
	.cleanup = i810_cleanup,
	.check_flags = i830_check_flags,
	.write_entry = i810_write_entry,
};
static const struct intel_gtt_driver i8xx_gtt_driver = {
	.gen = 2,
	.has_pgtbl_enable = 1,
	.setup = i830_setup,
	.cleanup = i830_cleanup,
	.write_entry = i830_write_entry,
	.dma_mask_size = 32,
	.check_flags = i830_check_flags,
	.chipset_flush = i830_chipset_flush,
};
static const struct intel_gtt_driver i915_gtt_driver = {
	.gen = 3,
	.has_pgtbl_enable = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	/* i945 is the last gpu to need phys mem (for overlay and cursors). */
	.write_entry = i830_write_entry,
	.dma_mask_size = 32,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g33_gtt_driver = {
	.gen = 3,
	.is_g33 = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver pineview_gtt_driver = {
	.gen = 3,
	.is_pineview = 1, .is_g33 = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver i965_gtt_driver = {
	.gen = 4,
	.has_pgtbl_enable = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g4x_gtt_driver = {
	.gen = 5,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver ironlake_gtt_driver = {
	.gen = 5,
	.is_ironlake = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
 * driver and gmch_driver must be non-null, and find_gmch will determine
 * which one should be used if a gmch_chip_id is present.
 */
static const struct intel_gtt_driver_description {
	unsigned int gmch_chip_id;
	char *name;
	const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
	{ PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
		&i81x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
		&i81x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
		&i81x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
		&i81x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
		&i8xx_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82845G_IG, "845G",
		&i8xx_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82854_IG, "854",
		&i8xx_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
		&i8xx_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82865_IG, "865",
		&i8xx_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
		&pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
		&pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
	    "HD Graphics", &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
	    "HD Graphics", &ironlake_gtt_driver },
	{ 0, NULL, NULL }
};
static int find_gmch(u16 device)
{
	struct pci_dev *gmch_device;

	gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
					     device, gmch_device);
	}

	if (!gmch_device)
		return 0;

	intel_private.pcidev = gmch_device;
	return 1;
}
int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
		     struct agp_bridge_data *bridge)
{
	int i, mask;

	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
		if (gpu_pdev) {
			if (gpu_pdev->device ==
			    intel_gtt_chipsets[i].gmch_chip_id) {
				intel_private.pcidev = pci_dev_get(gpu_pdev);
				intel_private.driver =
					intel_gtt_chipsets[i].gtt_driver;

				break;
			}
		} else if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
			intel_private.driver =
				intel_gtt_chipsets[i].gtt_driver;
			break;
		}
	}

	if (!intel_private.driver)
		return 0;

#if IS_ENABLED(CONFIG_AGP_INTEL)
	if (bridge) {
		if (INTEL_GTT_GEN > 1)
			return 0;

		bridge->driver = &intel_fake_agp_driver;
		bridge->dev_private_data = &intel_private;
		bridge->dev = bridge_pdev;
	}
#endif

	/*
	 * Can be called from the fake agp driver but also directly from
	 * drm/i915.ko. Hence we need to check whether everything is set up
	 * correctly.
	 */
	if (intel_private.refcount++)
		return 1;

	intel_private.bridge_dev = pci_dev_get(bridge_pdev);

	dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);

	mask = intel_private.driver->dma_mask_size;
	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
		dev_err(&intel_private.pcidev->dev,
			"set gfx device dma mask %d-bit failed!\n", mask);
	else
		pci_set_consistent_dma_mask(intel_private.pcidev,
					    DMA_BIT_MASK(mask));

	if (intel_gtt_init() != 0) {
		intel_gmch_remove();

		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(intel_gmch_probe);
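/*
 * Illustrative sketch (hypothetical caller, not from this file): drm/i915
 * probes the GMCH directly and passes a NULL bridge, so only the GTT side
 * is set up; `pdev` is the IG device, `bridge_pdev` its host bridge:
 *
 *	if (!intel_gmch_probe(bridge_pdev, pdev, NULL))
 *		return -ENODEV;
 */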
void intel_gtt_get(u64 *gtt_total,
		   u64 *stolen_size,
		   phys_addr_t *mappable_base,
		   u64 *mappable_end)
{
	*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
	*stolen_size = intel_private.stolen_size;
	*mappable_base = intel_private.gma_bus_addr;
	*mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
}
EXPORT_SYMBOL(intel_gtt_get);
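/*
 * Illustrative sketch (hypothetical caller): querying the layout detected
 * by intel_gtt_init(), e.g. to size the GEM aperture in drm/i915:
 *
 *	u64 gtt_size, stolen, mappable_end;
 *	phys_addr_t mappable_base;
 *
 *	intel_gtt_get(&gtt_size, &stolen, &mappable_base, &mappable_end);
 */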
void intel_gtt_chipset_flush(void)
{
	if (intel_private.driver->chipset_flush)
		intel_private.driver->chipset_flush();
}
EXPORT_SYMBOL(intel_gtt_chipset_flush);

void intel_gmch_remove(void)
{
	if (--intel_private.refcount)
		return;

	if (intel_private.scratch_page)
		intel_gtt_teardown_scratch_page();
	if (intel_private.pcidev)
		pci_dev_put(intel_private.pcidev);
	if (intel_private.bridge_dev)
		pci_dev_put(intel_private.bridge_dev);
	intel_private.driver = NULL;
}
EXPORT_SYMBOL(intel_gmch_remove);
MODULE_AUTHOR("Dave Jones, Various @Intel");
MODULE_LICENSE("GPL and additional rights");