
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021-2023 Intel Corporation
 */

#include <linux/io-64-nonatomic-lo-hi.h>

        /* tiles_fini(): */
        tile->mmio.regs = NULL;

/*
 * On multi-tile devices, partition the BAR space for MMIO on each tile:
 *
 * .----------------------. <- tile_count * tile_mmio_size
 * |         ....         |
 * |----------------------| <- 2 * tile_mmio_size
 * |   reserved + GGTT    |
 * |----------------------| <- 1 * tile_mmio_size + 4MB
 * |   tile1->mmio.regs   |
 * |----------------------| <- 1 * tile_mmio_size
 * |   reserved + GGTT    |
 * |----------------------| <- 4MB
 * |   tile0->mmio.regs   |
 * '----------------------' <- 0MB
 */

        /* mmio_multi_tile_setup(): */

        /*
         * Nothing more to do here on single-tile devices: tile 0 has already
         * been set up with the entire BAR mapped - see xe_mmio_probe_early().
         */
        if (xe->info.tile_count == 1)
                return;

        if (!xe->info.skip_mtcfg) {
                /*
                 * Re-read the tile count from the MTCFG register.  Although
                 * the per-tile mmio regs are not yet initialized, this is
                 * fine: the read goes through the root tile's mmio, which
                 * xe_mmio_probe_early() has already set up.
                 */
                if (tile_count < xe->info.tile_count) {
                        drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n",
                                 xe->info.tile_count, tile_count);
                        xe->info.tile_count = tile_count;

                        /*
                         * This should be impossible with multi-tile for now:
                         * a multi-tile platform with standalone media doesn't
                         * exist.
                         */
                        xe->info.gt_count = xe->info.tile_count;
                }
        }

        /* One 4MB register window per tile, spaced tile_mmio_size apart. */
        xe_mmio_init(&tile->mmio, tile, xe->mmio.regs + id * tile_mmio_size, SZ_4M);

        /* xe_mmio_probe_tiles(): */
        return devm_add_action_or_reset(xe->drm.dev, tiles_fini, xe);
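
/*
 * For illustration only (not the driver's code): a minimal sketch of the BAR
 * partitioning shown in the diagram earlier in this excerpt.  Tile N's 4MB
 * register window starts N * tile_mmio_size bytes into the BAR mapping;
 * bar_regs and tile_mmio_size are hypothetical stand-ins here.
 */
#include <stddef.h>

static inline void *tile_reg_window(void *bar_regs, size_t tile_mmio_size,
                                    unsigned int tile_id)
{
        /* Registers occupy the first 4MB of each tile's slice of the BAR. */
        return (char *)bar_regs + (size_t)tile_id * tile_mmio_size;
}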

        /* mmio_fini(): */
        pci_iounmap(to_pci_dev(xe->drm.dev), xe->mmio.regs);
        xe->mmio.regs = NULL;
        root_tile->mmio.regs = NULL;

        /* xe_mmio_probe_early(): */
        struct pci_dev *pdev = to_pci_dev(xe->drm.dev);

        /*
         * Map the entire BAR, which includes the registers (0-4MB),
         * reserved space (4MB-8MB) and GGTT (8MB-16MB).
         */
        xe->mmio.size = pci_resource_len(pdev, GTTMMADR_BAR);
        xe->mmio.regs = pci_iomap(pdev, GTTMMADR_BAR, 0);
        if (!xe->mmio.regs) {
                drm_err(&xe->drm, "failed to map registers\n");
                return -EIO;
        }

        xe_mmio_init(&root_tile->mmio, root_tile, xe->mmio.regs, SZ_4M);

        return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe);
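
/*
 * For illustration only: a sketch of the devm cleanup pattern used above.
 * devm_add_action_or_reset() registers a teardown callback that runs when the
 * device is unbound, and runs it immediately (returning the error) if the
 * registration itself fails.  struct my_dev, my_mmio_fini() and
 * my_mmio_probe() are hypothetical; only pci_iomap(), pci_iounmap() and
 * devm_add_action_or_reset() are real kernel APIs.
 */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pci.h>

struct my_dev {
        struct pci_dev *pdev;
        void __iomem *regs;
};

static void my_mmio_fini(void *arg)
{
        struct my_dev *md = arg;

        pci_iounmap(md->pdev, md->regs);
        md->regs = NULL;
}

static int my_mmio_probe(struct my_dev *md)
{
        md->regs = pci_iomap(md->pdev, 0, 0);  /* map BAR 0 in full */
        if (!md->regs)
                return -EIO;

        return devm_add_action_or_reset(&md->pdev->dev, my_mmio_fini, md);
}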

/* xe_mmio_init() - Initialize an MMIO instance: */
        mmio->regs = ptr;
        mmio->regs_size = size;
        mmio->tile = tile;

        /* mmio_flush_pending_writes(): only needed on Lunar Lake */
        if (mmio->tile->xe->info.platform != XE_LUNARLAKE)
                return;

        writel(0, mmio->regs + DUMMY_REG_OFFSET);

        /* xe_mmio_read8(): */
        val = readb(mmio->regs + addr);

        /* xe_mmio_read16(): */
        val = readw(mmio->regs + addr);

        /* xe_mmio_write32(): */
        if (!reg.vf && mmio->sriov_vf_gt)
                xe_gt_sriov_vf_write32(mmio->sriov_vf_gt, reg, val);
        else
                writel(val, mmio->regs + addr);

        /* xe_mmio_read32(): */
        if (!reg.vf && mmio->sriov_vf_gt)
                val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt, reg);
        else
                val = readl(mmio->regs + addr);

        /* xe_mmio_write32_and_verify(): */
        return (reg_val & mask) != eval ? -EINVAL : 0;
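
/*
 * For illustration only (not the driver's code): the fragment above is the
 * final check of a write-then-verify helper.  A minimal sketch of the full
 * pattern, with hypothetical reg_write()/reg_read() stand-ins, looks like:
 */
#include <errno.h>
#include <stdint.h>

extern void reg_write(uint32_t val);    /* hypothetical 32-bit register write */
extern uint32_t reg_read(void);         /* hypothetical 32-bit register read  */

static int write32_and_verify(uint32_t val, uint32_t mask, uint32_t eval)
{
        uint32_t reg_val;

        reg_write(val);                 /* write the new value */
        reg_val = reg_read();           /* read it back        */

        /* Succeed only if the masked readback matches the expected value. */
        return (reg_val & mask) != eval ? -EINVAL : 0;
}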

        /* xe_mmio_in_range(): */
        return range && addr >= range->start && addr <= range->end;

/**
 * xe_mmio_read64_2x32() - Read a 64-bit register as two 32-bit reads
 *
 * Although Intel GPUs have some 64-bit registers, the hardware officially
 * supports only 32-bit (and smaller) MMIO accesses; behavior beyond the
 * spec shouldn't be relied upon, and all 64-bit register reads should be
 * performed as two 32-bit reads of the upper and lower dwords.
 *
 * When reading a register that may be changing (such as one of the
 * counters), a rollover of the lower dword between the two 32-bit reads
 * would produce a torn value, so this function waits until the value has
 * stabilized before returning the 64-bit value.
 *
 * Note that because this function may re-read the register multiple times,
 * it must not be used on registers whose reads have side effects.
 *
 * Returns the value of the 64-bit register.
 */
        /* xe_mmio_read64_2x32(): */
        xe_tile_assert(mmio->tile, reg_udw.addr == reg.addr + 0x4);

        for (retries = 5; retries; --retries) {
                /* re-read ldw/udw until the upper dword stops changing */
        }

        drm_WARN(&mmio->tile->xe->drm, retries == 0,
                 "64-bit read of %#x did not stabilize\n", reg.addr);
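
/*
 * For illustration only (not the driver's exact code): a minimal sketch of
 * the "read until the upper dword stabilizes" pattern described above.
 * read_lo()/read_hi() are hypothetical stand-ins for two 32-bit MMIO reads
 * of the lower and upper dwords of a 64-bit counter.
 */
#include <stdint.h>

extern uint32_t read_lo(void);  /* hypothetical 32-bit read of bits 31:0  */
extern uint32_t read_hi(void);  /* hypothetical 32-bit read of bits 63:32 */

static uint64_t read64_2x32(void)
{
        uint32_t oldudw, udw, ldw;
        int retries;

        oldudw = read_hi();
        for (retries = 5; retries; --retries) {
                ldw = read_lo();
                udw = read_hi();

                /* Upper dword unchanged: ldw and udw belong together. */
                if (udw == oldudw)
                        break;

                oldudw = udw;
        }

        return ((uint64_t)udw << 32) | ldw;
}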

        /* __xe_mmio_wait32(): */
        int ret = -ETIMEDOUT;

/**
 * xe_mmio_wait32() - Wait for a register to match the desired masked value
 *
 * This function polls for the desired masked value and returns zero on success
 * or -ETIMEDOUT if timed out.
 *
 * Note that the actual wait can take slightly longer than @timeout_us for
 * different reasons, especially in non-atomic contexts.  Thus, the function
 * may succeed even after @timeout_us has already expired.
 */
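
/*
 * For illustration only (not the driver's code): a sketch of a masked-value
 * poll with exponential backoff, in the spirit of xe_mmio_wait32() as
 * described above.  reg_read32() and sleep_us() are hypothetical stand-ins.
 */
#include <errno.h>
#include <stdint.h>

extern uint32_t reg_read32(void);       /* hypothetical 32-bit register read */
extern void sleep_us(uint32_t us);      /* hypothetical microsecond sleep    */

static int wait32(uint32_t mask, uint32_t val, uint32_t timeout_us)
{
        uint32_t wait = 1, elapsed = 0;

        for (;;) {
                /* Success as soon as the masked value matches. */
                if ((reg_read32() & mask) == val)
                        return 0;

                /* Give up once at least @timeout_us has been spent waiting. */
                if (elapsed >= timeout_us)
                        return -ETIMEDOUT;

                sleep_us(wait);
                elapsed += wait;

                /* Exponential backoff, capped by the time that remains. */
                if (wait * 2 <= timeout_us - elapsed)
                        wait *= 2;
                else
                        wait = timeout_us - elapsed;
        }
}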

/**
 * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value
 */