Lines matching full:mmio (from drivers/gpu/drm/xe/xe_mmio.c; the leading numbers below are that file's line numbers)
33 tile->mmio.regs = NULL; in tiles_fini()
37 * On multi-tile devices, partition the BAR space for MMIO on each tile,
48 * | tile1->mmio.regs |
52 * | tile0->mmio.regs |
69 struct xe_mmio *mmio = xe_root_tile_mmio(xe); in mmio_multi_tile_setup() local
74 * Although the per-tile mmio regs are not yet initialized, this in mmio_multi_tile_setup()
75 * is fine as it's going to the root tile's mmio, that's in mmio_multi_tile_setup()
78 mtcfg = xe_mmio_read32(mmio, XEHP_MTCFG_ADDR); in mmio_multi_tile_setup()
97 xe_mmio_init(&tile->mmio, tile, xe->mmio.regs + id * tile_mmio_size, SZ_4M); in mmio_multi_tile_setup()
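The loop at line 97 implements the diagram above: each remote tile receives a slice of the single BAR mapping at a fixed stride, and only the first 4 MiB of its slice becomes that tile's register window. A minimal sketch of the carve-up, assuming tile_mmio_size is the per-tile stride passed into mmio_multi_tile_setup():

    for_each_remote_tile(tile, xe, id) {
        /* id-th slice of the BAR; tile 0 was mapped in xe_mmio_probe_early() */
        void __iomem *tile_regs = xe->mmio.regs + id * tile_mmio_size;

        xe_mmio_init(&tile->mmio, tile, tile_regs, SZ_4M);
    }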
114 pci_iounmap(to_pci_dev(xe->drm.dev), xe->mmio.regs); in mmio_fini()
115 xe->mmio.regs = NULL; in mmio_fini()
116 root_tile->mmio.regs = NULL; in mmio_fini()
129 xe->mmio.size = pci_resource_len(pdev, GTTMMADR_BAR); in xe_mmio_probe_early()
130 xe->mmio.regs = pci_iomap(pdev, GTTMMADR_BAR, 0); in xe_mmio_probe_early()
131 if (!xe->mmio.regs) { in xe_mmio_probe_early()
137 xe_mmio_init(&root_tile->mmio, root_tile, xe->mmio.regs, SZ_4M); in xe_mmio_probe_early()
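Lines 129-137 give the early probe sequence: record the BAR length, map the whole BAR, then hand only the first 4 MiB to the root tile. A condensed sketch of the body; the -EIO path and the devm registration of mmio_fini() (lines 114-116 above) are my reading of the unmatched lines:

    xe->mmio.size = pci_resource_len(pdev, GTTMMADR_BAR);
    xe->mmio.regs = pci_iomap(pdev, GTTMMADR_BAR, 0);    /* whole BAR, not just 4M */
    if (!xe->mmio.regs)
        return -EIO;

    /* root tile registers occupy the first 4 MiB of the BAR */
    xe_mmio_init(&root_tile->mmio, root_tile, xe->mmio.regs, SZ_4M);

    return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe);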
143 * xe_mmio_init() - Initialize an MMIO instance
144 * @mmio: Pointer to the MMIO instance to initialize
145 * @tile: The tile to which the MMIO region belongs
146 * @ptr: Pointer to the start of the MMIO region
147 * @size: The size of the MMIO region in bytes
151 void xe_mmio_init(struct xe_mmio *mmio, struct xe_tile *tile, void __iomem *ptr, u32 size) in xe_mmio_init() argument
155 mmio->regs = ptr; in xe_mmio_init()
156 mmio->regs_size = size; in xe_mmio_init()
157 mmio->tile = tile; in xe_mmio_init()
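A hypothetical caller of the initializer, mirroring the probe-early usage above; tile and regs_base are assumed to exist in the surrounding context:

    struct xe_mmio mmio = {};

    xe_mmio_init(&mmio, tile, regs_base, SZ_4M);
    /* mmio now backs xe_mmio_read32()/xe_mmio_write32() on that window */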
160 static void mmio_flush_pending_writes(struct xe_mmio *mmio) in mmio_flush_pending_writes() argument
165 if (mmio->tile->xe->info.platform != XE_LUNARLAKE) in mmio_flush_pending_writes()
170 writel(0, mmio->regs + DUMMY_REG_OFFSET); in mmio_flush_pending_writes()
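Lines 160-170 are the Lunar Lake posted-write workaround: reads first force any pending MMIO writes out by hammering a scratch offset. The helper reconstructed in full, assuming DUMMY_REG_OFFSET is defined next to it and the unmatched lines hold the loop:

    static void mmio_flush_pending_writes(struct xe_mmio *mmio)
    {
        int i;

        /* only Lunar Lake needs the flush */
        if (mmio->tile->xe->info.platform != XE_LUNARLAKE)
            return;

        /* a few dummy writes push earlier posted writes through */
        for (i = 0; i < 4; i++)
            writel(0, mmio->regs + DUMMY_REG_OFFSET);
    }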
173 u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg) in xe_mmio_read8() argument
175 u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr); in xe_mmio_read8()
179 mmio_flush_pending_writes(mmio); in xe_mmio_read8()
181 val = readb(mmio->regs + addr); in xe_mmio_read8()
182 trace_xe_reg_rw(mmio, false, addr, val, sizeof(val)); in xe_mmio_read8()
187 u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg) in xe_mmio_read16() argument
189 u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr); in xe_mmio_read16()
193 mmio_flush_pending_writes(mmio); in xe_mmio_read16()
195 val = readw(mmio->regs + addr); in xe_mmio_read16()
196 trace_xe_reg_rw(mmio, false, addr, val, sizeof(val)); in xe_mmio_read16()
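The 8- and 16-bit readers share one skeleton: translate the offset, flush pending writes where required, do the narrow load, trace it. Annotated below; the comments are my reading of the calls, not text from the source:

    u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr); /* applies the instance's address adjustment */

    mmio_flush_pending_writes(mmio);                  /* no-op except on Lunar Lake, see above */

    val = readb(mmio->regs + addr);                   /* readw() in the 16-bit variant */
    trace_xe_reg_rw(mmio, false, addr, val, sizeof(val)); /* false = read */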
201 void xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val) in xe_mmio_write32() argument
203 u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr); in xe_mmio_write32()
205 trace_xe_reg_rw(mmio, true, addr, val, sizeof(val)); in xe_mmio_write32()
207 if (!reg.vf && mmio->sriov_vf_gt) in xe_mmio_write32()
208 xe_gt_sriov_vf_write32(mmio->sriov_vf_gt, reg, val); in xe_mmio_write32()
210 writel(val, mmio->regs + addr); in xe_mmio_write32()
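Only the bare else at line 209 fails to match "mmio", which makes the listing read as if both writes execute; the dispatch is actually either/or. Reconstructed:

    if (!reg.vf && mmio->sriov_vf_gt)
        /* VF context: route the access through the SR-IOV VF helper */
        xe_gt_sriov_vf_write32(mmio->sriov_vf_gt, reg, val);
    else
        /* bare-metal/PF: plain MMIO store */
        writel(val, mmio->regs + addr);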
213 u32 xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg) in xe_mmio_read32() argument
215 u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr); in xe_mmio_read32()
219 mmio_flush_pending_writes(mmio); in xe_mmio_read32()
221 if (!reg.vf && mmio->sriov_vf_gt) in xe_mmio_read32()
222 val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt, reg); in xe_mmio_read32()
224 val = readl(mmio->regs + addr); in xe_mmio_read32()
226 trace_xe_reg_rw(mmio, false, addr, val, sizeof(val)); in xe_mmio_read32()
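The read path mirrors the write dispatch, with the same invisible else at line 223 and the pending-write flush at line 219 up front. A small round trip built on the two calls; XE_REG(0x1234) is a made-up scratch offset, not a real xe register:

    struct xe_reg scratch = XE_REG(0x1234);

    xe_mmio_write32(mmio, scratch, 0xc0ffee);
    if (xe_mmio_read32(mmio, scratch) != 0xc0ffee)
        drm_warn(&mmio->tile->xe->drm, "scratch readback mismatch\n");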
231 u32 xe_mmio_rmw32(struct xe_mmio *mmio, struct xe_reg reg, u32 clr, u32 set) in xe_mmio_rmw32() argument
235 old = xe_mmio_read32(mmio, reg); in xe_mmio_rmw32()
237 xe_mmio_write32(mmio, reg, reg_val); in xe_mmio_rmw32()
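Line 236, between the two matches, computes the new value and contains no "mmio" token. The full read-modify-write, reconstructed:

    old = xe_mmio_read32(mmio, reg);
    reg_val = (old & ~clr) | set;         /* clear the clr bits, then apply set */
    xe_mmio_write32(mmio, reg, reg_val);

    return old;                           /* callers get the pre-RMW value back */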
242 int xe_mmio_write32_and_verify(struct xe_mmio *mmio, in xe_mmio_write32_and_verify() argument
247 xe_mmio_write32(mmio, reg, val); in xe_mmio_write32_and_verify()
248 reg_val = xe_mmio_read32(mmio, reg); in xe_mmio_write32_and_verify()
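The unmatched tail of the function masks the readback and compares it with an expected value, returning -EINVAL on mismatch. A hypothetical caller; CTRL_REG and CTRL_ENABLE are illustrative names only:

    int err;

    err = xe_mmio_write32_and_verify(mmio, CTRL_REG, CTRL_ENABLE,
                                     CTRL_ENABLE, CTRL_ENABLE);
    if (err)    /* -EINVAL: (readback & mask) != expected value */
        return err;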
253 bool xe_mmio_in_range(const struct xe_mmio *mmio, in xe_mmio_in_range() argument
257 u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr); in xe_mmio_in_range()
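The unmatched return then checks the adjusted address against the range bounds. A usage sketch; the bounds are invented, and struct xe_mmio_range is assumed to be the start/end pair the xe MCR tables use:

    static const struct xe_mmio_range example_range = {
        .start = 0x9100,    /* invented bounds */
        .end   = 0x91ff,
    };

    bool hit = xe_mmio_in_range(mmio, &example_range, reg);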
264 * @mmio: MMIO target
284 u64 xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg) in xe_mmio_read64_2x32() argument
289 reg.addr = xe_mmio_adjusted_addr(mmio, reg.addr); in xe_mmio_read64_2x32()
290 reg_udw.addr = xe_mmio_adjusted_addr(mmio, reg_udw.addr); in xe_mmio_read64_2x32()
293 xe_tile_assert(mmio->tile, reg_udw.addr == reg.addr + 0x4); in xe_mmio_read64_2x32()
295 oldudw = xe_mmio_read32(mmio, reg_udw); in xe_mmio_read64_2x32()
297 ldw = xe_mmio_read32(mmio, reg); in xe_mmio_read64_2x32()
298 udw = xe_mmio_read32(mmio, reg_udw); in xe_mmio_read64_2x32()
306 drm_WARN(&mmio->tile->xe->drm, retries == 0, in xe_mmio_read64_2x32()
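Lines 284-306 guard against torn reads of 64-bit counters exposed as two 32-bit registers: sample the upper dword, read low then high, and retry while the upper half moved underneath the pair. The loop between the matches, reconstructed (the retry budget is my assumption from the upstream source):

    oldudw = xe_mmio_read32(mmio, reg_udw);
    for (retries = 5; retries; --retries) {
        ldw = xe_mmio_read32(mmio, reg);
        udw = xe_mmio_read32(mmio, reg_udw);

        if (udw == oldudw)    /* no low-to-high carry raced the two loads */
            break;

        oldudw = udw;
    }

    return (u64)udw << 32 | ldw;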
312 static int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, in __xe_mmio_wait32() argument
323 read = xe_mmio_read32(mmio, reg); in __xe_mmio_wait32()
349 read = xe_mmio_read32(mmio, reg); in __xe_mmio_wait32()
367 * @mmio: MMIO target
384 int xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, in xe_mmio_wait32() argument
387 return __xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, true); in xe_mmio_wait32()
392 * @mmio: MMIO target
403 int xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us, in xe_mmio_wait32_not() argument
406 return __xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, false); in xe_mmio_wait32_not()
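Both pollers funnel into __xe_mmio_wait32() (lines 312-349), which re-reads the register until (read & mask) equals val, or stops equalling it for the _not variant. An illustrative wait; STATUS_REG and STATUS_READY are made-up names, and atomic=false permits sleeping between samples:

    u32 last;
    int err;

    err = xe_mmio_wait32(mmio, STATUS_REG, STATUS_READY, STATUS_READY,
                         50 * USEC_PER_MSEC, &last, false);
    if (err == -ETIMEDOUT)
        drm_err(&mmio->tile->xe->drm,
                "not ready after 50 ms, last status %#x\n", last);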