1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2013--2024 Intel Corporation
4 */
5
6 #include <linux/bitfield.h>
7 #include <linux/bits.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/err.h>
10 #include <linux/firmware.h>
11 #include <linux/kernel.h>
12 #include <linux/interrupt.h>
13 #include <linux/io.h>
14 #include <linux/list.h>
15 #include <linux/module.h>
16 #include <linux/pci-ats.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/property.h>
19 #include <linux/scatterlist.h>
20 #include <linux/slab.h>
21 #include <linux/types.h>
22
23 #include <media/ipu-bridge.h>
24 #include <media/ipu6-pci-table.h>
25
26 #include "ipu6.h"
27 #include "ipu6-bus.h"
28 #include "ipu6-buttress.h"
29 #include "ipu6-cpd.h"
30 #include "ipu6-isys.h"
31 #include "ipu6-mmu.h"
32 #include "ipu6-platform-buttress-regs.h"
33 #include "ipu6-platform-isys-csi2-reg.h"
34 #include "ipu6-platform-regs.h"
35
36 #define IPU6_PCI_BAR 0
37
/*
 * Per-cell program descriptor as laid out inside the CPD firmware image.
 *
 * ipu6_pkg_dir_configure_spc() casts a raw offset within isp->cpd_fw->data
 * to this structure, so the layout must match the firmware binary format
 * exactly -- do not reorder or resize fields.
 */
struct ipu6_cell_program {
	u32 magic_number;

	/* program blob location/size within the firmware image */
	u32 blob_offset;
	u32 blob_size;

	/* entry points; start[1] is written to the SPC start-PC register */
	u32 start[3];

	/* icache source offset (added to blob base when programming SPC) */
	u32 icache_source;
	u32 icache_target;
	u32 icache_size;

	u32 pmem_source;
	u32 pmem_target;
	u32 pmem_size;

	u32 data_source;
	u32 data_target;
	u32 data_size;

	u32 bss_target;
	u32 bss_size;

	u32 cell_id;
	/* offset of the cell's register block from the subsystem base */
	u32 regs_addr;

	u32 cell_pmem_data_bus_address;
	u32 cell_dmem_data_bus_address;
	u32 cell_pmem_control_bus_address;
	u32 cell_dmem_control_bus_address;

	u32 next;
	u32 dummy[2];
};
72
/*
 * Input system (ISYS) hardware description: three MMUs (two with L1/L2
 * stream configuration plus one without streams) and CDC FIFO setup.
 * Generation-specific fields are patched at probe time by
 * ipu6_internal_pdata_init(), which is why this is not const.
 */
static struct ipu6_isys_internal_pdata isys_ipdata = {
	.hw_variant = {
		.offset = IPU6_UNIFIED_OFFSET,
		.nr_mmus = 3,
		.mmu_hw = {
			{
				.offset = IPU6_ISYS_IOMMU0_OFFSET,
				.info_bits = IPU6_INFO_REQUEST_DESTINATION_IOSF,
				.nr_l1streams = 16,
				.l1_block_sz = {
					3, 8, 2, 2, 2, 2, 2, 2, 1, 1,
					1, 1, 1, 1, 1, 1
				},
				.nr_l2streams = 16,
				.l2_block_sz = {
					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 2
				},
				.insert_read_before_invalidate = false,
				.l1_stream_id_reg_offset =
					IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
				.l2_stream_id_reg_offset =
					IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
			},
			{
				.offset = IPU6_ISYS_IOMMU1_OFFSET,
				.info_bits = 0,
				.nr_l1streams = 16,
				.l1_block_sz = {
					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 1, 1, 4
				},
				.nr_l2streams = 16,
				.l2_block_sz = {
					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 2
				},
				.insert_read_before_invalidate = false,
				.l1_stream_id_reg_offset =
					IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
				.l2_stream_id_reg_offset =
					IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
			},
			{
				/* internal MMU: no L1/L2 streams to program */
				.offset = IPU6_ISYS_IOMMUI_OFFSET,
				.info_bits = 0,
				.nr_l1streams = 0,
				.nr_l2streams = 0,
				.insert_read_before_invalidate = false,
			},
		},
		.cdc_fifos = 3,
		.cdc_fifo_threshold = {6, 8, 2},
		.dmem_offset = IPU6_ISYS_DMEM_OFFSET,
		.spc_offset = IPU6_ISYS_SPC_OFFSET,
	},
	.isys_dma_overshoot = IPU6_ISYS_OVERALLOC_MIN,
};
131
/*
 * Processing system (PSYS) hardware description: four MMUs (separate
 * write and read instances for IOMMU1, plus the internal one without
 * streams).  psys spc_offset is filled in by ipu6_internal_pdata_init()
 * depending on the detected hardware generation.
 */
static struct ipu6_psys_internal_pdata psys_ipdata = {
	.hw_variant = {
		.offset = IPU6_UNIFIED_OFFSET,
		.nr_mmus = 4,
		.mmu_hw = {
			{
				.offset = IPU6_PSYS_IOMMU0_OFFSET,
				.info_bits =
					IPU6_INFO_REQUEST_DESTINATION_IOSF,
				.nr_l1streams = 16,
				.l1_block_sz = {
					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 2
				},
				.nr_l2streams = 16,
				.l2_block_sz = {
					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 2
				},
				.insert_read_before_invalidate = false,
				.l1_stream_id_reg_offset =
					IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
				.l2_stream_id_reg_offset =
					IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
			},
			{
				/* IOMMU1 write side: 32 streams */
				.offset = IPU6_PSYS_IOMMU1_OFFSET,
				.info_bits = 0,
				.nr_l1streams = 32,
				.l1_block_sz = {
					1, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 10,
					5, 4, 14, 6, 4, 14, 6, 4, 8,
					4, 2, 1, 1, 1, 1, 14
				},
				.nr_l2streams = 32,
				.l2_block_sz = {
					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 2
				},
				.insert_read_before_invalidate = false,
				.l1_stream_id_reg_offset =
					IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
				.l2_stream_id_reg_offset =
					IPU6_PSYS_MMU1W_L2_STREAM_ID_REG_OFFSET,
			},
			{
				/* IOMMU1 read side: 16 streams */
				.offset = IPU6_PSYS_IOMMU1R_OFFSET,
				.info_bits = 0,
				.nr_l1streams = 16,
				.l1_block_sz = {
					1, 4, 4, 4, 4, 16, 8, 4, 32,
					16, 16, 2, 2, 2, 1, 12
				},
				.nr_l2streams = 16,
				.l2_block_sz = {
					2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
					2, 2, 2, 2, 2, 2
				},
				.insert_read_before_invalidate = false,
				.l1_stream_id_reg_offset =
					IPU6_MMU_L1_STREAM_ID_REG_OFFSET,
				.l2_stream_id_reg_offset =
					IPU6_MMU_L2_STREAM_ID_REG_OFFSET,
			},
			{
				/* internal MMU: no L1/L2 streams to program */
				.offset = IPU6_PSYS_IOMMUI_OFFSET,
				.info_bits = 0,
				.nr_l1streams = 0,
				.nr_l2streams = 0,
				.insert_read_before_invalidate = false,
			},
		},
		.dmem_offset = IPU6_PSYS_DMEM_OFFSET,
	},
};
210
/* Buttress power/frequency control parameters for the input system (IS) */
static const struct ipu6_buttress_ctrl isys_buttress_ctrl = {
	.ratio = IPU6_IS_FREQ_CTL_DEFAULT_RATIO,
	.qos_floor = IPU6_IS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO,
	.freq_ctl = IPU6_BUTTRESS_REG_IS_FREQ_CTL,
	.pwr_sts_shift = IPU6_BUTTRESS_PWR_STATE_IS_PWR_SHIFT,
	.pwr_sts_mask = IPU6_BUTTRESS_PWR_STATE_IS_PWR_MASK,
	.pwr_sts_on = IPU6_BUTTRESS_PWR_STATE_UP_DONE,
	.pwr_sts_off = IPU6_BUTTRESS_PWR_STATE_DN_DONE,
};
220
/* Buttress power/frequency control parameters for the processing system (PS) */
static const struct ipu6_buttress_ctrl psys_buttress_ctrl = {
	.ratio = IPU6_PS_FREQ_CTL_DEFAULT_RATIO,
	.qos_floor = IPU6_PS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO,
	.freq_ctl = IPU6_BUTTRESS_REG_PS_FREQ_CTL,
	.pwr_sts_shift = IPU6_BUTTRESS_PWR_STATE_PS_PWR_SHIFT,
	.pwr_sts_mask = IPU6_BUTTRESS_PWR_STATE_PS_PWR_MASK,
	.pwr_sts_on = IPU6_BUTTRESS_PWR_STATE_UP_DONE,
	.pwr_sts_off = IPU6_BUTTRESS_PWR_STATE_DN_DONE,
};
230
/*
 * Program the SPC cell of one subsystem (selected by @pkg_dir_idx) from
 * the CPD firmware image.
 *
 * The package directory entry holds the server firmware DMA address;
 * translating it back to an offset inside isp->cpd_fw->data yields the
 * matching struct ipu6_cell_program.  From that the SPC icache base,
 * info bits and start PC are written, and finally the package directory
 * address (@pkg_dir_vied_address) is stored at the start of DMEM where
 * the firmware picks it up.
 */
static void
ipu6_pkg_dir_configure_spc(struct ipu6_device *isp,
			   const struct ipu6_hw_variants *hw_variant,
			   int pkg_dir_idx, void __iomem *base,
			   u64 *pkg_dir, dma_addr_t pkg_dir_vied_address)
{
	struct ipu6_cell_program *prog;
	void __iomem *spc_base;
	u32 server_fw_addr;
	dma_addr_t dma_addr;
	u32 pg_offset;

	/* each pkg_dir entry is two u64s; the address lives in the first */
	server_fw_addr = lower_32_bits(*(pkg_dir + (pkg_dir_idx + 1) * 2));
	if (pkg_dir_idx == IPU6_CPD_PKG_DIR_ISYS_SERVER_IDX)
		dma_addr = sg_dma_address(isp->isys->fw_sgt.sgl);
	else
		dma_addr = sg_dma_address(isp->psys->fw_sgt.sgl);

	/* DMA address back to an offset within the CPD firmware image */
	pg_offset = server_fw_addr - dma_addr;
	prog = (struct ipu6_cell_program *)((uintptr_t)isp->cpd_fw->data +
					    pg_offset);
	spc_base = base + prog->regs_addr;
	/* sanity check: CPD's register offset should match the pdata one */
	if (spc_base != (base + hw_variant->spc_offset))
		dev_warn(&isp->pdev->dev,
			 "SPC reg addr %p not matching value from CPD %p\n",
			 base + hw_variant->spc_offset, spc_base);
	writel(server_fw_addr + prog->blob_offset +
	       prog->icache_source, spc_base + IPU6_PSYS_REG_SPC_ICACHE_BASE);
	writel(IPU6_INFO_REQUEST_DESTINATION_IOSF,
	       spc_base + IPU6_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER);
	writel(prog->start[1], spc_base + IPU6_PSYS_REG_SPC_START_PC);
	writel(pkg_dir_vied_address, base + hw_variant->dmem_offset);
}
264
/*
 * Prepare the SPC for firmware start: invalidate its icache and hand it
 * the package directory location.  In secure mode the firmware lives in
 * an IMR, so only the fixed IMR offset is written to DMEM; otherwise the
 * SPC is fully configured from the CPD image via
 * ipu6_pkg_dir_configure_spc().
 */
void ipu6_configure_spc(struct ipu6_device *isp,
			const struct ipu6_hw_variants *hw_variant,
			int pkg_dir_idx, void __iomem *base, u64 *pkg_dir,
			dma_addr_t pkg_dir_dma_addr)
{
	void __iomem *dmem_base = base + hw_variant->dmem_offset;
	void __iomem *spc_regs_base = base + hw_variant->spc_offset;
	u32 val;

	/* read-modify-write: request an icache invalidate */
	val = readl(spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL);
	val |= IPU6_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE;
	writel(val, spc_regs_base + IPU6_PSYS_REG_SPC_STATUS_CTRL);

	if (isp->secure_mode)
		writel(IPU6_PKG_DIR_IMR_OFFSET, dmem_base);
	else
		ipu6_pkg_dir_configure_spc(isp, hw_variant, pkg_dir_idx, base,
					   pkg_dir, pkg_dir_dma_addr);
}
EXPORT_SYMBOL_NS_GPL(ipu6_configure_spc, "INTEL_IPU6");
285
/* Number of CSI-2 receiver ports per IPU6 variant */
#define IPU6_ISYS_CSI2_NPORTS		4
#define IPU6SE_ISYS_CSI2_NPORTS		4
#define IPU6_TGL_ISYS_CSI2_NPORTS	8
#define IPU6EP_MTL_ISYS_CSI2_NPORTS	6

/*
 * Fill the static isys/psys pdata templates with values for the
 * detected hardware generation: baseline IPU6 values first, then
 * overrides for TGL, EP, EP-MTL and SE.  Order matters: the variant
 * branches overwrite fields set in the common section above them.
 */
static void ipu6_internal_pdata_init(struct ipu6_device *isp)
{
	u8 hw_ver = isp->hw_ver;

	/* common IPU6 defaults */
	isys_ipdata.num_parallel_streams = IPU6_ISYS_NUM_STREAMS;
	isys_ipdata.sram_gran_shift = IPU6_SRAM_GRANULARITY_SHIFT;
	isys_ipdata.sram_gran_size = IPU6_SRAM_GRANULARITY_SIZE;
	isys_ipdata.max_sram_size = IPU6_MAX_SRAM_SIZE;
	isys_ipdata.sensor_type_start = IPU6_FW_ISYS_SENSOR_TYPE_START;
	isys_ipdata.sensor_type_end = IPU6_FW_ISYS_SENSOR_TYPE_END;
	isys_ipdata.max_streams = IPU6_ISYS_NUM_STREAMS;
	isys_ipdata.max_send_queues = IPU6_N_MAX_SEND_QUEUES;
	isys_ipdata.max_sram_blocks = IPU6_NOF_SRAM_BLOCKS_MAX;
	isys_ipdata.max_devq_size = IPU6_DEV_SEND_QUEUE_SIZE;
	isys_ipdata.csi2.nports = IPU6_ISYS_CSI2_NPORTS;
	isys_ipdata.csi2.irq_mask = IPU6_CSI_RX_ERROR_IRQ_MASK;
	isys_ipdata.csi2.ctrl0_irq_edge = IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE;
	isys_ipdata.csi2.ctrl0_irq_clear =
		IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR;
	isys_ipdata.csi2.ctrl0_irq_mask = IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK;
	isys_ipdata.csi2.ctrl0_irq_enable =
		IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE;
	isys_ipdata.csi2.ctrl0_irq_status =
		IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS;
	isys_ipdata.csi2.ctrl0_irq_lnp =
		IPU6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE;
	isys_ipdata.enhanced_iwake = is_ipu6ep_mtl(hw_ver) || is_ipu6ep(hw_ver);
	psys_ipdata.hw_variant.spc_offset = IPU6_PSYS_SPC_OFFSET;
	isys_ipdata.csi2.fw_access_port_ofs = CSI_REG_HUB_FW_ACCESS_PORT_OFS;

	if (is_ipu6ep(hw_ver)) {
		isys_ipdata.ltr = IPU6EP_LTR_VALUE;
		isys_ipdata.memopen_threshold = IPU6EP_MIN_MEMOPEN_TH;
	}

	if (is_ipu6_tgl(hw_ver))
		isys_ipdata.csi2.nports = IPU6_TGL_ISYS_CSI2_NPORTS;

	/* MTL uses the V6 CSI top register layout */
	if (is_ipu6ep_mtl(hw_ver)) {
		isys_ipdata.csi2.nports = IPU6EP_MTL_ISYS_CSI2_NPORTS;

		isys_ipdata.csi2.ctrl0_irq_edge =
			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE;
		isys_ipdata.csi2.ctrl0_irq_clear =
			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR;
		isys_ipdata.csi2.ctrl0_irq_mask =
			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK;
		isys_ipdata.csi2.ctrl0_irq_enable =
			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE;
		isys_ipdata.csi2.ctrl0_irq_lnp =
			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE;
		isys_ipdata.csi2.ctrl0_irq_status =
			IPU6V6_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS;
		isys_ipdata.csi2.fw_access_port_ofs =
			CSI_REG_HUB_FW_ACCESS_PORT_V6OFS;
		isys_ipdata.ltr = IPU6EP_MTL_LTR_VALUE;
		isys_ipdata.memopen_threshold = IPU6EP_MTL_MIN_MEMOPEN_TH;
	}

	/* SE is a reduced variant: smaller SRAM, fewer streams/queues */
	if (is_ipu6se(hw_ver)) {
		isys_ipdata.csi2.nports = IPU6SE_ISYS_CSI2_NPORTS;
		isys_ipdata.csi2.irq_mask = IPU6SE_CSI_RX_ERROR_IRQ_MASK;
		isys_ipdata.num_parallel_streams = IPU6SE_ISYS_NUM_STREAMS;
		isys_ipdata.sram_gran_shift = IPU6SE_SRAM_GRANULARITY_SHIFT;
		isys_ipdata.sram_gran_size = IPU6SE_SRAM_GRANULARITY_SIZE;
		isys_ipdata.max_sram_size = IPU6SE_MAX_SRAM_SIZE;
		isys_ipdata.sensor_type_start =
			IPU6SE_FW_ISYS_SENSOR_TYPE_START;
		isys_ipdata.sensor_type_end = IPU6SE_FW_ISYS_SENSOR_TYPE_END;
		isys_ipdata.max_streams = IPU6SE_ISYS_NUM_STREAMS;
		isys_ipdata.max_send_queues = IPU6SE_N_MAX_SEND_QUEUES;
		isys_ipdata.max_sram_blocks = IPU6SE_NOF_SRAM_BLOCKS_MAX;
		isys_ipdata.max_devq_size = IPU6SE_DEV_SEND_QUEUE_SIZE;
		psys_ipdata.hw_variant.spc_offset = IPU6SE_PSYS_SPC_OFFSET;
	}
}
367
/*
 * Create the isys auxiliary bus device: run the IPU bridge (to gather
 * sensor connection data from ACPI/SSDB), allocate platform data, then
 * register the device and its MMU.
 *
 * On success the returned device owns @pdata and its MMU; on failure an
 * ERR_PTR is returned and everything allocated here is released.
 */
static struct ipu6_bus_device *
ipu6_isys_init(struct pci_dev *pdev, struct device *parent,
	       struct ipu6_buttress_ctrl *ctrl, void __iomem *base,
	       const struct ipu6_isys_internal_pdata *ipdata)
{
	struct device *dev = &pdev->dev;
	struct ipu6_bus_device *isys_adev;
	struct ipu6_isys_pdata *pdata;
	int ret;

	ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb);
	if (ret) {
		dev_err_probe(dev, ret, "IPU6 bridge init failed\n");
		return ERR_PTR(ret);
	}

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	pdata->base = base;
	pdata->ipdata = ipdata;

	isys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl,
					       IPU6_ISYS_NAME);
	if (IS_ERR(isys_adev)) {
		kfree(pdata);
		return dev_err_cast_probe(dev, isys_adev,
					  "ipu6_bus_initialize_device isys failed\n");
	}

	isys_adev->mmu = ipu6_mmu_init(dev, base, ISYS_MMID,
				       &ipdata->hw_variant);
	if (IS_ERR(isys_adev->mmu)) {
		/* drop the reference taken by ipu6_bus_initialize_device() */
		put_device(&isys_adev->auxdev.dev);
		kfree(pdata);
		return dev_err_cast_probe(dev, isys_adev->mmu,
					  "ipu6_mmu_init(isys_adev->mmu) failed\n");
	}

	isys_adev->mmu->dev = &isys_adev->auxdev.dev;

	ret = ipu6_bus_add_device(isys_adev);
	if (ret) {
		kfree(pdata);
		return ERR_PTR(ret);
	}

	return isys_adev;
}
418
/*
 * Create the psys auxiliary bus device and its MMU; mirrors
 * ipu6_isys_init() minus the bridge step (psys has no sensor links).
 *
 * Returns the new bus device or an ERR_PTR; on failure all resources
 * allocated here are released.
 */
static struct ipu6_bus_device *
ipu6_psys_init(struct pci_dev *pdev, struct device *parent,
	       struct ipu6_buttress_ctrl *ctrl, void __iomem *base,
	       const struct ipu6_psys_internal_pdata *ipdata)
{
	struct ipu6_bus_device *psys_adev;
	struct ipu6_psys_pdata *pdata;
	int ret;

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	pdata->base = base;
	pdata->ipdata = ipdata;

	psys_adev = ipu6_bus_initialize_device(pdev, parent, pdata, ctrl,
					       IPU6_PSYS_NAME);
	if (IS_ERR(psys_adev)) {
		kfree(pdata);
		return dev_err_cast_probe(&pdev->dev, psys_adev,
					  "ipu6_bus_initialize_device psys failed\n");
	}

	psys_adev->mmu = ipu6_mmu_init(&pdev->dev, base, PSYS_MMID,
				       &ipdata->hw_variant);
	if (IS_ERR(psys_adev->mmu)) {
		/* drop the reference taken by ipu6_bus_initialize_device() */
		put_device(&psys_adev->auxdev.dev);
		kfree(pdata);
		return dev_err_cast_probe(&pdev->dev, psys_adev->mmu,
					  "ipu6_mmu_init(psys_adev->mmu) failed\n");
	}

	psys_adev->mmu->dev = &psys_adev->auxdev.dev;

	ret = ipu6_bus_add_device(psys_adev);
	if (ret) {
		kfree(pdata);
		return ERR_PTR(ret);
	}

	return psys_adev;
}
462
/*
 * Set up interrupt delivery for the IPU6 PCI device.
 *
 * IPU6EP parts (ADL/RPL/N and MTL) expose no PCI MSI capability, so for
 * those only make sure MSI is off and fall back to the legacy IRQ line;
 * every other variant allocates exactly one MSI vector.
 *
 * Returns 0 on success or a negative errno if MSI allocation fails.
 */
static int ipu6_pci_config_setup(struct pci_dev *dev, u8 hw_ver)
{
	bool has_msi = !(is_ipu6ep(hw_ver) || is_ipu6ep_mtl(hw_ver));
	int nvec;

	if (!has_msi) {
		/* likely a no-op: MSI is not enabled by default */
		pci_disable_msi(dev);
		return 0;
	}

	nvec = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_MSI);
	if (nvec >= 0)
		return 0;

	return dev_err_probe(&dev->dev, nvec, "Request msi failed");
}
480
/*
 * Configure stall/no-stall arbitration for the two buttress virtual
 * channels.  The mode for each VC is a compile-time constant, so each
 * if/else collapses to a single bit set or clear; the conditional form
 * just keeps the intent readable.
 */
static void ipu6_configure_vc_mechanism(struct ipu6_device *isp)
{
	u32 val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL);

	if (IPU6_BTRS_ARB_STALL_MODE_VC0 == IPU6_BTRS_ARB_MODE_TYPE_STALL)
		val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0;
	else
		val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0;

	if (IPU6_BTRS_ARB_STALL_MODE_VC1 == IPU6_BTRS_ARB_MODE_TYPE_STALL)
		val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1;
	else
		val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1;

	writel(val, isp->base + BUTTRESS_REG_BTRS_CTRL);
}
497
/*
 * Probe one IPU6 PCI function: map BAR0, identify the hardware
 * generation from the device ID, bring up the buttress, load and
 * validate the CPD firmware, create the isys and psys auxiliary
 * devices, authenticate the firmware and finally enable runtime PM.
 *
 * Cleanup on failure runs through the goto ladder at the bottom, in
 * reverse acquisition order.
 */
static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ipu6_buttress_ctrl *isys_ctrl = NULL, *psys_ctrl = NULL;
	struct device *dev = &pdev->dev;
	void __iomem *isys_base = NULL;
	void __iomem *psys_base = NULL;
	struct ipu6_device *isp;
	phys_addr_t phys;
	u32 val, version, sku_id;
	int ret;

	isp = devm_kzalloc(dev, sizeof(*isp), GFP_KERNEL);
	if (!isp)
		return -ENOMEM;

	isp->pdev = pdev;
	INIT_LIST_HEAD(&isp->devices);

	ret = pcim_enable_device(pdev);
	if (ret)
		return dev_err_probe(dev, ret, "Enable PCI device failed\n");

	phys = pci_resource_start(pdev, IPU6_PCI_BAR);
	dev_dbg(dev, "IPU6 PCI bar[%u] = %pa\n", IPU6_PCI_BAR, &phys);

	isp->base = pcim_iomap_region(pdev, IPU6_PCI_BAR, IPU6_NAME);
	if (IS_ERR(isp->base))
		return dev_err_probe(dev, PTR_ERR(isp->base),
				     "Failed to I/O mem remapping\n");

	pci_set_drvdata(pdev, isp);
	pci_set_master(pdev);

	/* device ID selects HW generation, firmware file and CPD layout */
	isp->cpd_metadata_cmpnt_size = sizeof(struct ipu6_cpd_metadata_cmpnt);
	switch (id->device) {
	case PCI_DEVICE_ID_INTEL_IPU6:
		isp->hw_ver = IPU6_VER_6;
		isp->cpd_fw_name = IPU6_FIRMWARE_NAME;
		break;
	case PCI_DEVICE_ID_INTEL_IPU6SE:
		isp->hw_ver = IPU6_VER_6SE;
		isp->cpd_fw_name = IPU6SE_FIRMWARE_NAME;
		isp->cpd_metadata_cmpnt_size =
			sizeof(struct ipu6se_cpd_metadata_cmpnt);
		break;
	case PCI_DEVICE_ID_INTEL_IPU6EP_ADLP:
	case PCI_DEVICE_ID_INTEL_IPU6EP_RPLP:
		isp->hw_ver = IPU6_VER_6EP;
		isp->cpd_fw_name = IPU6EP_FIRMWARE_NAME;
		break;
	case PCI_DEVICE_ID_INTEL_IPU6EP_ADLN:
		isp->hw_ver = IPU6_VER_6EP;
		isp->cpd_fw_name = IPU6EPADLN_FIRMWARE_NAME;
		break;
	case PCI_DEVICE_ID_INTEL_IPU6EP_MTL:
		isp->hw_ver = IPU6_VER_6EP_MTL;
		isp->cpd_fw_name = IPU6EPMTL_FIRMWARE_NAME;
		break;
	default:
		return dev_err_probe(dev, -ENODEV,
				     "Unsupported IPU6 device %x\n",
				     id->device);
	}

	ipu6_internal_pdata_init(isp);

	isys_base = isp->base + isys_ipdata.hw_variant.offset;
	psys_base = isp->base + psys_ipdata.hw_variant.offset;

	/* IPU6 DMA addressing is limited to 39 bits */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39));
	if (ret)
		return dev_err_probe(dev, ret, "Failed to set DMA mask\n");

	dma_set_max_seg_size(dev, UINT_MAX);

	ret = ipu6_pci_config_setup(pdev, isp->hw_ver);
	if (ret)
		return ret;

	ret = ipu6_buttress_init(isp);
	if (ret)
		return ret;

	ret = request_firmware(&isp->cpd_fw, isp->cpd_fw_name, dev);
	if (ret) {
		dev_err_probe(&isp->pdev->dev, ret,
			      "Requesting signed firmware %s failed\n",
			      isp->cpd_fw_name);
		goto buttress_exit;
	}

	ret = ipu6_cpd_validate_cpd_file(isp, isp->cpd_fw->data,
					 isp->cpd_fw->size);
	if (ret) {
		dev_err_probe(&isp->pdev->dev, ret,
			      "Failed to validate cpd\n");
		goto out_ipu6_bus_del_devices;
	}

	/* per-device writable copies of the const ctrl templates */
	isys_ctrl = devm_kmemdup(dev, &isys_buttress_ctrl,
				 sizeof(isys_buttress_ctrl), GFP_KERNEL);
	if (!isys_ctrl) {
		ret = -ENOMEM;
		goto out_ipu6_bus_del_devices;
	}

	isp->isys = ipu6_isys_init(pdev, dev, isys_ctrl, isys_base,
				   &isys_ipdata);
	if (IS_ERR(isp->isys)) {
		ret = PTR_ERR(isp->isys);
		goto out_ipu6_bus_del_devices;
	}

	psys_ctrl = devm_kmemdup(dev, &psys_buttress_ctrl,
				 sizeof(psys_buttress_ctrl), GFP_KERNEL);
	if (!psys_ctrl) {
		ret = -ENOMEM;
		goto out_ipu6_bus_del_devices;
	}

	/* psys is parented to isys so it suspends first / resumes last */
	isp->psys = ipu6_psys_init(pdev, &isp->isys->auxdev.dev, psys_ctrl,
				   psys_base, &psys_ipdata);
	if (IS_ERR(isp->psys)) {
		ret = PTR_ERR(isp->psys);
		goto out_ipu6_bus_del_devices;
	}

	/* power up psys for MMU setup, firmware mapping and authentication */
	ret = pm_runtime_resume_and_get(&isp->psys->auxdev.dev);
	if (ret < 0)
		goto out_ipu6_bus_del_devices;

	ret = ipu6_mmu_hw_init(isp->psys->mmu);
	if (ret) {
		dev_err_probe(&isp->pdev->dev, ret,
			      "Failed to set MMU hardware\n");
		goto out_ipu6_bus_del_devices;
	}

	ret = ipu6_buttress_map_fw_image(isp->psys, isp->cpd_fw,
					 &isp->psys->fw_sgt);
	if (ret) {
		dev_err_probe(&isp->pdev->dev, ret, "failed to map fw image\n");
		goto out_ipu6_bus_del_devices;
	}

	ret = ipu6_cpd_create_pkg_dir(isp->psys, isp->cpd_fw->data);
	if (ret) {
		dev_err_probe(&isp->pdev->dev, ret,
			      "failed to create pkg dir\n");
		goto out_ipu6_bus_del_devices;
	}

	ret = devm_request_threaded_irq(dev, pdev->irq, ipu6_buttress_isr,
					ipu6_buttress_isr_threaded,
					IRQF_SHARED, IPU6_NAME, isp);
	if (ret) {
		dev_err_probe(dev, ret, "Requesting irq failed\n");
		goto out_ipu6_bus_del_devices;
	}

	ret = ipu6_buttress_authenticate(isp);
	if (ret) {
		dev_err_probe(&isp->pdev->dev, ret,
			      "FW authentication failed\n");
		goto out_free_irq;
	}

	ipu6_mmu_hw_cleanup(isp->psys->mmu);
	pm_runtime_put(&isp->psys->auxdev.dev);

	/* Configure the arbitration mechanisms for VC requests */
	ipu6_configure_vc_mechanism(isp);

	val = readl(isp->base + BUTTRESS_REG_SKU);
	sku_id = FIELD_GET(GENMASK(6, 4), val);
	version = FIELD_GET(GENMASK(3, 0), val);
	dev_info(dev, "IPU%u-v%u[%x] hardware version %d\n", version, sku_id,
		 pdev->device, isp->hw_ver);

	pm_runtime_put_noidle(dev);
	pm_runtime_allow(dev);

	isp->bus_ready_to_probe = true;

	return 0;

out_free_irq:
	devm_free_irq(dev, pdev->irq, isp);
out_ipu6_bus_del_devices:
	if (isp->psys) {
		ipu6_cpd_free_pkg_dir(isp->psys);
		ipu6_buttress_unmap_fw_image(isp->psys, &isp->psys->fw_sgt);
	}
	if (!IS_ERR_OR_NULL(isp->psys) && !IS_ERR_OR_NULL(isp->psys->mmu))
		ipu6_mmu_cleanup(isp->psys->mmu);
	if (!IS_ERR_OR_NULL(isp->isys) && !IS_ERR_OR_NULL(isp->isys->mmu))
		ipu6_mmu_cleanup(isp->isys->mmu);
	ipu6_bus_del_devices(pdev);
	release_firmware(isp->cpd_fw);
buttress_exit:
	ipu6_buttress_exit(isp);

	return ret;
}
702
/*
 * Undo everything done in probe.  The MMU pointers are cached first
 * because ipu6_bus_del_devices() tears down the isys/psys bus devices
 * before the MMUs are cleaned up at the end.
 */
static void ipu6_pci_remove(struct pci_dev *pdev)
{
	struct ipu6_device *isp = pci_get_drvdata(pdev);
	struct ipu6_mmu *isys_mmu = isp->isys->mmu;
	struct ipu6_mmu *psys_mmu = isp->psys->mmu;

	devm_free_irq(&pdev->dev, pdev->irq, isp);
	ipu6_cpd_free_pkg_dir(isp->psys);

	ipu6_buttress_unmap_fw_image(isp->psys, &isp->psys->fw_sgt);
	ipu6_buttress_exit(isp);

	ipu6_bus_del_devices(pdev);

	/* balance probe's pm_runtime_put_noidle()/pm_runtime_allow() */
	pm_runtime_forbid(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);

	release_firmware(isp->cpd_fw);

	ipu6_mmu_cleanup(psys_mmu);
	ipu6_mmu_cleanup(isys_mmu);
}
725
/* Keep the device powered up while a PCI function reset is in progress. */
static void ipu6_pci_reset_prepare(struct pci_dev *pdev)
{
	struct ipu6_device *isp = pci_get_drvdata(pdev);

	pm_runtime_forbid(&isp->pdev->dev);
}
732
/*
 * Restore buttress state after a PCI reset.  In secure mode firmware
 * authentication state is invalidated; the CSE IPC channel reset itself
 * is deferred to the next runtime resume via need_ipc_reset.
 */
static void ipu6_pci_reset_done(struct pci_dev *pdev)
{
	struct ipu6_device *isp = pci_get_drvdata(pdev);

	ipu6_buttress_restore(isp);
	if (isp->secure_mode)
		ipu6_buttress_reset_authentication(isp);

	isp->need_ipc_reset = true;
	pm_runtime_allow(&isp->pdev->dev);
}
744
745 /*
746 * PCI base driver code requires driver to provide these to enable
747 * PCI device level PM state transitions (D0<->D3)
748 */
ipu6_suspend(struct device * dev)749 static int ipu6_suspend(struct device *dev)
750 {
751 struct pci_dev *pdev = to_pci_dev(dev);
752
753 synchronize_irq(pdev->irq);
754 return 0;
755 }
756
/*
 * System resume: re-program VC arbitration, re-detect secure mode,
 * restore buttress registers, reset the CSE IPC channel and re-run
 * firmware authentication.  All failures are only logged -- this always
 * returns 0 so system resume itself never fails because of the IPU.
 */
static int ipu6_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct ipu6_device *isp = pci_get_drvdata(pdev);
	struct ipu6_buttress *b = &isp->buttress;
	int ret;

	/* Configure the arbitration mechanisms for VC requests */
	ipu6_configure_vc_mechanism(isp);

	isp->secure_mode = ipu6_buttress_get_secure_mode(isp);
	dev_info(dev, "IPU6 in %s mode\n",
		 isp->secure_mode ? "secure" : "non-secure");

	ipu6_buttress_restore(isp);

	ret = ipu6_buttress_ipc_reset(isp, &b->cse);
	if (ret)
		dev_err(&isp->pdev->dev, "IPC reset protocol failed!\n");

	/* authentication needs psys powered; skip it if we cannot resume */
	ret = pm_runtime_resume_and_get(&isp->psys->auxdev.dev);
	if (ret < 0) {
		dev_err(&isp->psys->auxdev.dev, "Failed to get runtime PM\n");
		return 0;
	}

	ret = ipu6_buttress_authenticate(isp);
	if (ret)
		dev_err(&isp->pdev->dev, "FW authentication failed(%d)\n", ret);

	pm_runtime_put(&isp->psys->auxdev.dev);

	return 0;
}
791
/*
 * Runtime resume: restore VC arbitration and buttress registers.  If a
 * PCI reset happened since the last resume (need_ipc_reset set by
 * ipu6_pci_reset_done()), run the CSE IPC reset protocol once.  Errors
 * are only logged; the resume itself always succeeds.
 */
static int ipu6_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct ipu6_device *isp = pci_get_drvdata(pdev);
	int ret;

	ipu6_configure_vc_mechanism(isp);
	ipu6_buttress_restore(isp);

	if (isp->need_ipc_reset) {
		struct ipu6_buttress *b = &isp->buttress;

		isp->need_ipc_reset = false;
		ret = ipu6_buttress_ipc_reset(isp, &b->cse);
		if (ret)
			dev_err(&isp->pdev->dev, "IPC reset protocol failed\n");
	}

	return 0;
}
812
/* Same suspend handler serves both system sleep and runtime PM. */
static const struct dev_pm_ops ipu6_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(&ipu6_suspend, &ipu6_resume)
	RUNTIME_PM_OPS(&ipu6_suspend, &ipu6_runtime_resume, NULL)
};

MODULE_DEVICE_TABLE(pci, ipu6_pci_tbl);

/* PCI function-level reset hooks (used e.g. around FLR) */
static const struct pci_error_handlers pci_err_handlers = {
	.reset_prepare = ipu6_pci_reset_prepare,
	.reset_done = ipu6_pci_reset_done,
};
824
/* PCI driver glue; the ID table comes from media/ipu6-pci-table.h */
static struct pci_driver ipu6_pci_driver = {
	.name = IPU6_NAME,
	.id_table = ipu6_pci_tbl,
	.probe = ipu6_pci_probe,
	.remove = ipu6_pci_remove,
	.driver = {
		.pm = pm_ptr(&ipu6_pm_ops),
	},
	.err_handler = &pci_err_handlers,
};

module_pci_driver(ipu6_pci_driver);
837
838 MODULE_IMPORT_NS("INTEL_IPU_BRIDGE");
839 MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
840 MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
841 MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
842 MODULE_AUTHOR("Qingwu Zhang <qingwu.zhang@intel.com>");
843 MODULE_AUTHOR("Yunliang Ding <yunliang.ding@intel.com>");
844 MODULE_AUTHOR("Hongju Wang <hongju.wang@intel.com>");
845 MODULE_LICENSE("GPL");
846 MODULE_DESCRIPTION("Intel IPU6 PCI driver");
847