1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright(c) 2007-2025 Intel Corporation */
3 #include "qat_freebsd.h"
4 #include "adf_cfg.h"
5 #include "adf_common_drv.h"
6 #include "adf_accel_devices.h"
7 #include "adf_4xxx_hw_data.h"
8 #include "adf_gen4_hw_data.h"
9 #include "adf_fw_counters.h"
10 #include "adf_cfg_device.h"
11 #include "adf_dbgfs.h"
12 #include <sys/types.h>
13 #include <sys/kernel.h>
14 #include <sys/malloc.h>
15 #include <machine/bus_dma.h>
16 #include <dev/pci/pcireg.h>
17
/* malloc(9) type used for this driver's hw_device allocation. */
static MALLOC_DEFINE(M_QAT_4XXX, "qat_4xxx", "qat_4xxx");

/* Build one vendor/device match entry for the PCI ID table below. */
#define ADF_SYSTEM_DEVICE(device_id) \
	{ \
		PCI_VENDOR_ID_INTEL, device_id \
	}

/*
 * PCI device IDs served by this driver (4xxx, 401xx, 402xx variants).
 * The table is terminated by a zeroed sentinel entry; adf_probe() scans
 * it until it sees vendor == 0.
 */
static const struct pci_device_id adf_pci_tbl[] = {
	ADF_SYSTEM_DEVICE(ADF_4XXX_PCI_DEVICE_ID),
	ADF_SYSTEM_DEVICE(ADF_401XX_PCI_DEVICE_ID),
	ADF_SYSTEM_DEVICE(ADF_402XX_PCI_DEVICE_ID),
	{
	    0,
	}
};
33
34 static int
adf_probe(device_t dev)35 adf_probe(device_t dev)
36 {
37 const struct pci_device_id *id;
38
39 for (id = adf_pci_tbl; id->vendor != 0; id++) {
40 if (pci_get_vendor(dev) == id->vendor &&
41 pci_get_device(dev) == id->device) {
42 device_set_desc(dev,
43 "Intel " ADF_4XXX_DEVICE_NAME
44 " QuickAssist");
45 return BUS_PROBE_GENERIC;
46 }
47 }
48 return ENXIO;
49 }
50
51 #ifdef QAT_DISABLE_SAFE_DC_MODE
adf_4xxx_sysctl_disable_safe_dc_mode(SYSCTL_HANDLER_ARGS)52 static int adf_4xxx_sysctl_disable_safe_dc_mode(SYSCTL_HANDLER_ARGS)
53 {
54 struct adf_accel_dev *accel_dev = arg1;
55 int error, value = accel_dev->disable_safe_dc_mode;
56
57 error = sysctl_handle_int(oidp, &value, 0, req);
58 if (error || !req->newptr)
59 return error;
60
61 if (value != 1 && value != 0)
62 return EINVAL;
63
64 if (adf_dev_started(accel_dev)) {
65 device_printf(
66 GET_DEV(accel_dev),
67 "QAT: configuration can only be changed in \"down\" device state\n");
68 return EBUSY;
69 }
70
71 accel_dev->disable_safe_dc_mode = (u8)value;
72
73 return 0;
74 }
75
76 static void
adf_4xxx_disable_safe_dc_sysctl_add(struct adf_accel_dev * accel_dev)77 adf_4xxx_disable_safe_dc_sysctl_add(struct adf_accel_dev *accel_dev)
78 {
79 struct sysctl_ctx_list *qat_sysctl_ctx;
80 struct sysctl_oid *qat_sysctl_tree;
81
82 qat_sysctl_ctx =
83 device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev);
84 qat_sysctl_tree =
85 device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev);
86 accel_dev->safe_dc_mode =
87 SYSCTL_ADD_OID(qat_sysctl_ctx,
88 SYSCTL_CHILDREN(qat_sysctl_tree),
89 OID_AUTO,
90 "disable_safe_dc_mode",
91 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_TUN |
92 CTLFLAG_SKIP,
93 accel_dev,
94 0,
95 adf_4xxx_sysctl_disable_safe_dc_mode,
96 "LU",
97 "Disable QAT safe data compression mode");
98 }
99
100 static void
adf_4xxx_disable_safe_dc_sysctl_remove(struct adf_accel_dev * accel_dev)101 adf_4xxx_disable_safe_dc_sysctl_remove(struct adf_accel_dev *accel_dev)
102 {
103 int ret;
104 struct sysctl_ctx_list *qat_sysctl_ctx =
105 device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev);
106
107 ret = sysctl_ctx_entry_del(qat_sysctl_ctx, accel_dev->safe_dc_mode);
108 if (ret) {
109 device_printf(GET_DEV(accel_dev), "Failed to delete entry\n");
110 } else {
111 ret = sysctl_remove_oid(accel_dev->safe_dc_mode, 1, 1);
112 if (ret)
113 device_printf(GET_DEV(accel_dev),
114 "Failed to delete oid\n");
115 }
116 }
117 #endif /* QAT_DISABLE_SAFE_DC_MODE */
118
/*
 * Release resources acquired during adf_attach().  Called both from
 * the attach error path and from adf_detach(), so it tolerates a
 * partially initialized device: the DMA tag, each BAR and hw_device
 * are only released if they were actually set up.
 */
static void
adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
	int i;

	if (accel_dev->dma_tag)
		bus_dma_tag_destroy(accel_dev->dma_tag);
	/* Free every BAR that was successfully mapped in adf_attach(). */
	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];

		if (bar->virt_addr)
			bus_free_resource(accel_pci_dev->pci_dev,
					  SYS_RES_MEMORY,
					  bar->virt_addr);
	}

	if (accel_dev->hw_device) {
		/* Run device-specific teardown before freeing hw data. */
		switch (pci_get_device(accel_pci_dev->pci_dev)) {
		case ADF_4XXX_PCI_DEVICE_ID:
		case ADF_401XX_PCI_DEVICE_ID:
		case ADF_402XX_PCI_DEVICE_ID:
			adf_clean_hw_data_4xxx(accel_dev->hw_device);
			break;
		default:
			break;
		}
		free(accel_dev->hw_device, M_QAT_4XXX);
		accel_dev->hw_device = NULL;
	}
#ifdef QAT_DISABLE_SAFE_DC_MODE
	adf_4xxx_disable_safe_dc_sysctl_remove(accel_dev);
#endif /* QAT_DISABLE_SAFE_DC_MODE */
	adf_dbgfs_exit(accel_dev);
	adf_cfg_dev_remove(accel_dev);
	/* Finally remove the device from the global accel device table. */
	adf_devmgr_rm_dev(accel_dev, NULL);
}
156
/*
 * device_attach method: bring up one 4xxx accelerator.
 *
 * Acquires resources in a fixed order — devmgr registration, hw_data
 * allocation/init, config table, DMA tag, BAR mappings, bus mastering,
 * device config/init/start — and unwinds via the goto chain at the
 * bottom on any failure.  Returns 0 on success or an errno value.
 */
static int
adf_attach(device_t dev)
{
	struct adf_accel_dev *accel_dev;
	struct adf_accel_pci *accel_pci_dev;
	struct adf_hw_device_data *hw_data;
	unsigned int bar_nr;
	int ret = 0, rid;
	struct adf_cfg_device *cfg_dev = NULL;

	/* Set pci MaxPayLoad to 512. Implemented to avoid the issue of
	 * Pci-passthrough causing Maxpayload to be reset to 128 bytes
	 * when the device is reset.
	 */
	if (pci_get_max_payload(dev) != 512)
		pci_set_max_payload(dev, 512);

	accel_dev = device_get_softc(dev);

	mutex_init(&accel_dev->lock);
	INIT_LIST_HEAD(&accel_dev->crypto_list);
	accel_pci_dev = &accel_dev->accel_pci_dev;
	accel_pci_dev->pci_dev = dev;

	/* Fall back to domain 0 if the NUMA domain cannot be determined. */
	if (bus_get_domain(dev, &accel_pci_dev->node) != 0)
		accel_pci_dev->node = 0;

	/* Add accel device to accel table.
	 * This should be called before adf_cleanup_accel is called
	 */
	ret = adf_devmgr_add_dev(accel_dev, NULL);
	if (ret) {
		device_printf(dev, "Failed to add new accelerator device.\n");
		goto out_err_lock;
	}

	/* Allocate and configure device configuration structure */
	/* M_WAITOK guarantees a non-NULL return, so no check is needed. */
	hw_data = malloc(sizeof(*hw_data), M_QAT_4XXX, M_WAITOK | M_ZERO);

	accel_dev->hw_device = hw_data;
	adf_init_hw_data_4xxx(accel_dev->hw_device, pci_get_device(dev));
	accel_pci_dev->revid = pci_get_revid(dev);
	hw_data->fuses = pci_read_config(dev, ADF_4XXX_FUSECTL4_OFFSET, 4);

	/* Get PPAERUCM values and store */
	ret = adf_aer_store_ppaerucm_reg(dev, hw_data);
	if (ret)
		goto out_err;

	/* Get Accelerators and Accelerators Engines masks */
	hw_data->accel_mask = hw_data->get_accel_mask(accel_dev);
	hw_data->ae_mask = hw_data->get_ae_mask(accel_dev);

	accel_pci_dev->sku = hw_data->get_sku(hw_data);
	/* If the device has no acceleration engines then ignore it. */
	if (!hw_data->accel_mask || !hw_data->ae_mask ||
	    (~hw_data->ae_mask & 0x01)) {
		device_printf(dev, "No acceleration units found\n");
		ret = ENXIO;
		goto out_err;
	}

	/* Create device configuration table */
	ret = adf_cfg_dev_add(accel_dev);
	if (ret)
		goto out_err;
	ret = adf_clock_debugfs_add(accel_dev);
	if (ret)
		goto out_err;

#ifdef QAT_DISABLE_SAFE_DC_MODE
	adf_4xxx_disable_safe_dc_sysctl_add(accel_dev);
#endif /* QAT_DISABLE_SAFE_DC_MODE */

	pci_set_max_read_req(dev, 4096);

	/* Parent DMA tag for all of the device's DMA allocations. */
	ret = bus_dma_tag_create(bus_get_dma_tag(dev),
				 1,
				 0,
				 BUS_SPACE_MAXADDR,
				 BUS_SPACE_MAXADDR,
				 NULL,
				 NULL,
				 BUS_SPACE_MAXSIZE,
				 /* BUS_SPACE_UNRESTRICTED */ 1,
				 BUS_SPACE_MAXSIZE,
				 0,
				 NULL,
				 NULL,
				 &accel_dev->dma_tag);
	if (ret)
		goto out_err;

	if (hw_data->get_accel_cap) {
		hw_data->accel_capabilities_mask =
		    hw_data->get_accel_cap(accel_dev);
	}

	/* Find and map all the device's BARS */
	/* Logical BARs configuration for 64bit BARs:
	     bar 0 and 1 - logical BAR0
	     bar 2 and 3 - logical BAR1
	     bar 4 and 5 - logical BAR3
	 */
	for (bar_nr = 0;
	     bar_nr < (ADF_PCI_MAX_BARS * 2) && bar_nr < PCIR_MAX_BAR_0;
	     bar_nr += 2) {
		struct adf_bar *bar;

		rid = PCIR_BAR(bar_nr);
		bar = &accel_pci_dev->pci_bars[bar_nr / 2];

		bar->virt_addr = bus_alloc_resource_any(dev,
							SYS_RES_MEMORY,
							&rid,
							RF_ACTIVE);
		if (!bar->virt_addr) {
			device_printf(dev, "Failed to map BAR %d\n", bar_nr);
			ret = ENXIO;
			goto out_err;
		}
		bar->base_addr = rman_get_start(bar->virt_addr);
		bar->size = rman_get_size(bar->virt_addr);
	}
	ret = pci_enable_busmaster(dev);
	if (ret)
		goto out_err;

	adf_dbgfs_init(accel_dev);

	if (!accel_dev->hw_device->config_device) {
		ret = EFAULT;
		goto out_err_disable;
	}

	ret = accel_dev->hw_device->config_device(accel_dev);
	if (ret)
		goto out_err_disable;

	ret = adf_dev_init(accel_dev);
	if (ret)
		goto out_dev_shutdown;

	ret = adf_dev_start(accel_dev);
	if (ret)
		goto out_dev_stop;

	/* Configuration is complete; release the temporary cfg device. */
	cfg_dev = accel_dev->cfg->dev;
	adf_cfg_device_clear(cfg_dev, accel_dev);
	free(cfg_dev, M_QAT);
	accel_dev->cfg->dev = NULL;
	return ret;
out_dev_stop:
	adf_dev_stop(accel_dev);
out_dev_shutdown:
	adf_dev_shutdown(accel_dev);
out_err_disable:
	pci_disable_busmaster(dev);
out_err:
	adf_cleanup_accel(accel_dev);
out_err_lock:
	mutex_destroy(&accel_dev->lock);

	return ret;
}
322
323 static int
adf_detach(device_t dev)324 adf_detach(device_t dev)
325 {
326 struct adf_accel_dev *accel_dev = device_get_softc(dev);
327
328 if (adf_dev_stop(accel_dev)) {
329 device_printf(dev, "Failed to stop QAT accel dev\n");
330 return EBUSY;
331 }
332
333 adf_dev_shutdown(accel_dev);
334
335 pci_disable_busmaster(dev);
336 adf_cleanup_accel(accel_dev);
337 mutex_destroy(&accel_dev->lock);
338
339 return 0;
340 }
341
/* newbus method table wiring probe/attach/detach into the PCI bus. */
static device_method_t adf_methods[] = { DEVMETHOD(device_probe, adf_probe),
					 DEVMETHOD(device_attach, adf_attach),
					 DEVMETHOD(device_detach, adf_detach),

					 DEVMETHOD_END };

/* Softc is the full adf_accel_dev; allocated per device by newbus. */
static driver_t adf_driver = { "qat",
			       adf_methods,
			       sizeof(struct adf_accel_dev) };

/* Register on the pci bus; SI_ORDER_THIRD so qat_common loads first. */
DRIVER_MODULE_ORDERED(qat_4xxx, pci, adf_driver, NULL, NULL, SI_ORDER_THIRD);
MODULE_VERSION(qat_4xxx, 1);
MODULE_DEPEND(qat_4xxx, qat_common, 1, 1, 1);
MODULE_DEPEND(qat_4xxx, qat_api, 1, 1, 1);
MODULE_DEPEND(qat_4xxx, linuxkpi, 1, 1, 1);
357