// SPDX-License-Identifier: GPL-2.0
/*
 * ISM driver for s390.
 *
 * Copyright IBM Corp. 2018
 */
#define KMSG_COMPONENT "ism"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/processor.h>

#include "ism.h"

MODULE_DESCRIPTION("ISM driver for s390");
MODULE_LICENSE("GPL");

#define DRV_NAME "ism"

static const struct pci_device_id ism_device_table[] = {
        { PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, ism_device_table);

static debug_info_t *ism_debug_info;

#define NO_CLIENT       0xff            /* must be >= MAX_CLIENTS */
static struct ism_client *clients[MAX_CLIENTS]; /* use an array rather than */
                                                /* a list for fast mapping  */
static u8 max_client;
static DEFINE_MUTEX(clients_lock);
static bool ism_v2_capable;
struct ism_dev_list {
        struct list_head list;
        struct mutex mutex; /* protects ism device list */
};

static struct ism_dev_list ism_dev_list = {
        .list = LIST_HEAD_INIT(ism_dev_list.list),
        .mutex = __MUTEX_INITIALIZER(ism_dev_list.mutex),
};

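/*
 * Publish @client in @ism's subscriber array so that ism_handle_irq()
 * and ism_handle_event() start forwarding to this client.
 */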
static void ism_setup_forwarding(struct ism_client *client, struct ism_dev *ism)
{
        unsigned long flags;

        spin_lock_irqsave(&ism->lock, flags);
        ism->subs[client->id] = client;
        spin_unlock_irqrestore(&ism->lock, flags);
}

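/*
 * Register an ISM client. The first free slot in clients[] becomes the
 * client's id; the client's add() callback then runs for every device
 * already known, followed by enabling IRQ/event forwarding to it.
 */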
int ism_register_client(struct ism_client *client)
{
        struct ism_dev *ism;
        int i, rc = -ENOSPC;

        mutex_lock(&ism_dev_list.mutex);
        mutex_lock(&clients_lock);
        for (i = 0; i < MAX_CLIENTS; ++i) {
                if (!clients[i]) {
                        clients[i] = client;
                        client->id = i;
                        if (i == max_client)
                                max_client++;
                        rc = 0;
                        break;
                }
        }
        mutex_unlock(&clients_lock);

        if (i < MAX_CLIENTS) {
                /* initialize with all devices that we got so far */
                list_for_each_entry(ism, &ism_dev_list.list, list) {
                        ism->priv[i] = NULL;
                        client->add(ism);
                        ism_setup_forwarding(client, ism);
                }
        }
        mutex_unlock(&ism_dev_list.mutex);

        return rc;
}
EXPORT_SYMBOL_GPL(ism_register_client);

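/*
 * Unregister an ISM client. Forwarding to the client is stopped on each
 * device before its DMBs are checked; if the client still owns a
 * registered DMB anywhere, the call fails with -EBUSY.
 */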
int ism_unregister_client(struct ism_client *client)
{
        struct ism_dev *ism;
        unsigned long flags;
        int rc = 0;

        mutex_lock(&ism_dev_list.mutex);
        list_for_each_entry(ism, &ism_dev_list.list, list) {
                spin_lock_irqsave(&ism->lock, flags);
                /* Stop forwarding IRQs and events */
                ism->subs[client->id] = NULL;
                for (int i = 0; i < ISM_NR_DMBS; ++i) {
                        if (ism->sba_client_arr[i] == client->id) {
                                WARN(1, "%s: attempt to unregister '%s' with registered dmb(s)\n",
                                     __func__, client->name);
                                rc = -EBUSY;
                                goto err_reg_dmb;
                        }
                }
                spin_unlock_irqrestore(&ism->lock, flags);
        }
        mutex_unlock(&ism_dev_list.mutex);

        mutex_lock(&clients_lock);
        clients[client->id] = NULL;
        if (client->id + 1 == max_client)
                max_client--;
        mutex_unlock(&clients_lock);
        return rc;

err_reg_dmb:
        spin_unlock_irqrestore(&ism->lock, flags);
        mutex_unlock(&ism_dev_list.mutex);
        return rc;
}
EXPORT_SYMBOL_GPL(ism_unregister_client);

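/*
 * Issue a command to the device: write the request (payload first,
 * header last), preset resp->ret to ISM_ERROR, then read back the
 * response header and, on success, the response payload. A response
 * that cannot be read back thus registers as ISM_ERROR.
 */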
static int ism_cmd(struct ism_dev *ism, void *cmd)
{
        struct ism_req_hdr *req = cmd;
        struct ism_resp_hdr *resp = cmd;

        __ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req));
        __ism_write_cmd(ism, req, 0, sizeof(*req));

        WRITE_ONCE(resp->ret, ISM_ERROR);

        __ism_read_cmd(ism, resp, 0, sizeof(*resp));
        if (resp->ret) {
                debug_text_event(ism_debug_info, 0, "cmd failure");
                debug_event(ism_debug_info, 0, resp, sizeof(*resp));
                goto out;
        }
        __ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp));
out:
        return resp->ret;
}

static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
{
        union ism_cmd_simple cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.request.hdr.cmd = cmd_code;
        cmd.request.hdr.len = sizeof(cmd.request);

        return ism_cmd(ism, &cmd);
}

static int query_info(struct ism_dev *ism)
{
        union ism_qi cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.request.hdr.cmd = ISM_QUERY_INFO;
        cmd.request.hdr.len = sizeof(cmd.request);

        if (ism_cmd(ism, &cmd))
                goto out;

        debug_text_event(ism_debug_info, 3, "query info");
        debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response));
out:
        /* Only feeds the debug log; failures are deliberately ignored. */
        return 0;
}

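/*
 * Allocate one DMA-coherent page as the SBA and register it with the
 * device. The interrupt handler reads DMB bits, dmbe_mask words and the
 * summary/event indicators (s, e) from this page; see ism_handle_irq().
 */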
static int register_sba(struct ism_dev *ism)
{
        union ism_reg_sba cmd;
        dma_addr_t dma_handle;
        struct ism_sba *sba;

        sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
                                 GFP_KERNEL);
        if (!sba)
                return -ENOMEM;

        memset(&cmd, 0, sizeof(cmd));
        cmd.request.hdr.cmd = ISM_REG_SBA;
        cmd.request.hdr.len = sizeof(cmd.request);
        cmd.request.sba = dma_handle;

        if (ism_cmd(ism, &cmd)) {
                dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
                return -EIO;
        }

        ism->sba = sba;
        ism->sba_dma_addr = dma_handle;

        return 0;
}

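/*
 * Allocate and register the IEQ, the queue into which the device posts
 * events; ism_handle_event() consumes them. ieq_idx starts at -1 so
 * the first increment selects entry 0.
 */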
static int register_ieq(struct ism_dev *ism)
{
        union ism_reg_ieq cmd;
        dma_addr_t dma_handle;
        struct ism_eq *ieq;

        ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
                                 GFP_KERNEL);
        if (!ieq)
                return -ENOMEM;

        memset(&cmd, 0, sizeof(cmd));
        cmd.request.hdr.cmd = ISM_REG_IEQ;
        cmd.request.hdr.len = sizeof(cmd.request);
        cmd.request.ieq = dma_handle;
        cmd.request.len = sizeof(*ieq);

        if (ism_cmd(ism, &cmd)) {
                dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
                return -EIO;
        }

        ism->ieq = ieq;
        ism->ieq_idx = -1;
        ism->ieq_dma_addr = dma_handle;

        return 0;
}

static int unregister_sba(struct ism_dev *ism)
{
        int ret;

        if (!ism->sba)
                return 0;

        ret = ism_cmd_simple(ism, ISM_UNREG_SBA);
        if (ret && ret != ISM_ERROR)
                return -EIO;

        dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
                          ism->sba, ism->sba_dma_addr);

        ism->sba = NULL;
        ism->sba_dma_addr = 0;

        return 0;
}

static int unregister_ieq(struct ism_dev *ism)
{
        int ret;

        if (!ism->ieq)
                return 0;

        ret = ism_cmd_simple(ism, ISM_UNREG_IEQ);
        if (ret && ret != ISM_ERROR)
                return -EIO;

        dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
                          ism->ieq, ism->ieq_dma_addr);

        ism->ieq = NULL;
        ism->ieq_dma_addr = 0;

        return 0;
}

static int ism_read_local_gid(struct ism_dev *ism)
{
        union ism_read_gid cmd;
        int ret;

        memset(&cmd, 0, sizeof(cmd));
        cmd.request.hdr.cmd = ISM_READ_GID;
        cmd.request.hdr.len = sizeof(cmd.request);

        ret = ism_cmd(ism, &cmd);
        if (ret)
                goto out;

        ism->local_gid = cmd.response.gid;
out:
        return ret;
}

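/*
 * DMB buffers are backed by a physically contiguous folio and mapped
 * DMA_FROM_DEVICE: the device writes into them, the CPU only reads.
 * ism_free_dmb() is the exact inverse of ism_alloc_dmb().
 */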
static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
        clear_bit(dmb->sba_idx, ism->sba_bitmap);
        dma_unmap_page(&ism->pdev->dev, dmb->dma_addr, dmb->dmb_len,
                       DMA_FROM_DEVICE);
        folio_put(virt_to_folio(dmb->cpu_addr));
}

static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
        struct folio *folio;
        unsigned long bit;
        int rc;

        if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
                return -EINVAL;

        if (!dmb->sba_idx) {
                bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
                                         ISM_DMB_BIT_OFFSET);
                if (bit == ISM_NR_DMBS)
                        return -ENOSPC;

                dmb->sba_idx = bit;
        }
        if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
            test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
                return -EINVAL;

        folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
                            __GFP_NORETRY, get_order(dmb->dmb_len));

        if (!folio) {
                rc = -ENOMEM;
                goto out_bit;
        }

        dmb->cpu_addr = folio_address(folio);
        dmb->dma_addr = dma_map_page(&ism->pdev->dev,
                                     virt_to_page(dmb->cpu_addr), 0,
                                     dmb->dmb_len, DMA_FROM_DEVICE);
        if (dma_mapping_error(&ism->pdev->dev, dmb->dma_addr)) {
                rc = -ENOMEM;
                goto out_free;
        }

        return 0;

out_free:
        /* The buffer is folio-backed, so release it with folio_put(),
         * not kfree(), matching ism_free_dmb().
         */
        folio_put(folio);
out_bit:
        clear_bit(dmb->sba_idx, ism->sba_bitmap);
        return rc;
}

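/*
 * Allocate a DMB and register it with the device. On success the
 * device-assigned token is stored in dmb->dmb_tok, and the owning
 * client is recorded in sba_client_arr[] so that ism_handle_irq() can
 * route interrupts for this DMB.
 */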
int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
                     struct ism_client *client)
{
        union ism_reg_dmb cmd;
        unsigned long flags;
        int ret;

        ret = ism_alloc_dmb(ism, dmb);
        if (ret)
                goto out;

        memset(&cmd, 0, sizeof(cmd));
        cmd.request.hdr.cmd = ISM_REG_DMB;
        cmd.request.hdr.len = sizeof(cmd.request);

        cmd.request.dmb = dmb->dma_addr;
        cmd.request.dmb_len = dmb->dmb_len;
        cmd.request.sba_idx = dmb->sba_idx;
        cmd.request.vlan_valid = dmb->vlan_valid;
        cmd.request.vlan_id = dmb->vlan_id;
        cmd.request.rgid = dmb->rgid;

        ret = ism_cmd(ism, &cmd);
        if (ret) {
                ism_free_dmb(ism, dmb);
                goto out;
        }
        dmb->dmb_tok = cmd.response.dmb_tok;
        spin_lock_irqsave(&ism->lock, flags);
        ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = client->id;
        spin_unlock_irqrestore(&ism->lock, flags);
out:
        return ret;
}
EXPORT_SYMBOL_GPL(ism_register_dmb);

int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
        union ism_unreg_dmb cmd;
        unsigned long flags;
        int ret;

        memset(&cmd, 0, sizeof(cmd));
        cmd.request.hdr.cmd = ISM_UNREG_DMB;
        cmd.request.hdr.len = sizeof(cmd.request);

        cmd.request.dmb_tok = dmb->dmb_tok;

        spin_lock_irqsave(&ism->lock, flags);
        ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = NO_CLIENT;
        spin_unlock_irqrestore(&ism->lock, flags);

        ret = ism_cmd(ism, &cmd);
        if (ret && ret != ISM_ERROR)
                goto out;

        ism_free_dmb(ism, dmb);
out:
        return ret;
}
EXPORT_SYMBOL_GPL(ism_unregister_dmb);

static int ism_add_vlan_id(struct ism_dev *ism, u64 vlan_id)
{
        union ism_set_vlan_id cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.request.hdr.cmd = ISM_ADD_VLAN_ID;
        cmd.request.hdr.len = sizeof(cmd.request);

        cmd.request.vlan_id = vlan_id;

        return ism_cmd(ism, &cmd);
}

static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id)
{
        union ism_set_vlan_id cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.request.hdr.cmd = ISM_DEL_VLAN_ID;
        cmd.request.hdr.len = sizeof(cmd.request);

        cmd.request.vlan_id = vlan_id;

        return ism_cmd(ism, &cmd);
}

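/*
 * Return how many of @len bytes can be moved starting at @start without
 * crossing the next @boundary (a power of two). For example, start =
 * 0xf00, len = 0x400, boundary = 0x1000 (PAGE_SIZE) yields 0x100.
 */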
static unsigned int max_bytes(unsigned int start, unsigned int len,
                              unsigned int boundary)
{
        return min(boundary - (start & (boundary - 1)), len);
}

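/*
 * Move @size bytes of @data into the remote DMB identified by @dmb_tok,
 * in chunks that never cross a page boundary. The signal flag @sf is
 * only applied to the final chunk of the transfer.
 */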
int ism_move(struct ism_dev *ism, u64 dmb_tok, unsigned int idx, bool sf,
             unsigned int offset, void *data, unsigned int size)
{
        unsigned int bytes;
        u64 dmb_req;
        int ret;

        while (size) {
                bytes = max_bytes(offset, size, PAGE_SIZE);
                dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? sf : 0,
                                         offset);

                ret = __ism_move(ism, dmb_req, data, bytes);
                if (ret)
                        return ret;

                size -= bytes;
                data += bytes;
                offset += bytes;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(ism_move);

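/*
 * Drain the IEQ: advance ieq_idx until it catches up with the producer
 * index written by the device, forwarding each entry to all subscribed
 * clients. The index wraps at ARRAY_SIZE(ism->ieq->entry).
 */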
static void ism_handle_event(struct ism_dev *ism)
{
        struct ism_event *entry;
        struct ism_client *clt;
        int i;

        while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
                if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
                        ism->ieq_idx = 0;

                entry = &ism->ieq->entry[ism->ieq_idx];
                debug_event(ism_debug_info, 2, entry, sizeof(*entry));
                for (i = 0; i < max_client; ++i) {
                        clt = ism->subs[i];
                        if (clt)
                                clt->handle_event(ism, entry);
                }
        }
}

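/*
 * MSI handler: clear the summary indicator, then walk the DMB bit
 * vector in the SBA, latching and resetting each dmbe_mask word before
 * forwarding to the owning client. A set event indicator (sba->e) is
 * handled last via ism_handle_event().
 */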
static irqreturn_t ism_handle_irq(int irq, void *data)
{
        struct ism_dev *ism = data;
        unsigned long bit, end;
        unsigned long *bv;
        u16 dmbemask;
        u8 client_id;

        bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
        end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;

        spin_lock(&ism->lock);
        ism->sba->s = 0;
        barrier();
        for (bit = 0;;) {
                bit = find_next_bit_inv(bv, end, bit);
                if (bit >= end)
                        break;

                clear_bit_inv(bit, bv);
                dmbemask = ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET];
                ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
                barrier();
                client_id = ism->sba_client_arr[bit];
                if (unlikely(client_id == NO_CLIENT || !ism->subs[client_id]))
                        continue;
                ism->subs[client_id]->handle_irq(ism, bit + ISM_DMB_BIT_OFFSET, dmbemask);
        }

        if (ism->sba->e) {
                ism->sba->e = 0;
                barrier();
                ism_handle_event(ism);
        }
        spin_unlock(&ism->lock);
        return IRQ_HANDLED;
}

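/*
 * Bring up one ISM device: allocate an MSI vector and the DMB-to-client
 * map, register SBA and IEQ, read the local GID, and probe for v2
 * capability by adding the reserved VLAN id. Pre-registered clients get
 * their add() callback before the device joins the global list.
 */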
static int ism_dev_init(struct ism_dev *ism)
{
        struct pci_dev *pdev = ism->pdev;
        int i, ret;

        ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
        if (ret <= 0)
                goto out;

        ism->sba_client_arr = kzalloc(ISM_NR_DMBS, GFP_KERNEL);
        if (!ism->sba_client_arr) {
                /* don't leak the positive vector count as "success" */
                ret = -ENOMEM;
                goto free_vectors;
        }
        memset(ism->sba_client_arr, NO_CLIENT, ISM_NR_DMBS);

        ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
                          pci_name(pdev), ism);
        if (ret)
                goto free_client_arr;

        ret = register_sba(ism);
        if (ret)
                goto free_irq;

        ret = register_ieq(ism);
        if (ret)
                goto unreg_sba;

        ret = ism_read_local_gid(ism);
        if (ret)
                goto unreg_ieq;

        if (!ism_add_vlan_id(ism, ISM_RESERVED_VLANID))
                /* hardware is V2 capable */
                ism_v2_capable = true;
        else
                ism_v2_capable = false;

        mutex_lock(&ism_dev_list.mutex);
        mutex_lock(&clients_lock);
        for (i = 0; i < max_client; ++i) {
                if (clients[i]) {
                        clients[i]->add(ism);
                        ism_setup_forwarding(clients[i], ism);
                }
        }
        mutex_unlock(&clients_lock);

        list_add(&ism->list, &ism_dev_list.list);
        mutex_unlock(&ism_dev_list.mutex);

        query_info(ism);
        return 0;

unreg_ieq:
        unregister_ieq(ism);
unreg_sba:
        unregister_sba(ism);
free_irq:
        free_irq(pci_irq_vector(pdev, 0), ism);
free_client_arr:
        kfree(ism->sba_client_arr);
free_vectors:
        pci_free_irq_vectors(pdev);
out:
        return ret;
}

static void ism_dev_release(struct device *dev)
{
        struct ism_dev *ism;

        ism = container_of(dev, struct ism_dev, dev);

        kfree(ism);
}

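/*
 * PCI probe: create and register the ism device, enable the PCI
 * function and its memory regions, set up 64-bit DMA with segments
 * capped at 1 MB (presumably the device-side limit per DMB mapping),
 * and finish with ism_dev_init(). Errors unwind in reverse order.
 */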
static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct ism_dev *ism;
        int ret;

        ism = kzalloc(sizeof(*ism), GFP_KERNEL);
        if (!ism)
                return -ENOMEM;

        spin_lock_init(&ism->lock);
        dev_set_drvdata(&pdev->dev, ism);
        ism->pdev = pdev;
        ism->dev.parent = &pdev->dev;
        ism->dev.release = ism_dev_release;
        device_initialize(&ism->dev);
        dev_set_name(&ism->dev, dev_name(&pdev->dev));
        ret = device_add(&ism->dev);
        if (ret)
                goto err_dev;

        ret = pci_enable_device_mem(pdev);
        if (ret)
                goto err;

        ret = pci_request_mem_regions(pdev, DRV_NAME);
        if (ret)
                goto err_disable;

        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (ret)
                goto err_resource;

        dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
        dma_set_max_seg_size(&pdev->dev, SZ_1M);
        pci_set_master(pdev);

        ret = ism_dev_init(ism);
        if (ret)
                goto err_resource;

        return 0;

err_resource:
        pci_release_mem_regions(pdev);
err_disable:
        pci_disable_device(pdev);
err:
        device_del(&ism->dev);
err_dev:
        dev_set_drvdata(&pdev->dev, NULL);
        put_device(&ism->dev);

        return ret;
}

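/*
 * Tear down one ISM device: stop forwarding, run each registered
 * client's remove() callback, drop the reserved VLAN id on v2-capable
 * hardware, then unregister IEQ and SBA and release IRQ resources
 * before unlinking the device from the global list.
 */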
static void ism_dev_exit(struct ism_dev *ism)
{
        struct pci_dev *pdev = ism->pdev;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ism->lock, flags);
        for (i = 0; i < max_client; ++i)
                ism->subs[i] = NULL;
        spin_unlock_irqrestore(&ism->lock, flags);

        mutex_lock(&ism_dev_list.mutex);
        mutex_lock(&clients_lock);
        for (i = 0; i < max_client; ++i) {
                if (clients[i])
                        clients[i]->remove(ism);
        }
        mutex_unlock(&clients_lock);

        if (ism_v2_capable)
                ism_del_vlan_id(ism, ISM_RESERVED_VLANID);
        unregister_ieq(ism);
        unregister_sba(ism);
        free_irq(pci_irq_vector(pdev, 0), ism);
        kfree(ism->sba_client_arr);
        pci_free_irq_vectors(pdev);
        list_del_init(&ism->list);
        mutex_unlock(&ism_dev_list.mutex);
}

static void ism_remove(struct pci_dev *pdev)
{
        struct ism_dev *ism = dev_get_drvdata(&pdev->dev);

        ism_dev_exit(ism);

        pci_release_mem_regions(pdev);
        pci_disable_device(pdev);
        device_del(&ism->dev);
        dev_set_drvdata(&pdev->dev, NULL);
        put_device(&ism->dev);
}

static struct pci_driver ism_driver = {
        .name     = DRV_NAME,
        .id_table = ism_device_table,
        .probe    = ism_probe,
        .remove   = ism_remove,
};

static int __init ism_init(void)
{
        int ret;

        ism_debug_info = debug_register("ism", 2, 1, 16);
        if (!ism_debug_info)
                return -ENODEV;

        memset(clients, 0, sizeof(clients));
        max_client = 0;
        debug_register_view(ism_debug_info, &debug_hex_ascii_view);
        ret = pci_register_driver(&ism_driver);
        if (ret)
                debug_unregister(ism_debug_info);

        return ret;
}

static void __exit ism_exit(void)
{
        pci_unregister_driver(&ism_driver);
        debug_unregister(ism_debug_info);
}

module_init(ism_init);
module_exit(ism_exit);

/*************************** SMC-D Implementation *****************************/

#if IS_ENABLED(CONFIG_SMC)
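/*
 * SMC-D glue: thin wrappers that recover the ism_dev from smcd->priv
 * and forward to the corresponding ism_* operation. ism_ops bundles
 * them for the SMC-D protocol layer (net/smc).
 */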
static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
                          u32 vid)
{
        union ism_query_rgid cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.request.hdr.cmd = ISM_QUERY_RGID;
        cmd.request.hdr.len = sizeof(cmd.request);

        cmd.request.rgid = rgid;
        cmd.request.vlan_valid = vid_valid;
        cmd.request.vlan_id = vid;

        return ism_cmd(ism, &cmd);
}

static int smcd_query_rgid(struct smcd_dev *smcd, struct smcd_gid *rgid,
                           u32 vid_valid, u32 vid)
{
        return ism_query_rgid(smcd->priv, rgid->gid, vid_valid, vid);
}

static int smcd_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
                             void *client)
{
        return ism_register_dmb(smcd->priv, (struct ism_dmb *)dmb, client);
}

static int smcd_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
        return ism_unregister_dmb(smcd->priv, (struct ism_dmb *)dmb);
}

static int smcd_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
        return ism_add_vlan_id(smcd->priv, vlan_id);
}

static int smcd_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
        return ism_del_vlan_id(smcd->priv, vlan_id);
}

static int smcd_set_vlan_required(struct smcd_dev *smcd)
{
        return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
}

static int smcd_reset_vlan_required(struct smcd_dev *smcd)
{
        return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
}

static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
                          u32 event_code, u64 info)
{
        union ism_sig_ieq cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
        cmd.request.hdr.len = sizeof(cmd.request);

        cmd.request.rgid = rgid;
        cmd.request.trigger_irq = trigger_irq;
        cmd.request.event_code = event_code;
        cmd.request.info = info;

        return ism_cmd(ism, &cmd);
}

static int smcd_signal_ieq(struct smcd_dev *smcd, struct smcd_gid *rgid,
                           u32 trigger_irq, u32 event_code, u64 info)
{
        return ism_signal_ieq(smcd->priv, rgid->gid,
                              trigger_irq, event_code, info);
}

static int smcd_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
                     bool sf, unsigned int offset, void *data,
                     unsigned int size)
{
        return ism_move(smcd->priv, dmb_tok, idx, sf, offset, data, size);
}

static int smcd_supports_v2(void)
{
        return ism_v2_capable;
}

static u64 ism_get_local_gid(struct ism_dev *ism)
{
        return ism->local_gid;
}

static void smcd_get_local_gid(struct smcd_dev *smcd,
                               struct smcd_gid *smcd_gid)
{
        smcd_gid->gid = ism_get_local_gid(smcd->priv);
        smcd_gid->gid_ext = 0;
}

static u16 ism_get_chid(struct ism_dev *ism)
{
        if (!ism || !ism->pdev)
                return 0;

        return to_zpci(ism->pdev)->pchid;
}

static u16 smcd_get_chid(struct smcd_dev *smcd)
{
        return ism_get_chid(smcd->priv);
}

static inline struct device *smcd_get_dev(struct smcd_dev *dev)
{
        struct ism_dev *ism = dev->priv;

        return &ism->dev;
}

static const struct smcd_ops ism_ops = {
        .query_remote_gid = smcd_query_rgid,
        .register_dmb = smcd_register_dmb,
        .unregister_dmb = smcd_unregister_dmb,
        .add_vlan_id = smcd_add_vlan_id,
        .del_vlan_id = smcd_del_vlan_id,
        .set_vlan_required = smcd_set_vlan_required,
        .reset_vlan_required = smcd_reset_vlan_required,
        .signal_event = smcd_signal_ieq,
        .move_data = smcd_move,
        .supports_v2 = smcd_supports_v2,
        .get_local_gid = smcd_get_local_gid,
        .get_chid = smcd_get_chid,
        .get_dev = smcd_get_dev,
};

const struct smcd_ops *ism_get_smcd_ops(void)
{
        return &ism_ops;
}
EXPORT_SYMBOL_GPL(ism_get_smcd_ops);
#endif