xref: /linux/net/smc/smc_ism.c (revision 719c3b67bb7ea95bb8158b03c75641c8fc8f94a0)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Shared Memory Communications Direct over ISM devices (SMC-D)
3  *
4  * Functions for ISM device.
5  *
6  * Copyright IBM Corp. 2018
7  */
8 
9 #include <linux/if_vlan.h>
10 #include <linux/spinlock.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <asm/page.h>
14 
15 #include "smc.h"
16 #include "smc_core.h"
17 #include "smc_ism.h"
18 #include "smc_loopback.h"
19 #include "smc_pnet.h"
20 #include "smc_netlink.h"
21 #include "linux/ism.h"
22 #include "linux/dibs.h"
23 
/* Global list of all SMC-D devices, sorted at registration time
 * (loopback-ism first, then pnetid-less devices, then the rest);
 * protected by its own mutex.
 */
struct smcd_dev_list smcd_dev_list = {
	.list = LIST_HEAD_INIT(smcd_dev_list.list),
	.mutex = __MUTEX_INITIALIZER(smcd_dev_list.mutex)
};
28 
/* true once any device proved ISM v2 support (see smc_ism_set_v2_capable()) */
static bool smc_ism_v2_capable;
/* System EID for ISM v2; built once in smc_ism_create_system_eid() */
static u8 smc_ism_v2_system_eid[SMC_MAX_EID_LEN];

static void smcd_register_dev(struct dibs_dev *dibs);
static void smcd_unregister_dev(struct dibs_dev *dibs);
#if IS_ENABLED(CONFIG_ISM)
static void smcd_handle_event(struct ism_dev *ism, struct ism_event *event);
static void smcd_handle_irq(struct ism_dev *ism, unsigned int dmbno,
			    u16 dmbemask);

/* client registered with the ISM driver for event and IRQ callbacks */
static struct ism_client smc_ism_client = {
	.name = "SMC-D",
	.handle_event = smcd_handle_event,
	.handle_irq = smcd_handle_irq,
};
#endif
/* client ops registered with the dibs layer for device add/removal */
static struct dibs_client_ops smc_client_ops = {
	.add_dev = smcd_register_dev,
	.del_dev = smcd_unregister_dev,
};

static struct dibs_client smc_dibs_client = {
	.name = "SMC-D",
	.ops = &smc_client_ops,
};
54 
/* Build the System EID (SEID) used for ISM v2.
 * On s390 the SEID is the fixed string "IBM-SYSZ-ISMSEID" followed by
 * the CPC's serial-number tail and machine type, each rendered as four
 * hex digits. On other architectures the SEID is all zeroes.
 */
static void smc_ism_create_system_eid(void)
{
	struct smc_ism_seid *seid =
		(struct smc_ism_seid *)smc_ism_v2_system_eid;
#if IS_ENABLED(CONFIG_S390)
	struct cpuid id;
	u16 ident_tail;
	char tmp[5];	/* 4 hex digits + NUL for snprintf() */

	memcpy(seid->seid_string, "IBM-SYSZ-ISMSEID00000000", 24);
	get_cpu_id(&id);
	/* only the masked low part of the CPU ident goes into the SEID */
	ident_tail = (u16)(id.ident & SMC_ISM_IDENT_MASK);
	snprintf(tmp, 5, "%04X", ident_tail);
	memcpy(seid->serial_number, tmp, 4);
	snprintf(tmp, 5, "%04X", id.machine);
	memcpy(seid->type, tmp, 4);
#else
	memset(seid, 0, SMC_MAX_EID_LEN);
#endif
}
75 
76 /* Test if an ISM communication is possible - same CPC */
77 int smc_ism_cantalk(struct smcd_gid *peer_gid, unsigned short vlan_id,
78 		    struct smcd_dev *smcd)
79 {
80 	struct dibs_dev *dibs = smcd->dibs;
81 	uuid_t ism_rgid;
82 
83 	copy_to_dibsgid(&ism_rgid, peer_gid);
84 	return dibs->ops->query_remote_gid(dibs, &ism_rgid, vlan_id ? 1 : 0,
85 					  vlan_id);
86 }
87 
88 void smc_ism_get_system_eid(u8 **eid)
89 {
90 	if (!smc_ism_v2_capable)
91 		*eid = NULL;
92 	else
93 		*eid = smc_ism_v2_system_eid;
94 }
95 
96 u16 smc_ism_get_chid(struct smcd_dev *smcd)
97 {
98 	return smcd->dibs->ops->get_fabric_id(smcd->dibs);
99 }
100 
/* HW supports ISM V2 and thus System EID is defined */
bool smc_ism_is_v2_capable(void)
{
	return smc_ism_v2_capable;
}
106 
/* Mark ISM v2 as supported; never cleared until smc_ism_init() resets it */
void smc_ism_set_v2_capable(void)
{
	smc_ism_v2_capable = true;
}
111 
/* Set a connection using this DMBE.
 * Publishes the connection in the device's DMB-indexed table so the
 * IRQ handler can find it; smcd->lock serializes against that handler.
 */
void smc_ism_set_conn(struct smc_connection *conn)
{
	unsigned long flags;

	spin_lock_irqsave(&conn->lgr->smcd->lock, flags);
	conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = conn;
	spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags);
}
121 
/* Unset a connection using this DMBE.
 * Clears the IRQ-handler lookup entry under smcd->lock so no further
 * rx tasklets are scheduled for this connection.
 */
void smc_ism_unset_conn(struct smc_connection *conn)
{
	unsigned long flags;

	/* nothing to clear if no DMB was ever assigned */
	if (!conn->rmb_desc)
		return;

	spin_lock_irqsave(&conn->lgr->smcd->lock, flags);
	conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = NULL;
	spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags);
}
134 
/* Register a VLAN identifier with the ISM device. Use a reference count
 * and add a VLAN identifier only when the first DMB using this VLAN is
 * registered.
 *
 * Returns 0 on success, -EINVAL for vlanid 0, -EOPNOTSUPP if the device
 * cannot add VLAN ids, -ENOMEM or -EIO on failure.
 */
int smc_ism_get_vlan(struct smcd_dev *smcd, unsigned short vlanid)
{
	struct smc_ism_vlanid *new_vlan, *vlan;
	unsigned long flags;
	int rc = 0;

	if (!vlanid)			/* No valid vlan id */
		return -EINVAL;
	if (!smcd->dibs->ops->add_vlan_id)
		return -EOPNOTSUPP;

	/* create new vlan entry, in case we need it;
	 * allocated up front because a GFP_KERNEL allocation must not
	 * happen while holding the smcd->lock spinlock below
	 */
	new_vlan = kzalloc(sizeof(*new_vlan), GFP_KERNEL);
	if (!new_vlan)
		return -ENOMEM;
	new_vlan->vlanid = vlanid;
	refcount_set(&new_vlan->refcnt, 1);

	/* if there is an existing entry, increase count and return */
	spin_lock_irqsave(&smcd->lock, flags);
	list_for_each_entry(vlan, &smcd->vlan, list) {
		if (vlan->vlanid == vlanid) {
			refcount_inc(&vlan->refcnt);
			kfree(new_vlan);	/* pre-allocation not needed */
			goto out;
		}
	}

	/* no existing entry found.
	 * add new entry to device; might fail, e.g., if HW limit reached
	 */
	if (smcd->dibs->ops->add_vlan_id(smcd->dibs, vlanid)) {
		kfree(new_vlan);
		rc = -EIO;
		goto out;
	}
	list_add_tail(&new_vlan->list, &smcd->vlan);
out:
	spin_unlock_irqrestore(&smcd->lock, flags);
	return rc;
}
180 
/* Unregister a VLAN identifier with the ISM device. Use a reference count
 * and remove a VLAN identifier only when the last DMB using this VLAN is
 * unregistered.
 *
 * Returns 0 on success (including when other users remain), -EINVAL for
 * vlanid 0, -EOPNOTSUPP if unsupported, -ENOENT if not registered,
 * -EIO if the device failed to delete the VLAN id.
 */
int smc_ism_put_vlan(struct smcd_dev *smcd, unsigned short vlanid)
{
	struct smc_ism_vlanid *vlan;
	unsigned long flags;
	bool found = false;
	int rc = 0;

	if (!vlanid)			/* No valid vlan id */
		return -EINVAL;
	if (!smcd->dibs->ops->del_vlan_id)
		return -EOPNOTSUPP;

	spin_lock_irqsave(&smcd->lock, flags);
	list_for_each_entry(vlan, &smcd->vlan, list) {
		if (vlan->vlanid == vlanid) {
			/* other users remain: only drop our reference */
			if (!refcount_dec_and_test(&vlan->refcnt))
				goto out;
			found = true;
			break;
		}
	}
	if (!found) {
		rc = -ENOENT;
		goto out;		/* VLAN id not in table */
	}

	/* Found and the last reference just gone */
	if (smcd->dibs->ops->del_vlan_id(smcd->dibs, vlanid))
		rc = -EIO;
	list_del(&vlan->list);
	kfree(vlan);
out:
	spin_unlock_irqrestore(&smcd->lock, flags);
	return rc;
}
220 
221 void smc_ism_unregister_dmb(struct smcd_dev *smcd,
222 			    struct smc_buf_desc *dmb_desc)
223 {
224 	struct smcd_dmb dmb;
225 
226 	if (!dmb_desc->dma_addr)
227 		return;
228 
229 	memset(&dmb, 0, sizeof(dmb));
230 	dmb.dmb_tok = dmb_desc->token;
231 	dmb.sba_idx = dmb_desc->sba_idx;
232 	dmb.cpu_addr = dmb_desc->cpu_addr;
233 	dmb.dma_addr = dmb_desc->dma_addr;
234 	dmb.dmb_len = dmb_desc->len;
235 	smcd->ops->unregister_dmb(smcd, &dmb);
236 
237 	return;
238 }
239 
240 int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len,
241 			 struct smc_buf_desc *dmb_desc)
242 {
243 	struct smcd_dmb dmb;
244 	int rc;
245 
246 	memset(&dmb, 0, sizeof(dmb));
247 	dmb.dmb_len = dmb_len;
248 	dmb.sba_idx = dmb_desc->sba_idx;
249 	dmb.vlan_id = lgr->vlan_id;
250 	dmb.rgid = lgr->peer_gid.gid;
251 	rc = lgr->smcd->ops->register_dmb(lgr->smcd, &dmb, lgr->smcd->client);
252 	if (!rc) {
253 		dmb_desc->sba_idx = dmb.sba_idx;
254 		dmb_desc->token = dmb.dmb_tok;
255 		dmb_desc->cpu_addr = dmb.cpu_addr;
256 		dmb_desc->dma_addr = dmb.dma_addr;
257 		dmb_desc->len = dmb.dmb_len;
258 	}
259 	return rc;
260 }
261 
262 bool smc_ism_support_dmb_nocopy(struct smcd_dev *smcd)
263 {
264 	/* for now only loopback-ism supports
265 	 * merging sndbuf with peer DMB to avoid
266 	 * data copies between them.
267 	 */
268 	return (smcd->ops->support_dmb_nocopy &&
269 		smcd->ops->support_dmb_nocopy(smcd));
270 }
271 
272 int smc_ism_attach_dmb(struct smcd_dev *dev, u64 token,
273 		       struct smc_buf_desc *dmb_desc)
274 {
275 	struct smcd_dmb dmb;
276 	int rc = 0;
277 
278 	if (!dev->ops->attach_dmb)
279 		return -EINVAL;
280 
281 	memset(&dmb, 0, sizeof(dmb));
282 	dmb.dmb_tok = token;
283 	rc = dev->ops->attach_dmb(dev, &dmb);
284 	if (!rc) {
285 		dmb_desc->sba_idx = dmb.sba_idx;
286 		dmb_desc->token = dmb.dmb_tok;
287 		dmb_desc->cpu_addr = dmb.cpu_addr;
288 		dmb_desc->dma_addr = dmb.dma_addr;
289 		dmb_desc->len = dmb.dmb_len;
290 		dmb_desc->is_attached = true;
291 	}
292 	return rc;
293 }
294 
295 int smc_ism_detach_dmb(struct smcd_dev *dev, u64 token)
296 {
297 	if (!dev->ops->detach_dmb)
298 		return -EINVAL;
299 
300 	return dev->ops->detach_dmb(dev, token);
301 }
302 
/* Emit one netlink message describing a single SMC-D device.
 * Returns 0 on success, -EMSGSIZE when the skb ran out of room; on
 * failure every partially written nest and the message are cancelled
 * (note the cascading error labels, innermost first).
 */
static int smc_nl_handle_smcd_dev(struct smcd_dev *smcd,
				  struct sk_buff *skb,
				  struct netlink_callback *cb)
{
	char smc_pnet[SMC_MAX_PNETID_LEN + 1];
	struct smc_pci_dev smc_pci_dev;
	struct nlattr *port_attrs;
	struct dibs_dev *dibs;
	struct nlattr *attrs;
	int use_cnt = 0;
	void *nlh;

	dibs = smcd->dibs;
	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_DEV_SMCD);
	if (!nlh)
		goto errmsg;
	attrs = nla_nest_start(skb, SMC_GEN_DEV_SMCD);
	if (!attrs)
		goto errout;
	/* device is "critical" when at least one link group uses it */
	use_cnt = atomic_read(&smcd->lgr_cnt);
	if (nla_put_u32(skb, SMC_NLA_DEV_USE_CNT, use_cnt))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_DEV_IS_CRIT, use_cnt > 0))
		goto errattr;
	/* PCI identifiers are read from the dibs device's PCI parent */
	memset(&smc_pci_dev, 0, sizeof(smc_pci_dev));
	smc_set_pci_values(to_pci_dev(dibs->dev.parent), &smc_pci_dev);
	if (nla_put_u32(skb, SMC_NLA_DEV_PCI_FID, smc_pci_dev.pci_fid))
		goto errattr;
	if (nla_put_u16(skb, SMC_NLA_DEV_PCI_CHID, smc_pci_dev.pci_pchid))
		goto errattr;
	if (nla_put_u16(skb, SMC_NLA_DEV_PCI_VENDOR, smc_pci_dev.pci_vendor))
		goto errattr;
	if (nla_put_u16(skb, SMC_NLA_DEV_PCI_DEVICE, smc_pci_dev.pci_device))
		goto errattr;
	if (nla_put_string(skb, SMC_NLA_DEV_PCI_ID, smc_pci_dev.pci_id))
		goto errattr;

	port_attrs = nla_nest_start(skb, SMC_NLA_DEV_PORT);
	if (!port_attrs)
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_DEV_PORT_PNET_USR, smcd->pnetid_by_user))
		goto errportattr;
	/* pnetid is a fixed-size field; NUL-terminate a local copy */
	memcpy(smc_pnet, smcd->pnetid, SMC_MAX_PNETID_LEN);
	smc_pnet[SMC_MAX_PNETID_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_DEV_PORT_PNETID, smc_pnet))
		goto errportattr;

	nla_nest_end(skb, port_attrs);
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, nlh);
	return 0;

errportattr:
	nla_nest_cancel(skb, port_attrs);
errattr:
	nla_nest_cancel(skb, attrs);
errout:
	nlmsg_cancel(skb, nlh);
errmsg:
	return -EMSGSIZE;
}
366 
/* Walk the device list under its mutex and emit one netlink message
 * per SMC-D device, skipping loopback-ism. The dump context position
 * (cb_ctx->pos[0]) records how many entries were already handled so a
 * multi-part dump resumes where the previous pass stopped.
 */
static void smc_nl_prep_smcd_dev(struct smcd_dev_list *dev_list,
				 struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	int snum = cb_ctx->pos[0];	/* resume point from previous pass */
	struct smcd_dev *smcd;
	int num = 0;

	mutex_lock(&dev_list->mutex);
	list_for_each_entry(smcd, &dev_list->list, list) {
		if (num < snum)
			goto next;	/* already dumped earlier */
		if (smc_ism_is_loopback(smcd->dibs))
			goto next;
		if (smc_nl_handle_smcd_dev(smcd, skb, cb))
			goto errout;	/* skb full: stop, record position */
next:
		num++;
	}
errout:
	mutex_unlock(&dev_list->mutex);
	cb_ctx->pos[0] = num;
}
391 
/* Netlink dump entry point for SMC_NETLINK_GET_DEV_SMCD */
int smcd_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb)
{
	smc_nl_prep_smcd_dev(&smcd_dev_list, skb, cb);
	return skb->len;
}
397 
398 #if IS_ENABLED(CONFIG_ISM)
/* Deferred event: events arrive in IRQ context and are copied into
 * this work item for processing on the device's ordered workqueue.
 */
struct smc_ism_event_work {
	struct work_struct work;
	struct smcd_dev *smcd;
	struct ism_event event;
};

/* software event codes and flags exchanged via signal_event() */
#define ISM_EVENT_REQUEST		0x0001
#define ISM_EVENT_RESPONSE		0x0002
#define ISM_EVENT_REQUEST_IR		0x00000001
#define ISM_EVENT_CODE_SHUTDOWN		0x80
#define ISM_EVENT_CODE_TESTLINK		0x83

/* layout of the 64-bit info word of a software event */
union smcd_sw_event_info {
	u64	info;
	struct {
		u8		uid[SMC_LGR_ID_SIZE];
		unsigned short	vlan_id;
		u16		code;
	};
};
419 
/* Handle a software-defined event (ISM_EVENT_SWR).
 * event.tok carries the peer GID; event.info carries the packed
 * smcd_sw_event_info (uid, vlan_id, code).
 */
static void smcd_handle_sw_event(struct smc_ism_event_work *wrk)
{
	struct smcd_gid peer_gid = { .gid = wrk->event.tok,
				     .gid_ext = 0 };
	union smcd_sw_event_info ev_info;

	ev_info.info = wrk->event.info;
	switch (wrk->event.code) {
	case ISM_EVENT_CODE_SHUTDOWN:	/* Peer shut down DMBs */
		smc_smcd_terminate(wrk->smcd, &peer_gid, ev_info.vlan_id);
		break;
	case ISM_EVENT_CODE_TESTLINK:	/* Activity timer */
		/* answer a test-link request with a response event,
		 * echoing the received info with the code replaced
		 */
		if (ev_info.code == ISM_EVENT_REQUEST &&
		    wrk->smcd->ops->signal_event) {
			ev_info.code = ISM_EVENT_RESPONSE;
			wrk->smcd->ops->signal_event(wrk->smcd,
						     &peer_gid,
						     ISM_EVENT_REQUEST_IR,
						     ISM_EVENT_CODE_TESTLINK,
						     ev_info.info);
			}
		break;
	}
}
444 
/* worker for SMC-D events; runs on the device's ordered event workqueue
 * and frees the work item queued by smcd_handle_event()
 */
static void smc_ism_event_work(struct work_struct *work)
{
	struct smc_ism_event_work *wrk =
		container_of(work, struct smc_ism_event_work, work);
	struct smcd_gid smcd_gid = { .gid = wrk->event.tok,
				     .gid_ext = 0 };

	switch (wrk->event.type) {
	case ISM_EVENT_GID:	/* GID event, token is peer GID */
		/* peer gone: terminate matching link groups on any VLAN */
		smc_smcd_terminate(wrk->smcd, &smcd_gid, VLAN_VID_MASK);
		break;
	case ISM_EVENT_DMB:
		/* DMB events are ignored */
		break;
	case ISM_EVENT_SWR:	/* Software defined event */
		smcd_handle_sw_event(wrk);
		break;
	}
	kfree(wrk);
}
465 #endif
466 
467 static struct smcd_dev *smcd_alloc_dev(const char *name,
468 				       const struct smcd_ops *ops,
469 				       int max_dmbs)
470 {
471 	struct smcd_dev *smcd;
472 
473 	smcd = kzalloc(sizeof(*smcd), GFP_KERNEL);
474 	if (!smcd)
475 		return NULL;
476 	smcd->conn = kcalloc(max_dmbs, sizeof(struct smc_connection *),
477 			     GFP_KERNEL);
478 	if (!smcd->conn)
479 		goto free_smcd;
480 
481 	smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)",
482 						 WQ_MEM_RECLAIM, name);
483 	if (!smcd->event_wq)
484 		goto free_conn;
485 
486 	smcd->ops = ops;
487 
488 	spin_lock_init(&smcd->lock);
489 	spin_lock_init(&smcd->lgr_lock);
490 	INIT_LIST_HEAD(&smcd->vlan);
491 	INIT_LIST_HEAD(&smcd->lgr_list);
492 	init_waitqueue_head(&smcd->lgrs_deleted);
493 	return smcd;
494 
495 free_conn:
496 	kfree(smcd->conn);
497 free_smcd:
498 	kfree(smcd);
499 	return NULL;
500 }
501 
/* dibs client callback: a new dibs/ISM device appeared.
 * Allocate the smcd device, wire up the driver-specific ops/private
 * data, determine the pnetid, probe v2 capability, and insert the
 * device into the global list in sorted position.
 */
static void smcd_register_dev(struct dibs_dev *dibs)
{
	struct smcd_dev *smcd, *fentry;
	const struct smcd_ops *ops;
	struct smc_lo_dev *smc_lo;
	struct ism_dev *ism;

	if (smc_ism_is_loopback(dibs)) {
		if (smc_loopback_init(&smc_lo))
			return;
	}

	if (smc_ism_is_loopback(dibs)) {
		ops = smc_lo_get_smcd_ops();
		smcd = smcd_alloc_dev(dev_name(&dibs->dev), ops,
				      SMC_LO_MAX_DMBS);
	} else {
		ism = dibs->drv_priv;
#if IS_ENABLED(CONFIG_ISM)
		ops = ism_get_smcd_ops();
#endif
		/* NOTE(review): with CONFIG_ISM disabled, 'ops' is used
		 * uninitialized here -- presumably non-loopback dibs
		 * devices cannot occur in that configuration; verify.
		 */
		smcd = smcd_alloc_dev(dev_name(&dibs->dev), ops,
				      ISM_NR_DMBS);
	}
	/* NOTE(review): on this failure path the earlier
	 * smc_loopback_init() is not undone for loopback devices; verify
	 * whether that leaks the loopback instance.
	 */
	if (!smcd)
		return;

	smcd->dibs = dibs;
	dibs_set_priv(dibs, &smc_dibs_client, smcd);

	/* cross-link smcd and the driver-specific device structure */
	if (smc_ism_is_loopback(dibs)) {
		smcd->priv = smc_lo;
		smc_lo->smcd = smcd;
	} else {
		smcd->priv = ism;
#if IS_ENABLED(CONFIG_ISM)
		ism_set_priv(ism, &smc_ism_client, smcd);
		smcd->client = &smc_ism_client;
#endif
	}

	/* pnetid from the parent device, or from the pnetid table */
	if (smc_pnetid_by_dev_port(dibs->dev.parent, 0, smcd->pnetid))
		smc_pnetid_by_table_smcd(smcd);

	/* v2 capable if loopback, or if the device accepts the
	 * reserved VLAN id
	 */
	if (smc_ism_is_loopback(dibs) ||
	    (dibs->ops->add_vlan_id &&
	     !dibs->ops->add_vlan_id(dibs, ISM_RESERVED_VLANID))) {
		smc_ism_set_v2_capable();
	}

	mutex_lock(&smcd_dev_list.mutex);
	/* sort list:
	 * - devices without pnetid before devices with pnetid;
	 * - loopback-ism always at the very beginning;
	 */
	if (!smcd->pnetid[0]) {
		fentry = list_first_entry_or_null(&smcd_dev_list.list,
						  struct smcd_dev, list);
		if (fentry && smc_ism_is_loopback(fentry->dibs))
			list_add(&smcd->list, &fentry->list);
		else
			list_add(&smcd->list, &smcd_dev_list.list);
	} else {
		list_add_tail(&smcd->list, &smcd_dev_list.list);
	}
	mutex_unlock(&smcd_dev_list.mutex);

	if (smc_pnet_is_pnetid_set(smcd->pnetid))
		pr_warn_ratelimited("smc: adding smcd device %s with pnetid %.16s%s\n",
				    dev_name(&dibs->dev), smcd->pnetid,
				    smcd->pnetid_by_user ?
					" (user defined)" :
					"");
	else
		pr_warn_ratelimited("smc: adding smcd device %s without pnetid\n",
				    dev_name(&dibs->dev));
	return;
}
580 
/* dibs client callback: a dibs/ISM device is going away.
 * Terminate all link groups on the device, remove it from the global
 * list, drain its event workqueue, and free its resources.
 */
static void smcd_unregister_dev(struct dibs_dev *dibs)
{
	struct smcd_dev *smcd = dibs_get_priv(dibs, &smc_dibs_client);

	pr_warn_ratelimited("smc: removing smcd device %s\n",
			    dev_name(&dibs->dev));
	smcd->going_away = 1;	/* stops queueing of new event work */
	smc_smcd_terminate_all(smcd);
	mutex_lock(&smcd_dev_list.mutex);
	list_del_init(&smcd->list);
	mutex_unlock(&smcd_dev_list.mutex);
	destroy_workqueue(smcd->event_wq);
	if (smc_ism_is_loopback(dibs))
		smc_loopback_exit();
	kfree(smcd->conn);
	kfree(smcd);
}
598 
599 #if IS_ENABLED(CONFIG_ISM)
/* SMCD Device event handler. Called from ISM device interrupt handler.
 * Parameters are ism device pointer,
 * - event->type (0 --> DMB, 1 --> GID),
 * - event->code (event code),
 * - event->tok (either DMB token when event type 0, or GID when event type 1)
 * - event->time (time of day)
 * - event->info (debug info).
 *
 * Context:
 * - Function called in IRQ context from ISM device driver event handler.
 */
static void smcd_handle_event(struct ism_dev *ism, struct ism_event *event)
{
	struct smcd_dev *smcd = ism_get_priv(ism, &smc_ism_client);
	struct smc_ism_event_work *wrk;

	/* device is being torn down: ignore late events */
	if (smcd->going_away)
		return;
	/* copy event to event work queue, and let it be handled there */
	wrk = kmalloc(sizeof(*wrk), GFP_ATOMIC);
	if (!wrk)
		return;	/* best effort: event is dropped on OOM */
	INIT_WORK(&wrk->work, smc_ism_event_work);
	wrk->smcd = smcd;
	wrk->event = *event;	/* copy; 'event' is owned by the caller */
	queue_work(smcd->event_wq, &wrk->work);
}
627 
/* SMCD Device interrupt handler. Called from ISM device interrupt handler.
 * Parameters are the ism device pointer, DMB number, and the DMBE bitmask.
 * Find the connection and schedule the tasklet for this connection.
 *
 * Context:
 * - Function called in IRQ context from ISM device driver IRQ handler.
 */
static void smcd_handle_irq(struct ism_dev *ism, unsigned int dmbno,
			    u16 dmbemask)
{
	struct smcd_dev *smcd = ism_get_priv(ism, &smc_ism_client);
	struct smc_connection *conn = NULL;
	unsigned long flags;

	/* smcd->lock serializes against smc_ism_set_conn()/unset_conn() */
	spin_lock_irqsave(&smcd->lock, flags);
	conn = smcd->conn[dmbno];
	if (conn && !conn->killed)
		tasklet_schedule(&conn->rx_tsklet);
	spin_unlock_irqrestore(&smcd->lock, flags);
}
648 #endif
649 
/* Signal a shutdown software event (ISM_EVENT_CODE_SHUTDOWN) to the
 * peer of this link group, carrying lgr id and vlan id in the event
 * info. Returns 0 without signalling if the peer already shut down,
 * if the device lacks signal_event, or when CONFIG_ISM is disabled.
 */
int smc_ism_signal_shutdown(struct smc_link_group *lgr)
{
	int rc = 0;
#if IS_ENABLED(CONFIG_ISM)
	union smcd_sw_event_info ev_info;

	if (lgr->peer_shutdown)
		return 0;
	if (!lgr->smcd->ops->signal_event)
		return 0;

	memcpy(ev_info.uid, lgr->id, SMC_LGR_ID_SIZE);
	ev_info.vlan_id = lgr->vlan_id;
	ev_info.code = ISM_EVENT_REQUEST;
	rc = lgr->smcd->ops->signal_event(lgr->smcd, &lgr->peer_gid,
					  ISM_EVENT_REQUEST_IR,
					  ISM_EVENT_CODE_SHUTDOWN,
					  ev_info.info);
#endif
	return rc;
}
671 
672 int smc_ism_init(void)
673 {
674 	int rc = 0;
675 
676 	smc_ism_v2_capable = false;
677 	smc_ism_create_system_eid();
678 
679 #if IS_ENABLED(CONFIG_ISM)
680 	rc = ism_register_client(&smc_ism_client);
681 #endif
682 	rc = dibs_register_client(&smc_dibs_client);
683 	return rc;
684 }
685 
/* Module exit: unregister the SMC-D clients from the dibs and ISM layers */
void smc_ism_exit(void)
{
	dibs_unregister_client(&smc_dibs_client);
#if IS_ENABLED(CONFIG_ISM)
	ism_unregister_client(&smc_ism_client);
#endif
}
693