1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright IBM Corp. 2007, 2009
4 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
5 * Frank Pavlic <fpavlic@de.ibm.com>,
6 * Thomas Spatzier <tspat@de.ibm.com>,
7 * Frank Blaschka <frank.blaschka@de.ibm.com>
8 */
9
10 #define KMSG_COMPONENT "qeth"
11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15 #include <linux/string.h>
16 #include <linux/errno.h>
17 #include <linux/kernel.h>
18 #include <linux/slab.h>
19 #include <linux/etherdevice.h>
20 #include <linux/if_bridge.h>
21 #include <linux/list.h>
22 #include <linux/hash.h>
23 #include <linux/hashtable.h>
24 #include <net/switchdev.h>
25 #include <asm/machine.h>
26 #include <asm/chsc.h>
27 #include <asm/css_chars.h>
28 #include <asm/setup.h>
29 #include "qeth_core.h"
30 #include "qeth_l2.h"
31
32 static int qeth_l2_setdelmac_makerc(struct qeth_card *card, u16 retcode)
33 {
34 int rc;
35
36 if (retcode)
37 QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
38 switch (retcode) {
39 case IPA_RC_SUCCESS:
40 rc = 0;
41 break;
42 case IPA_RC_L2_UNSUPPORTED_CMD:
43 rc = -EOPNOTSUPP;
44 break;
45 case IPA_RC_L2_ADDR_TABLE_FULL:
46 rc = -ENOSPC;
47 break;
48 case IPA_RC_L2_DUP_MAC:
49 case IPA_RC_L2_DUP_LAYER3_MAC:
50 rc = -EADDRINUSE;
51 break;
52 case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
53 case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
54 rc = -EADDRNOTAVAIL;
55 break;
56 case IPA_RC_L2_MAC_NOT_FOUND:
57 rc = -ENOENT;
58 break;
59 default:
60 rc = -EIO;
61 break;
62 }
63 return rc;
64 }
65
66 static int qeth_l2_send_setdelmac_cb(struct qeth_card *card,
67 struct qeth_reply *reply,
68 unsigned long data)
69 {
70 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
71
72 return qeth_l2_setdelmac_makerc(card, cmd->hdr.return_code);
73 }
74
75 static int qeth_l2_send_setdelmac(struct qeth_card *card, const __u8 *mac,
76 enum qeth_ipa_cmds ipacmd)
77 {
78 struct qeth_ipa_cmd *cmd;
79 struct qeth_cmd_buffer *iob;
80
81 QETH_CARD_TEXT(card, 2, "L2sdmac");
82 iob = qeth_ipa_alloc_cmd(card, ipacmd, QETH_PROT_IPV4,
83 IPA_DATA_SIZEOF(setdelmac));
84 if (!iob)
85 return -ENOMEM;
86 cmd = __ipa_cmd(iob);
87 cmd->data.setdelmac.mac_length = ETH_ALEN;
88 ether_addr_copy(cmd->data.setdelmac.mac, mac);
89 return qeth_send_ipa_cmd(card, iob, qeth_l2_send_setdelmac_cb, NULL);
90 }
91
92 static int qeth_l2_send_setmac(struct qeth_card *card, const __u8 *mac)
93 {
94 int rc;
95
96 QETH_CARD_TEXT(card, 2, "L2Setmac");
97 rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
98 if (rc == 0) {
99 dev_info(&card->gdev->dev,
100 "MAC address %pM successfully registered\n", mac);
101 } else {
102 switch (rc) {
103 case -EADDRINUSE:
104 dev_warn(&card->gdev->dev,
105 "MAC address %pM already exists\n", mac);
106 break;
107 case -EADDRNOTAVAIL:
108 dev_warn(&card->gdev->dev,
109 "MAC address %pM is not authorized\n", mac);
110 break;
111 }
112 }
113 return rc;
114 }
115
116 static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
117 {
118 enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
119 IPA_CMD_SETGMAC : IPA_CMD_SETVMAC;
120 int rc;
121
122 QETH_CARD_TEXT(card, 2, "L2Wmac");
123 rc = qeth_l2_send_setdelmac(card, mac, cmd);
124 if (rc == -EADDRINUSE)
125 QETH_DBF_MESSAGE(2, "MAC address %012llx is already registered on device %x\n",
126 ether_addr_to_u64(mac), CARD_DEVID(card));
127 else if (rc)
128 QETH_DBF_MESSAGE(2, "Failed to register MAC address %012llx on device %x: %d\n",
129 ether_addr_to_u64(mac), CARD_DEVID(card), rc);
130 return rc;
131 }
132
133 static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
134 {
135 enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
136 IPA_CMD_DELGMAC : IPA_CMD_DELVMAC;
137 int rc;
138
139 QETH_CARD_TEXT(card, 2, "L2Rmac");
140 rc = qeth_l2_send_setdelmac(card, mac, cmd);
141 if (rc)
142 QETH_DBF_MESSAGE(2, "Failed to delete MAC address %012llx on device %x: %d\n",
143 ether_addr_to_u64(mac), CARD_DEVID(card), rc);
144 return rc;
145 }
146
147 static void qeth_l2_drain_rx_mode_cache(struct qeth_card *card)
148 {
149 struct qeth_mac *mac;
150 struct hlist_node *tmp;
151 int i;
152
153 hash_for_each_safe(card->rx_mode_addrs, i, tmp, mac, hnode) {
154 hash_del(&mac->hnode);
155 kfree(mac);
156 }
157 }
158
159 static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue,
160 struct qeth_hdr *hdr, struct sk_buff *skb,
161 __be16 proto, unsigned int data_len)
162 {
163 int cast_type = qeth_get_ether_cast_type(skb);
164 struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
165
166 hdr->hdr.l2.pkt_length = data_len;
167
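/* GSO frames get a TSO header; other frames get a plain layer-2 header and,
 * if the stack deferred checksumming, a request for HW checksum insertion.
 */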
168 if (skb_is_gso(skb)) {
169 hdr->hdr.l2.id = QETH_HEADER_TYPE_L2_TSO;
170 } else {
171 hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
172 if (skb->ip_summed == CHECKSUM_PARTIAL)
173 qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], proto);
174 }
175
176 /* set byte 3 to the casting flags */
177 if (cast_type == RTN_MULTICAST)
178 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
179 else if (cast_type == RTN_BROADCAST)
180 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_BROADCAST;
181 else
182 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST;
183
184 /* VSWITCH relies on the VLAN
185 * information to be present in
186 * the QDIO header */
187 if (veth->h_vlan_proto == htons(ETH_P_8021Q)) {
188 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_VLAN;
189 hdr->hdr.l2.vlan_id = ntohs(veth->h_vlan_TCI);
190 }
191 }
192
193 static int qeth_l2_setdelvlan_makerc(struct qeth_card *card, u16 retcode)
194 {
195 if (retcode)
196 QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
197
198 switch (retcode) {
199 case IPA_RC_SUCCESS:
200 return 0;
201 case IPA_RC_L2_INVALID_VLAN_ID:
202 return -EINVAL;
203 case IPA_RC_L2_DUP_VLAN_ID:
204 return -EEXIST;
205 case IPA_RC_L2_VLAN_ID_NOT_FOUND:
206 return -ENOENT;
207 case IPA_RC_L2_VLAN_ID_NOT_ALLOWED:
208 return -EPERM;
209 default:
210 return -EIO;
211 }
212 }
213
214 static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
215 struct qeth_reply *reply,
216 unsigned long data)
217 {
218 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
219
220 QETH_CARD_TEXT(card, 2, "L2sdvcb");
221 if (cmd->hdr.return_code) {
222 QETH_DBF_MESSAGE(2, "Error in processing VLAN %u on device %x: %#x.\n",
223 cmd->data.setdelvlan.vlan_id,
224 CARD_DEVID(card), cmd->hdr.return_code);
225 QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command);
226 }
227 return qeth_l2_setdelvlan_makerc(card, cmd->hdr.return_code);
228 }
229
230 static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
231 enum qeth_ipa_cmds ipacmd)
232 {
233 struct qeth_ipa_cmd *cmd;
234 struct qeth_cmd_buffer *iob;
235
236 QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd);
237 iob = qeth_ipa_alloc_cmd(card, ipacmd, QETH_PROT_IPV4,
238 IPA_DATA_SIZEOF(setdelvlan));
239 if (!iob)
240 return -ENOMEM;
241 cmd = __ipa_cmd(iob);
242 cmd->data.setdelvlan.vlan_id = i;
243 return qeth_send_ipa_cmd(card, iob, qeth_l2_send_setdelvlan_cb, NULL);
244 }
245
246 static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
247 __be16 proto, u16 vid)
248 {
249 struct qeth_card *card = dev->ml_priv;
250
251 QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
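/* VID 0 carries priority-tagged traffic and is not registered with the card: */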
252 if (!vid)
253 return 0;
254
255 return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
256 }
257
258 static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
259 __be16 proto, u16 vid)
260 {
261 struct qeth_card *card = dev->ml_priv;
262
263 QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
264 if (!vid)
265 return 0;
266
267 return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
268 }
269
270 static void qeth_l2_set_pnso_mode(struct qeth_card *card,
271 enum qeth_pnso_mode mode)
272 {
273 spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
274 WRITE_ONCE(card->info.pnso_mode, mode);
275 spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
276
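/* When turning notifications off, flush any queued notification work: */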
277 if (mode == QETH_PNSO_NONE)
278 drain_workqueue(card->event_wq);
279 }
280
281 static void qeth_l2_dev2br_fdb_flush(struct qeth_card *card)
282 {
283 struct switchdev_notifier_fdb_info info = {};
284
285 QETH_CARD_TEXT(card, 2, "fdbflush");
286
287 info.addr = NULL;
288 /* flush all VLANs: */
289 info.vid = 0;
290 info.added_by_user = false;
291 info.offloaded = true;
292
293 call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
294 card->dev, &info.info, NULL);
295 }
296
297 static int qeth_l2_request_initial_mac(struct qeth_card *card)
298 {
299 int rc = 0;
300
301 QETH_CARD_TEXT(card, 2, "l2reqmac");
302
303 if (machine_is_vm()) {
304 rc = qeth_vm_request_mac(card);
305 if (!rc)
306 goto out;
307 QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %x: %#x\n",
308 CARD_DEVID(card), rc);
309 QETH_CARD_TEXT_(card, 2, "err%04x", rc);
310 /* fall back to alternative mechanism: */
311 }
312
313 rc = qeth_setadpparms_change_macaddr(card);
314 if (!rc)
315 goto out;
316 QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n",
317 CARD_DEVID(card), rc);
318 QETH_CARD_TEXT_(card, 2, "1err%04x", rc);
319
320 /* Fall back once more, but some devices don't support a custom MAC
321 * address:
322 */
323 if (IS_OSM(card) || IS_OSX(card))
324 return (rc) ? rc : -EADDRNOTAVAIL;
325 eth_hw_addr_random(card->dev);
326
327 out:
328 QETH_CARD_HEX(card, 2, card->dev->dev_addr, card->dev->addr_len);
329 return 0;
330 }
331
332 static void qeth_l2_register_dev_addr(struct qeth_card *card)
333 {
334 if (!is_valid_ether_addr(card->dev->dev_addr))
335 qeth_l2_request_initial_mac(card);
336
337 if (!qeth_l2_send_setmac(card, card->dev->dev_addr))
338 card->info.dev_addr_is_registered = 1;
339 else
340 card->info.dev_addr_is_registered = 0;
341 }
342
343 static int qeth_l2_validate_addr(struct net_device *dev)
344 {
345 struct qeth_card *card = dev->ml_priv;
346
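/* Refuse to activate the interface until a MAC address has been registered
 * with the card.
 */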
347 if (card->info.dev_addr_is_registered)
348 return eth_validate_addr(dev);
349
350 QETH_CARD_TEXT(card, 4, "nomacadr");
351 return -EPERM;
352 }
353
354 static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
355 {
356 struct sockaddr *addr = p;
357 struct qeth_card *card = dev->ml_priv;
358 u8 old_addr[ETH_ALEN];
359 int rc = 0;
360
361 QETH_CARD_TEXT(card, 3, "setmac");
362
363 if (IS_OSM(card) || IS_OSX(card)) {
364 QETH_CARD_TEXT(card, 3, "setmcTYP");
365 return -EOPNOTSUPP;
366 }
367 QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN);
368 if (!is_valid_ether_addr(addr->sa_data))
369 return -EADDRNOTAVAIL;
370
371 /* don't register the same address twice */
372 if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
373 card->info.dev_addr_is_registered)
374 return 0;
375
376 /* add the new address, switch over, drop the old */
377 rc = qeth_l2_send_setmac(card, addr->sa_data);
378 if (rc)
379 return rc;
380 ether_addr_copy(old_addr, dev->dev_addr);
381 eth_hw_addr_set(dev, addr->sa_data);
382
383 if (card->info.dev_addr_is_registered)
384 qeth_l2_remove_mac(card, old_addr);
385 card->info.dev_addr_is_registered = 1;
386 return 0;
387 }
388
389 static void qeth_l2_promisc_to_bridge(struct qeth_card *card, bool enable)
390 {
391 int role;
392 int rc;
393
394 QETH_CARD_TEXT(card, 3, "pmisc2br");
395
396 if (enable) {
397 if (card->options.sbp.reflect_promisc_primary)
398 role = QETH_SBP_ROLE_PRIMARY;
399 else
400 role = QETH_SBP_ROLE_SECONDARY;
401 } else
402 role = QETH_SBP_ROLE_NONE;
403
404 rc = qeth_bridgeport_setrole(card, role);
405 QETH_CARD_TEXT_(card, 2, "bpm%c%04x", enable ? '+' : '-', rc);
406 if (!rc) {
407 card->options.sbp.role = role;
408 card->info.promisc_mode = enable;
409 }
410 }
411
412 static void qeth_l2_set_promisc_mode(struct qeth_card *card)
413 {
414 bool enable = card->dev->flags & IFF_PROMISC;
415
416 if (card->info.promisc_mode == enable)
417 return;
418
419 if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) {
420 qeth_setadp_promisc_mode(card, enable);
421 } else {
422 mutex_lock(&card->sbp_lock);
423 if (card->options.sbp.reflect_promisc)
424 qeth_l2_promisc_to_bridge(card, enable);
425 mutex_unlock(&card->sbp_lock);
426 }
427 }
428
429 /* A new MAC address is added to the hash table and marked to be written to
430 * the card only if it is not already present in the hash table.
431 */
433 static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha)
434 {
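/* Hash on the lower four bytes of the MAC address: */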
435 u32 mac_hash = get_unaligned((u32 *)(&ha->addr[2]));
436 struct qeth_mac *mac;
437
438 hash_for_each_possible(card->rx_mode_addrs, mac, hnode, mac_hash) {
439 if (ether_addr_equal_64bits(ha->addr, mac->mac_addr)) {
440 mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
441 return;
442 }
443 }
444
445 mac = kzalloc(sizeof(struct qeth_mac), GFP_ATOMIC);
446 if (!mac)
447 return;
448
449 ether_addr_copy(mac->mac_addr, ha->addr);
450 mac->disp_flag = QETH_DISP_ADDR_ADD;
451
452 hash_add(card->rx_mode_addrs, &mac->hnode, mac_hash);
453 }
454
455 static void qeth_l2_rx_mode_work(struct work_struct *work)
456 {
457 struct qeth_card *card = container_of(work, struct qeth_card,
458 rx_mode_work);
459 struct net_device *dev = card->dev;
460 struct netdev_hw_addr *ha;
461 struct qeth_mac *mac;
462 struct hlist_node *tmp;
463 int i;
464 int rc;
465
466 QETH_CARD_TEXT(card, 3, "setmulti");
467
468 netif_addr_lock_bh(dev);
469 netdev_for_each_mc_addr(ha, dev)
470 qeth_l2_add_mac(card, ha);
471 netdev_for_each_uc_addr(ha, dev)
472 qeth_l2_add_mac(card, ha);
473 netif_addr_unlock_bh(dev);
474
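/* Walk the cached address table: entries still flagged for deletion were not
 * re-reported above and are removed from the card, new entries are written to
 * the card, and all surviving entries are re-armed for deletion so that the
 * next pass can detect stale addresses.
 */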
475 hash_for_each_safe(card->rx_mode_addrs, i, tmp, mac, hnode) {
476 switch (mac->disp_flag) {
477 case QETH_DISP_ADDR_DELETE:
478 qeth_l2_remove_mac(card, mac->mac_addr);
479 hash_del(&mac->hnode);
480 kfree(mac);
481 break;
482 case QETH_DISP_ADDR_ADD:
483 rc = qeth_l2_write_mac(card, mac->mac_addr);
484 if (rc) {
485 hash_del(&mac->hnode);
486 kfree(mac);
487 break;
488 }
489 fallthrough;
490 default:
491 /* for next call to set_rx_mode(): */
492 mac->disp_flag = QETH_DISP_ADDR_DELETE;
493 }
494 }
495
496 qeth_l2_set_promisc_mode(card);
497 }
498
499 static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
500 struct net_device *dev)
501 {
502 struct qeth_card *card = dev->ml_priv;
503 u16 txq = skb_get_queue_mapping(skb);
504 struct qeth_qdio_out_q *queue;
505 int rc;
506
507 if (!skb_is_gso(skb))
508 qdisc_skb_cb(skb)->pkt_len = skb->len;
509 if (IS_IQD(card))
510 txq = qeth_iqd_translate_txq(dev, txq);
511 queue = card->qdio.out_qs[txq];
512
513 rc = qeth_xmit(card, skb, queue, vlan_get_protocol(skb),
514 qeth_l2_fill_header);
515 if (!rc)
516 return NETDEV_TX_OK;
517
518 QETH_TXQ_STAT_INC(queue, tx_dropped);
519 kfree_skb(skb);
520 return NETDEV_TX_OK;
521 }
522
523 static u16 qeth_l2_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
524 struct net_device *sb_dev)
525 {
526 return qeth_iqd_select_queue(dev, skb, qeth_get_ether_cast_type(skb),
527 sb_dev);
528 }
529
530 static void qeth_l2_set_rx_mode(struct net_device *dev)
531 {
532 struct qeth_card *card = dev->ml_priv;
533
534 schedule_work(&card->rx_mode_work);
535 }
536
537 /**
538 * qeth_l2_pnso() - perform network subchannel operation
539 * @card: qeth_card structure pointer
540 * @oc: Operation Code
541 * @cnc: Boolean Change-Notification Control
542 * @cb: Callback function will be executed for each element
543 * of the address list
544 * @priv: Pointer to pass to the callback function.
545 *
546 * Collects network information in a network address list and calls the
547 * callback function for every entry in the list. If "change-notification-
548 * control" is set, further changes in the address list will be reported
549 * via the IPA command.
550 */
551 static int qeth_l2_pnso(struct qeth_card *card, u8 oc, int cnc,
552 void (*cb)(void *priv, struct chsc_pnso_naid_l2 *entry),
553 void *priv)
554 {
555 struct ccw_device *ddev = CARD_DDEV(card);
556 struct chsc_pnso_area *rr;
557 u32 prev_instance = 0;
558 int isfirstblock = 1;
559 int i, size, elems;
560 int rc;
561
562 rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
563 if (rr == NULL)
564 return -ENOMEM;
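/* Issue PNSO repeatedly: retry on -EBUSY, and keep fetching blocks as long as
 * a non-zero resume token indicates an incomplete address list.
 */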
565 do {
566 QETH_CARD_TEXT(card, 2, "PNSO");
567 /* on the first iteration, naihdr.resume_token will be zero */
568 rc = ccw_device_pnso(ddev, rr, oc, rr->naihdr.resume_token,
569 cnc);
570 if (rc)
571 continue;
572 if (cb == NULL)
573 continue;
574
575 size = rr->naihdr.naids;
576 if (size != sizeof(struct chsc_pnso_naid_l2)) {
577 WARN_ON_ONCE(1);
578 continue;
579 }
580
581 elems = (rr->response.length - sizeof(struct chsc_header) -
582 sizeof(struct chsc_pnso_naihdr)) / size;
583
584 if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
585 /* Inform the caller that they need to scrap */
586 /* the data that was already reported via cb */
587 rc = -EAGAIN;
588 break;
589 }
590 isfirstblock = 0;
591 prev_instance = rr->naihdr.instance;
592 for (i = 0; i < elems; i++)
593 (*cb)(priv, &rr->entries[i]);
594 } while ((rc == -EBUSY) || (!rc && /* list stored */
595 /* resume token is non-zero => list incomplete */
596 (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
597
598 if (rc)
599 QETH_CARD_TEXT_(card, 2, "PNrp%04x", rr->response.code);
600
601 free_page((unsigned long)rr);
602 return rc;
603 }
604
605 static bool qeth_is_my_net_if_token(struct qeth_card *card,
606 struct net_if_token *token)
607 {
608 return ((card->info.ddev_devno == token->devnum) &&
609 (card->info.cssid == token->cssid) &&
610 (card->info.iid == token->iid) &&
611 (card->info.ssid == token->ssid) &&
612 (card->info.chpid == token->chpid) &&
613 (card->info.chid == token->chid));
614 }
615
616 /**
617 * qeth_l2_dev2br_fdb_notify() - update fdb of master bridge
618 * @card: qeth_card structure pointer
619 * @code: event bitmask: high order bit 0x80 set to
620 * 1 - removal of an object
621 * 0 - addition of an object
622 * Object type(s):
623 * 0x01 - VLAN, 0x02 - MAC, 0x03 - VLAN and MAC
624 * @token: "network token" structure identifying 'physical' location
625 * of the target
626 * @addr_lnid: structure with MAC address and VLAN ID of the target
627 */
628 static void qeth_l2_dev2br_fdb_notify(struct qeth_card *card, u8 code,
629 struct net_if_token *token,
630 struct mac_addr_lnid *addr_lnid)
631 {
632 struct switchdev_notifier_fdb_info info = {};
633 u8 ntfy_mac[ETH_ALEN];
634
635 ether_addr_copy(ntfy_mac, addr_lnid->mac);
636 /* Ignore VLAN only changes */
637 if (!(code & IPA_ADDR_CHANGE_CODE_MACADDR))
638 return;
639 /* Ignore mcast entries */
640 if (is_multicast_ether_addr(ntfy_mac))
641 return;
642 /* Ignore my own addresses */
643 if (qeth_is_my_net_if_token(card, token))
644 return;
645
646 info.addr = ntfy_mac;
647 /* don't report VLAN IDs */
648 info.vid = 0;
649 info.added_by_user = false;
650 info.offloaded = true;
651
652 if (code & IPA_ADDR_CHANGE_CODE_REMOVAL) {
653 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
654 card->dev, &info.info, NULL);
655 QETH_CARD_TEXT(card, 4, "andelmac");
656 QETH_CARD_TEXT_(card, 4,
657 "mc%012llx", ether_addr_to_u64(ntfy_mac));
658 } else {
659 call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
660 card->dev, &info.info, NULL);
661 QETH_CARD_TEXT(card, 4, "anaddmac");
662 QETH_CARD_TEXT_(card, 4,
663 "mc%012llx", ether_addr_to_u64(ntfy_mac));
664 }
665 }
666
667 static void qeth_l2_dev2br_an_set_cb(void *priv,
668 struct chsc_pnso_naid_l2 *entry)
669 {
670 u8 code = IPA_ADDR_CHANGE_CODE_MACADDR;
671 struct qeth_card *card = priv;
672
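/* An lnid below VLAN_N_VID denotes a valid VLAN ID for this entry: */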
673 if (entry->addr_lnid.lnid < VLAN_N_VID)
674 code |= IPA_ADDR_CHANGE_CODE_VLANID;
675 qeth_l2_dev2br_fdb_notify(card, code,
676 (struct net_if_token *)&entry->nit,
677 (struct mac_addr_lnid *)&entry->addr_lnid);
678 }
679
680 /**
681 * qeth_l2_dev2br_an_set() -
682 * Enable or disable 'dev to bridge network address notification'
683 * @card: qeth_card structure pointer
684 * @enable: Enable or disable 'dev to bridge network address notification'
685 *
686 * Returns negative errno-compatible error indication or 0 on success.
687 *
688 * On enable, emits a series of address notifications for all
689 * currently registered hosts.
690 */
691 static int qeth_l2_dev2br_an_set(struct qeth_card *card, bool enable)
692 {
693 int rc;
694
695 if (enable) {
696 QETH_CARD_TEXT(card, 2, "anseton");
697 rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 1,
698 qeth_l2_dev2br_an_set_cb, card);
699 if (rc == -EAGAIN)
700 /* address notification enabled, but inconsistent
701 * addresses reported -> disable address notification
702 */
703 qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0,
704 NULL, NULL);
705 } else {
706 QETH_CARD_TEXT(card, 2, "ansetoff");
707 rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0, NULL, NULL);
708 }
709
710 return rc;
711 }
712
713 struct qeth_l2_br2dev_event_work {
714 struct work_struct work;
715 struct net_device *br_dev;
716 struct net_device *lsync_dev;
717 struct net_device *dst_dev;
718 unsigned long event;
719 unsigned char addr[ETH_ALEN];
720 };
721
722 static const struct net_device_ops qeth_l2_iqd_netdev_ops;
723 static const struct net_device_ops qeth_l2_osa_netdev_ops;
724
725 static bool qeth_l2_must_learn(struct net_device *netdev,
726 struct net_device *dstdev)
727 {
728 struct qeth_priv *priv;
729
730 priv = netdev_priv(netdev);
731 return (netdev != dstdev &&
732 (priv->brport_features & BR_LEARNING_SYNC) &&
733 !(br_port_flag_is_set(netdev, BR_ISOLATED) &&
734 br_port_flag_is_set(dstdev, BR_ISOLATED)) &&
735 (netdev->netdev_ops == &qeth_l2_iqd_netdev_ops ||
736 netdev->netdev_ops == &qeth_l2_osa_netdev_ops));
737 }
738
739 /**
740 * qeth_l2_br2dev_worker() - update local MACs
741 * @work: bridge to device FDB update
742 *
743 * Update local MACs of a learning_sync bridgeport so it can receive
744 * messages for a destination port.
745 * In case of an isolated learning_sync port, also update its isolated
746 * siblings.
747 */
748 static void qeth_l2_br2dev_worker(struct work_struct *work)
749 {
750 struct qeth_l2_br2dev_event_work *br2dev_event_work =
751 container_of(work, struct qeth_l2_br2dev_event_work, work);
752 struct net_device *lsyncdev = br2dev_event_work->lsync_dev;
753 struct net_device *dstdev = br2dev_event_work->dst_dev;
754 struct net_device *brdev = br2dev_event_work->br_dev;
755 unsigned long event = br2dev_event_work->event;
756 unsigned char *addr = br2dev_event_work->addr;
757 struct qeth_card *card = lsyncdev->ml_priv;
758 struct net_device *lowerdev;
759 struct list_head *iter;
760 int err = 0;
761
762 QETH_CARD_TEXT_(card, 4, "b2dw%04lx", event);
763 QETH_CARD_TEXT_(card, 4, "ma%012llx", ether_addr_to_u64(addr));
764
765 rcu_read_lock();
766 /* Verify preconditions are still valid: */
767 if (!netif_is_bridge_port(lsyncdev) ||
768 brdev != netdev_master_upper_dev_get_rcu(lsyncdev))
769 goto unlock;
770 if (!qeth_l2_must_learn(lsyncdev, dstdev))
771 goto unlock;
772
773 if (br_port_flag_is_set(lsyncdev, BR_ISOLATED)) {
774 /* Update lsyncdev and its isolated sibling(s): */
775 iter = &brdev->adj_list.lower;
776 lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
777 while (lowerdev) {
778 if (br_port_flag_is_set(lowerdev, BR_ISOLATED)) {
779 switch (event) {
780 case SWITCHDEV_FDB_ADD_TO_DEVICE:
781 err = dev_uc_add(lowerdev, addr);
782 break;
783 case SWITCHDEV_FDB_DEL_TO_DEVICE:
784 err = dev_uc_del(lowerdev, addr);
785 break;
786 default:
787 break;
788 }
789 if (err) {
790 QETH_CARD_TEXT(card, 2, "b2derris");
791 QETH_CARD_TEXT_(card, 2,
792 "err%02lx%03d", event,
793 lowerdev->ifindex);
794 }
795 }
796 lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
797 }
798 } else {
799 switch (event) {
800 case SWITCHDEV_FDB_ADD_TO_DEVICE:
801 err = dev_uc_add(lsyncdev, addr);
802 break;
803 case SWITCHDEV_FDB_DEL_TO_DEVICE:
804 err = dev_uc_del(lsyncdev, addr);
805 break;
806 default:
807 break;
808 }
809 if (err)
810 QETH_CARD_TEXT_(card, 2, "b2derr%02lx", event);
811 }
812
813 unlock:
814 rcu_read_unlock();
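/* Drop the references taken in qeth_l2_br2dev_queue_work(): */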
815 dev_put(brdev);
816 dev_put(lsyncdev);
817 dev_put(dstdev);
818 kfree(br2dev_event_work);
819 }
820
821 static int qeth_l2_br2dev_queue_work(struct net_device *brdev,
822 struct net_device *lsyncdev,
823 struct net_device *dstdev,
824 unsigned long event,
825 const unsigned char *addr)
826 {
827 struct qeth_l2_br2dev_event_work *worker_data;
828 struct qeth_card *card;
829
830 worker_data = kzalloc(sizeof(*worker_data), GFP_ATOMIC);
831 if (!worker_data)
832 return -ENOMEM;
833 INIT_WORK(&worker_data->work, qeth_l2_br2dev_worker);
834 worker_data->br_dev = brdev;
835 worker_data->lsync_dev = lsyncdev;
836 worker_data->dst_dev = dstdev;
837 worker_data->event = event;
838 ether_addr_copy(worker_data->addr, addr);
839
840 card = lsyncdev->ml_priv;
841 /* Take a reference on the sw port devices and the bridge */
842 dev_hold(brdev);
843 dev_hold(lsyncdev);
844 dev_hold(dstdev);
845 queue_work(card->event_wq, &worker_data->work);
846 return 0;
847 }
848
849 /* Called under rtnl_lock */
850 static int qeth_l2_switchdev_event(struct notifier_block *unused,
851 unsigned long event, void *ptr)
852 {
853 struct net_device *dstdev, *brdev, *lowerdev;
854 struct switchdev_notifier_fdb_info *fdb_info;
855 struct switchdev_notifier_info *info = ptr;
856 struct list_head *iter;
857 struct qeth_card *card;
858 int rc;
859
860 if (!(event == SWITCHDEV_FDB_ADD_TO_DEVICE ||
861 event == SWITCHDEV_FDB_DEL_TO_DEVICE))
862 return NOTIFY_DONE;
863
864 dstdev = switchdev_notifier_info_to_dev(info);
865 brdev = netdev_master_upper_dev_get_rcu(dstdev);
866 if (!brdev || !netif_is_bridge_master(brdev))
867 return NOTIFY_DONE;
868 fdb_info = container_of(info,
869 struct switchdev_notifier_fdb_info,
870 info);
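/* Fan the FDB event out to every qeth port of this bridge that has
 * learning_sync enabled:
 */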
871 iter = &brdev->adj_list.lower;
872 lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
873 while (lowerdev) {
874 if (qeth_l2_must_learn(lowerdev, dstdev)) {
875 card = lowerdev->ml_priv;
876 QETH_CARD_TEXT_(card, 4, "b2dqw%03lx", event);
877 rc = qeth_l2_br2dev_queue_work(brdev, lowerdev,
878 dstdev, event,
879 fdb_info->addr);
880 if (rc) {
881 QETH_CARD_TEXT(card, 2, "b2dqwerr");
882 return NOTIFY_BAD;
883 }
884 }
885 lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
886 }
887 return NOTIFY_DONE;
888 }
889
890 static struct notifier_block qeth_l2_sw_notifier = {
891 .notifier_call = qeth_l2_switchdev_event,
892 };
893
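/* The switchdev notifier is shared by all qeth L2 devices with learning_sync
 * enabled; it is registered on first use and unregistered with the last user.
 */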
894 static refcount_t qeth_l2_switchdev_notify_refcnt;
895
896 /* Called under rtnl_lock */
897 static void qeth_l2_br2dev_get(void)
898 {
899 int rc;
900
901 if (!refcount_inc_not_zero(&qeth_l2_switchdev_notify_refcnt)) {
902 rc = register_switchdev_notifier(&qeth_l2_sw_notifier);
903 if (rc) {
904 QETH_DBF_MESSAGE(2,
905 "failed to register qeth_l2_sw_notifier: %d\n",
906 rc);
907 } else {
908 refcount_set(&qeth_l2_switchdev_notify_refcnt, 1);
909 QETH_DBF_MESSAGE(2, "qeth_l2_sw_notifier registered\n");
910 }
911 }
912 QETH_DBF_TEXT_(SETUP, 2, "b2d+%04d",
913 qeth_l2_switchdev_notify_refcnt.refs.counter);
914 }
915
916 /* Called under rtnl_lock */
917 static void qeth_l2_br2dev_put(void)
918 {
919 int rc;
920
921 if (refcount_dec_and_test(&qeth_l2_switchdev_notify_refcnt)) {
922 rc = unregister_switchdev_notifier(&qeth_l2_sw_notifier);
923 if (rc) {
924 QETH_DBF_MESSAGE(2,
925 "failed to unregister qeth_l2_sw_notifier: %d\n",
926 rc);
927 } else {
928 QETH_DBF_MESSAGE(2,
929 "qeth_l2_sw_notifier unregistered\n");
930 }
931 }
932 QETH_DBF_TEXT_(SETUP, 2, "b2d-%04d",
933 qeth_l2_switchdev_notify_refcnt.refs.counter);
934 }
935
936 static int qeth_l2_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
937 struct net_device *dev, u32 filter_mask,
938 int nlflags)
939 {
940 struct qeth_priv *priv = netdev_priv(dev);
941 struct qeth_card *card = dev->ml_priv;
942 u16 mode = BRIDGE_MODE_UNDEF;
943
944 /* Do not even show qeth devs that cannot do bridge_setlink */
945 if (!priv->brport_hw_features || !netif_device_present(dev) ||
946 qeth_bridgeport_is_in_use(card))
947 return -EOPNOTSUPP;
948
949 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
950 mode, priv->brport_features,
951 priv->brport_hw_features,
952 nlflags, filter_mask, NULL);
953 }
954
955 static const struct nla_policy qeth_brport_policy[IFLA_BRPORT_MAX + 1] = {
956 [IFLA_BRPORT_LEARNING_SYNC] = { .type = NLA_U8 },
957 };
958
959 /**
960 * qeth_l2_bridge_setlink() - set bridgeport attributes
961 * @dev: netdevice
962 * @nlh: netlink message header
963 * @flags: bridge flags (here: BRIDGE_FLAGS_SELF)
964 * @extack: extended ACK report struct
965 *
966 * Called under rtnl_lock
967 */
968 static int qeth_l2_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
969 u16 flags, struct netlink_ext_ack *extack)
970 {
971 struct qeth_priv *priv = netdev_priv(dev);
972 struct nlattr *bp_tb[IFLA_BRPORT_MAX + 1];
973 struct qeth_card *card = dev->ml_priv;
974 struct nlattr *attr, *nested_attr;
975 bool enable, has_protinfo = false;
976 int rem1, rem2;
977 int rc;
978
979 if (!netif_device_present(dev))
980 return -ENODEV;
981
982 nlmsg_for_each_attr(attr, nlh, sizeof(struct ifinfomsg), rem1) {
983 if (nla_type(attr) == IFLA_PROTINFO) {
984 rc = nla_parse_nested(bp_tb, IFLA_BRPORT_MAX, attr,
985 qeth_brport_policy, extack);
986 if (rc)
987 return rc;
988 has_protinfo = true;
989 } else if (nla_type(attr) == IFLA_AF_SPEC) {
990 nla_for_each_nested(nested_attr, attr, rem2) {
991 if (nla_type(nested_attr) == IFLA_BRIDGE_FLAGS)
992 continue;
993 NL_SET_ERR_MSG_ATTR(extack, nested_attr,
994 "Unsupported attribute");
995 return -EINVAL;
996 }
997 } else {
998 NL_SET_ERR_MSG_ATTR(extack, attr, "Unsupported attribute");
999 return -EINVAL;
1000 }
1001 }
1002 if (!has_protinfo)
1003 return 0;
1004 if (!bp_tb[IFLA_BRPORT_LEARNING_SYNC])
1005 return -EINVAL;
1006 if (!(priv->brport_hw_features & BR_LEARNING_SYNC)) {
1007 NL_SET_ERR_MSG_ATTR(extack, bp_tb[IFLA_BRPORT_LEARNING_SYNC],
1008 "Operation not supported by HW");
1009 return -EOPNOTSUPP;
1010 }
1011 if (!IS_ENABLED(CONFIG_NET_SWITCHDEV)) {
1012 NL_SET_ERR_MSG_ATTR(extack, bp_tb[IFLA_BRPORT_LEARNING_SYNC],
1013 "Requires NET_SWITCHDEV");
1014 return -EOPNOTSUPP;
1015 }
1016 enable = !!nla_get_u8(bp_tb[IFLA_BRPORT_LEARNING_SYNC]);
1017
1018 if (enable == !!(priv->brport_features & BR_LEARNING_SYNC))
1019 return 0;
1020
1021 mutex_lock(&card->sbp_lock);
1022 /* do not change anything if BridgePort is enabled */
1023 if (qeth_bridgeport_is_in_use(card)) {
1024 NL_SET_ERR_MSG(extack, "n/a (BridgePort)");
1025 rc = -EBUSY;
1026 } else if (enable) {
1027 qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
1028 rc = qeth_l2_dev2br_an_set(card, true);
1029 if (rc) {
1030 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
1031 } else {
1032 priv->brport_features |= BR_LEARNING_SYNC;
1033 qeth_l2_br2dev_get();
1034 }
1035 } else {
1036 rc = qeth_l2_dev2br_an_set(card, false);
1037 if (!rc) {
1038 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
1039 priv->brport_features ^= BR_LEARNING_SYNC;
1040 qeth_l2_dev2br_fdb_flush(card);
1041 qeth_l2_br2dev_put();
1042 }
1043 }
1044 mutex_unlock(&card->sbp_lock);
1045
1046 return rc;
1047 }
1048
1049 static const struct net_device_ops qeth_l2_iqd_netdev_ops = {
1050 .ndo_open = qeth_open,
1051 .ndo_stop = qeth_stop,
1052 .ndo_get_stats64 = qeth_get_stats64,
1053 .ndo_start_xmit = qeth_l2_hard_start_xmit,
1054 .ndo_features_check = qeth_features_check,
1055 .ndo_select_queue = qeth_l2_iqd_select_queue,
1056 .ndo_validate_addr = qeth_l2_validate_addr,
1057 .ndo_set_rx_mode = qeth_l2_set_rx_mode,
1058 .ndo_eth_ioctl = qeth_do_ioctl,
1059 .ndo_siocdevprivate = qeth_siocdevprivate,
1060 .ndo_set_mac_address = qeth_l2_set_mac_address,
1061 .ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid,
1062 .ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid,
1063 .ndo_tx_timeout = qeth_tx_timeout,
1064 .ndo_fix_features = qeth_fix_features,
1065 .ndo_set_features = qeth_set_features,
1066 .ndo_bridge_getlink = qeth_l2_bridge_getlink,
1067 .ndo_bridge_setlink = qeth_l2_bridge_setlink,
1068 };
1069
1070 static const struct net_device_ops qeth_l2_osa_netdev_ops = {
1071 .ndo_open = qeth_open,
1072 .ndo_stop = qeth_stop,
1073 .ndo_get_stats64 = qeth_get_stats64,
1074 .ndo_start_xmit = qeth_l2_hard_start_xmit,
1075 .ndo_features_check = qeth_features_check,
1076 .ndo_select_queue = qeth_osa_select_queue,
1077 .ndo_validate_addr = qeth_l2_validate_addr,
1078 .ndo_set_rx_mode = qeth_l2_set_rx_mode,
1079 .ndo_eth_ioctl = qeth_do_ioctl,
1080 .ndo_siocdevprivate = qeth_siocdevprivate,
1081 .ndo_set_mac_address = qeth_l2_set_mac_address,
1082 .ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid,
1083 .ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid,
1084 .ndo_tx_timeout = qeth_tx_timeout,
1085 .ndo_fix_features = qeth_fix_features,
1086 .ndo_set_features = qeth_set_features,
1087 };
1088
1089 static int qeth_l2_setup_netdev(struct qeth_card *card)
1090 {
1091 card->dev->netdev_ops = IS_IQD(card) ? &qeth_l2_iqd_netdev_ops :
1092 &qeth_l2_osa_netdev_ops;
1093 card->dev->needed_headroom = sizeof(struct qeth_hdr);
1094 card->dev->priv_flags |= IFF_UNICAST_FLT;
1095
1096 if (IS_OSM(card)) {
1097 card->dev->features |= NETIF_F_VLAN_CHALLENGED;
1098 } else {
1099 if (!IS_VM_NIC(card))
1100 card->dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1101 card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1102 }
1103
1104 if (IS_OSD(card) && !IS_VM_NIC(card)) {
1105 card->dev->features |= NETIF_F_SG;
1106 /* OSA 3S and earlier have no RX/TX checksum support */
1107 if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) {
1108 card->dev->hw_features |= NETIF_F_IP_CSUM;
1109 card->dev->vlan_features |= NETIF_F_IP_CSUM;
1110 }
1111 }
1112 if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) {
1113 card->dev->hw_features |= NETIF_F_IPV6_CSUM;
1114 card->dev->vlan_features |= NETIF_F_IPV6_CSUM;
1115 }
1116 if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM) ||
1117 qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) {
1118 card->dev->hw_features |= NETIF_F_RXCSUM;
1119 card->dev->vlan_features |= NETIF_F_RXCSUM;
1120 }
1121 if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
1122 card->dev->hw_features |= NETIF_F_TSO;
1123 card->dev->vlan_features |= NETIF_F_TSO;
1124 }
1125 if (qeth_is_supported6(card, IPA_OUTBOUND_TSO)) {
1126 card->dev->hw_features |= NETIF_F_TSO6;
1127 card->dev->vlan_features |= NETIF_F_TSO6;
1128 }
1129
1130 if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1131 card->dev->needed_headroom = sizeof(struct qeth_hdr_tso);
1132 netif_keep_dst(card->dev);
1133 netif_set_tso_max_size(card->dev,
1134 PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1));
1135 }
1136
1137 netif_napi_add(card->dev, &card->napi, qeth_poll);
1138 return register_netdev(card->dev);
1139 }
1140
1141 static void qeth_l2_trace_features(struct qeth_card *card)
1142 {
1143 /* Set BridgePort features */
1144 QETH_CARD_TEXT(card, 2, "featuSBP");
1145 QETH_CARD_HEX(card, 2, &card->options.sbp.supported_funcs,
1146 sizeof(card->options.sbp.supported_funcs));
1147 /* VNIC Characteristics features */
1148 QETH_CARD_TEXT(card, 2, "feaVNICC");
1149 QETH_CARD_HEX(card, 2, &card->options.vnicc.sup_chars,
1150 sizeof(card->options.vnicc.sup_chars));
1151 }
1152
1153 static void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
1154 {
1155 if (!card->options.sbp.reflect_promisc &&
1156 card->options.sbp.role != QETH_SBP_ROLE_NONE) {
1157 /* Conditional to avoid spurious error messages */
1158 qeth_bridgeport_setrole(card, card->options.sbp.role);
1159 /* Let the callback function refresh the stored role value. */
1160 qeth_bridgeport_query_ports(card, &card->options.sbp.role,
1161 NULL);
1162 }
1163 if (card->options.sbp.hostnotification) {
1164 if (qeth_bridgeport_an_set(card, 1))
1165 card->options.sbp.hostnotification = 0;
1166 }
1167 }
1168
1169 /**
1170 * qeth_l2_detect_dev2br_support() -
1171 * Detect whether this card supports 'dev to bridge fdb network address
1172 * change notification' and thus can support the learning_sync bridgeport
1173 * attribute
1174 * @card: qeth_card structure pointer
1175 */
1176 static void qeth_l2_detect_dev2br_support(struct qeth_card *card)
1177 {
1178 struct qeth_priv *priv = netdev_priv(card->dev);
1179 bool dev2br_supported;
1180
1181 QETH_CARD_TEXT(card, 2, "d2brsup");
1182 if (!IS_IQD(card))
1183 return;
1184
1185 /* dev2br requires valid cssid,iid,chid */
1186 dev2br_supported = card->info.ids_valid &&
1187 css_general_characteristics.enarf;
1188 QETH_CARD_TEXT_(card, 2, "D2Bsup%02x", dev2br_supported);
1189
1190 if (dev2br_supported)
1191 priv->brport_hw_features |= BR_LEARNING_SYNC;
1192 else
1193 priv->brport_hw_features &= ~BR_LEARNING_SYNC;
1194 }
1195
1196 static void qeth_l2_enable_brport_features(struct qeth_card *card)
1197 {
1198 struct qeth_priv *priv = netdev_priv(card->dev);
1199 int rc;
1200
1201 if (priv->brport_features & BR_LEARNING_SYNC) {
1202 if (priv->brport_hw_features & BR_LEARNING_SYNC) {
1203 qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
1204 rc = qeth_l2_dev2br_an_set(card, true);
1205 if (rc == -EAGAIN) {
1206 /* Recoverable error, retry once */
1207 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
1208 qeth_l2_dev2br_fdb_flush(card);
1209 qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
1210 rc = qeth_l2_dev2br_an_set(card, true);
1211 }
1212 if (rc) {
1213 netdev_err(card->dev,
1214 "failed to enable bridge learning_sync: %d\n",
1215 rc);
1216 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
1217 qeth_l2_dev2br_fdb_flush(card);
1218 priv->brport_features ^= BR_LEARNING_SYNC;
1219 }
1220 } else {
1221 dev_warn(&card->gdev->dev,
1222 "bridge learning_sync not supported\n");
1223 priv->brport_features ^= BR_LEARNING_SYNC;
1224 }
1225 }
1226 }
1227
1228 /* SETBRIDGEPORT support, async notifications */
1229
1230 enum qeth_an_event_type {anev_reg_unreg, anev_abort, anev_reset};
1231
1232 /**
1233 * qeth_bridge_emit_host_event() - bridgeport address change notification
1234 * @card: qeth_card structure pointer, for udev events.
1235 * @evtype: "normal" register/unregister, or abort, or reset. For abort
1236 * and reset, token and addr_lnid are unused and may be NULL.
1237 * @code: event bitmask: high order bit 0x80 value 1 means removal of an
1238 * object, 0 - addition of an object.
1239 * 0x01 - VLAN, 0x02 - MAC, 0x03 - VLAN and MAC.
1240 * @token: "network token" structure identifying physical address of the port.
1241 * @addr_lnid: pointer to structure with MAC address and VLAN ID.
1242 *
1243 * This function is called when registrations and deregistrations are
1244 * reported by the hardware, and also when notifications are enabled -
1245 * for all currently registered addresses.
1246 */
1247 static void qeth_bridge_emit_host_event(struct qeth_card *card,
1248 enum qeth_an_event_type evtype,
1249 u8 code,
1250 struct net_if_token *token,
1251 struct mac_addr_lnid *addr_lnid)
1252 {
1253 char str[7][32];
1254 char *env[8];
1255 int i = 0;
1256
1257 switch (evtype) {
1258 case anev_reg_unreg:
1259 scnprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=%s",
1260 (code & IPA_ADDR_CHANGE_CODE_REMOVAL)
1261 ? "deregister" : "register");
1262 env[i] = str[i]; i++;
1263 if (code & IPA_ADDR_CHANGE_CODE_VLANID) {
1264 scnprintf(str[i], sizeof(str[i]), "VLAN=%d",
1265 addr_lnid->lnid);
1266 env[i] = str[i]; i++;
1267 }
1268 if (code & IPA_ADDR_CHANGE_CODE_MACADDR) {
1269 scnprintf(str[i], sizeof(str[i]), "MAC=%pM",
1270 addr_lnid->mac);
1271 env[i] = str[i]; i++;
1272 }
1273 scnprintf(str[i], sizeof(str[i]), "NTOK_BUSID=%x.%x.%04x",
1274 token->cssid, token->ssid, token->devnum);
1275 env[i] = str[i]; i++;
1276 scnprintf(str[i], sizeof(str[i]), "NTOK_IID=%02x", token->iid);
1277 env[i] = str[i]; i++;
1278 scnprintf(str[i], sizeof(str[i]), "NTOK_CHPID=%02x",
1279 token->chpid);
1280 env[i] = str[i]; i++;
1281 scnprintf(str[i], sizeof(str[i]), "NTOK_CHID=%04x",
1282 token->chid);
1283 env[i] = str[i]; i++;
1284 break;
1285 case anev_abort:
1286 scnprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=abort");
1287 env[i] = str[i]; i++;
1288 break;
1289 case anev_reset:
1290 scnprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=reset");
1291 env[i] = str[i]; i++;
1292 break;
1293 }
1294 env[i] = NULL;
1295 kobject_uevent_env(&card->gdev->dev.kobj, KOBJ_CHANGE, env);
1296 }
1297
1298 struct qeth_bridge_state_data {
1299 struct work_struct worker;
1300 struct qeth_card *card;
1301 u8 role;
1302 u8 state;
1303 };
1304
1305 static void qeth_bridge_state_change_worker(struct work_struct *work)
1306 {
1307 struct qeth_bridge_state_data *data =
1308 container_of(work, struct qeth_bridge_state_data, worker);
1309 char env_locrem[32];
1310 char env_role[32];
1311 char env_state[32];
1312 char *env[] = {
1313 env_locrem,
1314 env_role,
1315 env_state,
1316 NULL
1317 };
1318
1319 scnprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
1320 scnprintf(env_role, sizeof(env_role), "ROLE=%s",
1321 (data->role == QETH_SBP_ROLE_NONE) ? "none" :
1322 (data->role == QETH_SBP_ROLE_PRIMARY) ? "primary" :
1323 (data->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" :
1324 "<INVALID>");
1325 scnprintf(env_state, sizeof(env_state), "STATE=%s",
1326 (data->state == QETH_SBP_STATE_INACTIVE) ? "inactive" :
1327 (data->state == QETH_SBP_STATE_STANDBY) ? "standby" :
1328 (data->state == QETH_SBP_STATE_ACTIVE) ? "active" :
1329 "<INVALID>");
1330 kobject_uevent_env(&data->card->gdev->dev.kobj,
1331 KOBJ_CHANGE, env);
1332 kfree(data);
1333 }
1334
1335 static void qeth_bridge_state_change(struct qeth_card *card,
1336 struct qeth_ipa_cmd *cmd)
1337 {
1338 struct qeth_sbp_port_data *qports = &cmd->data.sbp.data.port_data;
1339 struct qeth_bridge_state_data *data;
1340
1341 QETH_CARD_TEXT(card, 2, "brstchng");
1342 if (qports->num_entries == 0) {
1343 QETH_CARD_TEXT(card, 2, "BPempty");
1344 return;
1345 }
1346 if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
1347 QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length);
1348 return;
1349 }
1350
1351 data = kzalloc(sizeof(*data), GFP_ATOMIC);
1352 if (!data) {
1353 QETH_CARD_TEXT(card, 2, "BPSalloc");
1354 return;
1355 }
1356 INIT_WORK(&data->worker, qeth_bridge_state_change_worker);
1357 data->card = card;
1358 /* Information for the local port: */
1359 data->role = qports->entry[0].role;
1360 data->state = qports->entry[0].state;
1361
1362 queue_work(card->event_wq, &data->worker);
1363 }
1364
1365 struct qeth_addr_change_data {
1366 struct delayed_work dwork;
1367 struct qeth_card *card;
1368 struct qeth_ipacmd_addr_change ac_event;
1369 };
1370
1371 static void qeth_l2_dev2br_worker(struct work_struct *work)
1372 {
1373 struct delayed_work *dwork = to_delayed_work(work);
1374 struct qeth_addr_change_data *data;
1375 struct qeth_card *card;
1376 struct qeth_priv *priv;
1377 unsigned int i;
1378 int rc;
1379
1380 data = container_of(dwork, struct qeth_addr_change_data, dwork);
1381 card = data->card;
1382 priv = netdev_priv(card->dev);
1383
1384 QETH_CARD_TEXT(card, 4, "dev2brew");
1385
1386 if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE)
1387 goto free;
1388
1389 if (data->ac_event.lost_event_mask) {
1390 /* Potential re-config in progress, try again later: */
1391 if (!rtnl_trylock()) {
1392 queue_delayed_work(card->event_wq, dwork,
1393 msecs_to_jiffies(100));
1394 return;
1395 }
1396
1397 if (!netif_device_present(card->dev)) {
1398 rtnl_unlock();
1399 goto free;
1400 }
1401
1402 QETH_DBF_MESSAGE(3,
1403 "Address change notification overflow on device %x\n",
1404 CARD_DEVID(card));
1405 /* Card fdb and bridge fdb are out of sync, card has stopped
1406 * notifications (no need to drain_workqueue). Purge all
1407 * 'extern_learn' entries from the parent bridge and restart
1408 * the notifications.
1409 */
1410 qeth_l2_dev2br_fdb_flush(card);
1411 rc = qeth_l2_dev2br_an_set(card, true);
1412 if (rc) {
1413 /* TODO: if we want to retry after -EAGAIN, be
1414 * aware there could be stale entries in the
1415 * workqueue now, that need to be drained.
1416 * For now we give up:
1417 */
1418 netdev_err(card->dev,
1419 "bridge learning_sync failed to recover: %d\n",
1420 rc);
1421 WRITE_ONCE(card->info.pnso_mode,
1422 QETH_PNSO_NONE);
1423 /* To remove fdb entries reported by an_set: */
1424 qeth_l2_dev2br_fdb_flush(card);
1425 priv->brport_features ^= BR_LEARNING_SYNC;
1426 } else {
1427 QETH_DBF_MESSAGE(3,
1428 "Address Notification resynced on device %x\n",
1429 CARD_DEVID(card));
1430 }
1431
1432 rtnl_unlock();
1433 } else {
1434 for (i = 0; i < data->ac_event.num_entries; i++) {
1435 struct qeth_ipacmd_addr_change_entry *entry =
1436 &data->ac_event.entry[i];
1437 qeth_l2_dev2br_fdb_notify(card,
1438 entry->change_code,
1439 &entry->token,
1440 &entry->addr_lnid);
1441 }
1442 }
1443
1444 free:
1445 kfree(data);
1446 }
1447
1448 static void qeth_addr_change_event_worker(struct work_struct *work)
1449 {
1450 struct delayed_work *dwork = to_delayed_work(work);
1451 struct qeth_addr_change_data *data;
1452 struct qeth_card *card;
1453 int i;
1454
1455 data = container_of(dwork, struct qeth_addr_change_data, dwork);
1456 card = data->card;
1457
1458 QETH_CARD_TEXT(data->card, 4, "adrchgew");
1459
1460 if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE)
1461 goto free;
1462
1463 if (data->ac_event.lost_event_mask) {
1464 /* Potential re-config in progress, try again later: */
1465 if (!mutex_trylock(&card->sbp_lock)) {
1466 queue_delayed_work(card->event_wq, dwork,
1467 msecs_to_jiffies(100));
1468 return;
1469 }
1470
1471 dev_info(&data->card->gdev->dev,
1472 "Address change notification stopped on %s (%s)\n",
1473 netdev_name(card->dev),
1474 (data->ac_event.lost_event_mask == 0x01)
1475 ? "Overflow"
1476 : (data->ac_event.lost_event_mask == 0x02)
1477 ? "Bridge port state change"
1478 : "Unknown reason");
1479
1480 data->card->options.sbp.hostnotification = 0;
1481 card->info.pnso_mode = QETH_PNSO_NONE;
1482 mutex_unlock(&data->card->sbp_lock);
1483 qeth_bridge_emit_host_event(data->card, anev_abort,
1484 0, NULL, NULL);
1485 } else
1486 for (i = 0; i < data->ac_event.num_entries; i++) {
1487 struct qeth_ipacmd_addr_change_entry *entry =
1488 &data->ac_event.entry[i];
1489 qeth_bridge_emit_host_event(data->card,
1490 anev_reg_unreg,
1491 entry->change_code,
1492 &entry->token,
1493 &entry->addr_lnid);
1494 }
1495
1496 free:
1497 kfree(data);
1498 }
1499
1500 static void qeth_addr_change_event(struct qeth_card *card,
1501 struct qeth_ipa_cmd *cmd)
1502 {
1503 struct qeth_ipacmd_addr_change *hostevs =
1504 &cmd->data.addrchange;
1505 struct qeth_addr_change_data *data;
1506 int extrasize;
1507
1508 if (card->info.pnso_mode == QETH_PNSO_NONE)
1509 return;
1510
1511 QETH_CARD_TEXT(card, 4, "adrchgev");
1512 if (cmd->hdr.return_code != 0x0000) {
1513 if (cmd->hdr.return_code == 0x0010) {
1514 if (hostevs->lost_event_mask == 0x00)
1515 hostevs->lost_event_mask = 0xff;
1516 } else {
1517 QETH_CARD_TEXT_(card, 2, "ACHN%04x",
1518 cmd->hdr.return_code);
1519 return;
1520 }
1521 }
1522 extrasize = sizeof(struct qeth_ipacmd_addr_change_entry) *
1523 hostevs->num_entries;
1524 data = kzalloc(sizeof(struct qeth_addr_change_data) + extrasize,
1525 GFP_ATOMIC);
1526 if (!data) {
1527 QETH_CARD_TEXT(card, 2, "ACNalloc");
1528 return;
1529 }
1530 if (card->info.pnso_mode == QETH_PNSO_BRIDGEPORT)
1531 INIT_DELAYED_WORK(&data->dwork, qeth_addr_change_event_worker);
1532 else
1533 INIT_DELAYED_WORK(&data->dwork, qeth_l2_dev2br_worker);
1534 data->card = card;
1535 data->ac_event = *hostevs;
1536 memcpy(data->ac_event.entry, hostevs->entry, extrasize);
1537 queue_delayed_work(card->event_wq, &data->dwork, 0);
1538 }
1539
1540 /* SETBRIDGEPORT support; sending commands */
1541
1542 struct _qeth_sbp_cbctl {
1543 union {
1544 u32 supported;
1545 struct {
1546 enum qeth_sbp_roles *role;
1547 enum qeth_sbp_states *state;
1548 } qports;
1549 } data;
1550 };
1551
1552 static int qeth_bridgeport_makerc(struct qeth_card *card,
1553 struct qeth_ipa_cmd *cmd)
1554 {
1555 struct qeth_ipacmd_setbridgeport *sbp = &cmd->data.sbp;
1556 enum qeth_ipa_sbp_cmd setcmd = sbp->hdr.command_code;
1557 u16 ipa_rc = cmd->hdr.return_code;
1558 u16 sbp_rc = sbp->hdr.return_code;
1559 int rc;
1560
1561 if (ipa_rc == IPA_RC_SUCCESS && sbp_rc == IPA_RC_SUCCESS)
1562 return 0;
1563
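/* IQD devices leave the IPA return code at SUCCESS and report errors only in
 * the SBP sub-header, while OSA devices mirror the SBP return code in the IPA
 * header:
 */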
1564 if ((IS_IQD(card) && ipa_rc == IPA_RC_SUCCESS) ||
1565 (!IS_IQD(card) && ipa_rc == sbp_rc)) {
1566 switch (sbp_rc) {
1567 case IPA_RC_SUCCESS:
1568 rc = 0;
1569 break;
1570 case IPA_RC_L2_UNSUPPORTED_CMD:
1571 case IPA_RC_UNSUPPORTED_COMMAND:
1572 rc = -EOPNOTSUPP;
1573 break;
1574 case IPA_RC_SBP_OSA_NOT_CONFIGURED:
1575 case IPA_RC_SBP_IQD_NOT_CONFIGURED:
1576 rc = -ENODEV; /* maybe not the best code here? */
1577 dev_err(&card->gdev->dev,
1578 "The device is not configured as a Bridge Port\n");
1579 break;
1580 case IPA_RC_SBP_OSA_OS_MISMATCH:
1581 case IPA_RC_SBP_IQD_OS_MISMATCH:
1582 rc = -EPERM;
1583 dev_err(&card->gdev->dev,
1584 "A Bridge Port is already configured by a different operating system\n");
1585 break;
1586 case IPA_RC_SBP_OSA_ANO_DEV_PRIMARY:
1587 case IPA_RC_SBP_IQD_ANO_DEV_PRIMARY:
1588 switch (setcmd) {
1589 case IPA_SBP_SET_PRIMARY_BRIDGE_PORT:
1590 rc = -EEXIST;
1591 dev_err(&card->gdev->dev,
1592 "The LAN already has a primary Bridge Port\n");
1593 break;
1594 case IPA_SBP_SET_SECONDARY_BRIDGE_PORT:
1595 rc = -EBUSY;
1596 dev_err(&card->gdev->dev,
1597 "The device is already a primary Bridge Port\n");
1598 break;
1599 default:
1600 rc = -EIO;
1601 }
1602 break;
1603 case IPA_RC_SBP_OSA_CURRENT_SECOND:
1604 case IPA_RC_SBP_IQD_CURRENT_SECOND:
1605 rc = -EBUSY;
1606 dev_err(&card->gdev->dev,
1607 "The device is already a secondary Bridge Port\n");
1608 break;
1609 case IPA_RC_SBP_OSA_LIMIT_SECOND:
1610 case IPA_RC_SBP_IQD_LIMIT_SECOND:
1611 rc = -EEXIST;
1612 dev_err(&card->gdev->dev,
1613 "The LAN cannot have more secondary Bridge Ports\n");
1614 break;
1615 case IPA_RC_SBP_OSA_CURRENT_PRIMARY:
1616 case IPA_RC_SBP_IQD_CURRENT_PRIMARY:
1617 rc = -EBUSY;
1618 dev_err(&card->gdev->dev,
1619 "The device is already a primary Bridge Port\n");
1620 break;
1621 case IPA_RC_SBP_OSA_NOT_AUTHD_BY_ZMAN:
1622 case IPA_RC_SBP_IQD_NOT_AUTHD_BY_ZMAN:
1623 rc = -EACCES;
1624 dev_err(&card->gdev->dev,
1625 "The device is not authorized to be a Bridge Port\n");
1626 break;
1627 default:
1628 rc = -EIO;
1629 }
1630 } else {
1631 switch (ipa_rc) {
1632 case IPA_RC_NOTSUPP:
1633 rc = -EOPNOTSUPP;
1634 break;
1635 case IPA_RC_UNSUPPORTED_COMMAND:
1636 rc = -EOPNOTSUPP;
1637 break;
1638 default:
1639 rc = -EIO;
1640 }
1641 }
1642
1643 if (rc) {
1644 QETH_CARD_TEXT_(card, 2, "SBPi%04x", ipa_rc);
1645 QETH_CARD_TEXT_(card, 2, "SBPc%04x", sbp_rc);
1646 }
1647 return rc;
1648 }
1649
1650 static struct qeth_cmd_buffer *qeth_sbp_build_cmd(struct qeth_card *card,
1651 enum qeth_ipa_sbp_cmd sbp_cmd,
1652 unsigned int data_length)
1653 {
1654 enum qeth_ipa_cmds ipa_cmd = IS_IQD(card) ? IPA_CMD_SETBRIDGEPORT_IQD :
1655 IPA_CMD_SETBRIDGEPORT_OSA;
1656 struct qeth_ipacmd_sbp_hdr *hdr;
1657 struct qeth_cmd_buffer *iob;
1658
1659 iob = qeth_ipa_alloc_cmd(card, ipa_cmd, QETH_PROT_NONE,
1660 data_length +
1661 offsetof(struct qeth_ipacmd_setbridgeport,
1662 data));
1663 if (!iob)
1664 return iob;
1665
1666 hdr = &__ipa_cmd(iob)->data.sbp.hdr;
1667 hdr->cmdlength = sizeof(*hdr) + data_length;
1668 hdr->command_code = sbp_cmd;
1669 hdr->used_total = 1;
1670 hdr->seq_no = 1;
1671 return iob;
1672 }
1673
1674 static int qeth_bridgeport_query_support_cb(struct qeth_card *card,
1675 struct qeth_reply *reply, unsigned long data)
1676 {
1677 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
1678 struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
1679 int rc;
1680
1681 QETH_CARD_TEXT(card, 2, "brqsupcb");
1682 rc = qeth_bridgeport_makerc(card, cmd);
1683 if (rc)
1684 return rc;
1685
1686 cbctl->data.supported =
1687 cmd->data.sbp.data.query_cmds_supp.supported_cmds;
1688 return 0;
1689 }
1690
1691 /**
1692 * qeth_bridgeport_query_support() - store bitmask of supported subfunctions.
1693 * @card: qeth_card structure pointer.
1694 *
1695 * Sets bitmask of supported setbridgeport subfunctions in the qeth_card
1696 * structure: card->options.sbp.supported_funcs.
1697 */
1698 static void qeth_bridgeport_query_support(struct qeth_card *card)
1699 {
1700 struct qeth_cmd_buffer *iob;
1701 struct _qeth_sbp_cbctl cbctl;
1702
1703 QETH_CARD_TEXT(card, 2, "brqsuppo");
1704 iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_COMMANDS_SUPPORTED,
1705 SBP_DATA_SIZEOF(query_cmds_supp));
1706 if (!iob)
1707 return;
1708
1709 if (qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_support_cb,
1710 &cbctl)) {
1711 card->options.sbp.role = QETH_SBP_ROLE_NONE;
1712 card->options.sbp.supported_funcs = 0;
1713 return;
1714 }
1715 card->options.sbp.supported_funcs = cbctl.data.supported;
1716 }
1717
1718 static int qeth_bridgeport_query_ports_cb(struct qeth_card *card,
1719 struct qeth_reply *reply, unsigned long data)
1720 {
1721 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
1722 struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
1723 struct qeth_sbp_port_data *qports;
1724 int rc;
1725
1726 QETH_CARD_TEXT(card, 2, "brqprtcb");
1727 rc = qeth_bridgeport_makerc(card, cmd);
1728 if (rc)
1729 return rc;
1730
1731 qports = &cmd->data.sbp.data.port_data;
1732 if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
1733 QETH_CARD_TEXT_(card, 2, "SBPs%04x", qports->entry_length);
1734 return -EINVAL;
1735 }
1736 /* first entry contains the state of the local port */
1737 if (qports->num_entries > 0) {
1738 if (cbctl->data.qports.role)
1739 *cbctl->data.qports.role = qports->entry[0].role;
1740 if (cbctl->data.qports.state)
1741 *cbctl->data.qports.state = qports->entry[0].state;
1742 }
1743 return 0;
1744 }
1745
1746 /**
1747 * qeth_bridgeport_query_ports() - query local bridgeport status.
1748 * @card: qeth_card structure pointer.
1749 * @role: Role of the port: 0-none, 1-primary, 2-secondary.
1750 * @state: State of the port: 0-inactive, 1-standby, 2-active.
1751 *
1752 * Returns negative errno-compatible error indication or 0 on success.
1753 *
1754 * 'role' and 'state' are not updated in case of hardware operation failure.
1755 */
qeth_bridgeport_query_ports(struct qeth_card * card,enum qeth_sbp_roles * role,enum qeth_sbp_states * state)1756 int qeth_bridgeport_query_ports(struct qeth_card *card,
1757 enum qeth_sbp_roles *role, enum qeth_sbp_states *state)
1758 {
1759 struct qeth_cmd_buffer *iob;
1760 struct _qeth_sbp_cbctl cbctl = {
1761 .data = {
1762 .qports = {
1763 .role = role,
1764 .state = state,
1765 },
1766 },
1767 };
1768
1769 QETH_CARD_TEXT(card, 2, "brqports");
1770 if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS))
1771 return -EOPNOTSUPP;
1772 iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_BRIDGE_PORTS, 0);
1773 if (!iob)
1774 return -ENOMEM;
1775
1776 return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb,
1777 &cbctl);
1778 }
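
/*
 * Example (illustrative only, guarded by #if 0 so it is not built): a
 * minimal sketch of how a sysfs show handler could consume
 * qeth_bridgeport_query_ports(). The handler name and attribute wiring are
 * assumptions; the real attribute code lives in the qeth layer-2 sysfs
 * support, not here.
 */
#if 0
static ssize_t example_bridge_state_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);
	enum qeth_sbp_roles role = QETH_SBP_ROLE_NONE;
	enum qeth_sbp_states state = 0;
	int rc;

	rc = qeth_bridgeport_query_ports(card, &role, &state);
	if (rc)
		return rc;
	/* role: 0-none, 1-primary, 2-secondary; state: 0-inactive, 1-standby, 2-active */
	return sysfs_emit(buf, "role=%d state=%d\n", role, state);
}
#endif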

static int qeth_bridgeport_set_cb(struct qeth_card *card,
				  struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;

	QETH_CARD_TEXT(card, 2, "brsetrcb");
	return qeth_bridgeport_makerc(card, cmd);
}

/**
 * qeth_bridgeport_setrole() - Assign a Bridge Port role to the port.
 * @card: qeth_card structure pointer.
 * @role: Role to assign.
 *
 * Returns negative errno-compatible error indication or 0 on success.
 */
int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
{
	struct qeth_cmd_buffer *iob;
	enum qeth_ipa_sbp_cmd setcmd;
	unsigned int cmdlength = 0;

	QETH_CARD_TEXT(card, 2, "brsetrol");
	switch (role) {
	case QETH_SBP_ROLE_NONE:
		setcmd = IPA_SBP_RESET_BRIDGE_PORT_ROLE;
		break;
	case QETH_SBP_ROLE_PRIMARY:
		setcmd = IPA_SBP_SET_PRIMARY_BRIDGE_PORT;
		cmdlength = SBP_DATA_SIZEOF(set_primary);
		break;
	case QETH_SBP_ROLE_SECONDARY:
		setcmd = IPA_SBP_SET_SECONDARY_BRIDGE_PORT;
		break;
	default:
		return -EINVAL;
	}
	if (!(card->options.sbp.supported_funcs & setcmd))
		return -EOPNOTSUPP;
	iob = qeth_sbp_build_cmd(card, setcmd, cmdlength);
	if (!iob)
		return -ENOMEM;

	return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb, NULL);
}
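
/*
 * Example (illustrative only, not built): assigning the primary Bridge Port
 * role from a hypothetical configuration path. The error handling simply
 * mirrors the errno conventions documented above.
 */
#if 0
static int example_make_primary(struct qeth_card *card)
{
	int rc = qeth_bridgeport_setrole(card, QETH_SBP_ROLE_PRIMARY);

	if (rc == -EOPNOTSUPP)
		dev_warn(&card->gdev->dev,
			 "The primary Bridge Port role is not supported\n");
	return rc;
}
#endif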

static void qeth_bridgeport_an_set_cb(void *priv,
				      struct chsc_pnso_naid_l2 *entry)
{
	struct qeth_card *card = (struct qeth_card *)priv;
	u8 code;

	code = IPA_ADDR_CHANGE_CODE_MACADDR;
	if (entry->addr_lnid.lnid < VLAN_N_VID)
		code |= IPA_ADDR_CHANGE_CODE_VLANID;
	qeth_bridge_emit_host_event(card, anev_reg_unreg, code,
				    (struct net_if_token *)&entry->nit,
				    (struct mac_addr_lnid *)&entry->addr_lnid);
}

/**
 * qeth_bridgeport_an_set() - Enable or disable bridgeport address notification
 * @card: qeth_card structure pointer.
 * @enable: 0 - disable, non-zero - enable notifications
 *
 * Returns negative errno-compatible error indication or 0 on success.
 *
 * On enable, emits a series of address notification udev events for all
 * currently registered hosts.
 */
int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
{
	int rc;

	if (!card->options.sbp.supported_funcs)
		return -EOPNOTSUPP;

	if (enable) {
		qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL);
		qeth_l2_set_pnso_mode(card, QETH_PNSO_BRIDGEPORT);
		rc = qeth_l2_pnso(card, PNSO_OC_NET_BRIDGE_INFO, 1,
				  qeth_bridgeport_an_set_cb, card);
		if (rc)
			qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
	} else {
		rc = qeth_l2_pnso(card, PNSO_OC_NET_BRIDGE_INFO, 0, NULL, NULL);
		qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
	}
	return rc;
}
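
/*
 * Example (illustrative only, not built): toggling bridgeport address
 * notification from a hypothetical control path. A boolean maps directly
 * onto the enable argument of qeth_bridgeport_an_set(); on enable, a reset
 * event plus one event per currently known host is emitted.
 */
#if 0
static int example_set_notification(struct qeth_card *card, bool on)
{
	return qeth_bridgeport_an_set(card, on ? 1 : 0);
}
#endif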

/* VNIC Characteristics support */

/* handle VNICC IPA command return codes; convert to error codes */
static int qeth_l2_vnicc_makerc(struct qeth_card *card, u16 ipa_rc)
{
	int rc;

	switch (ipa_rc) {
	case IPA_RC_SUCCESS:
		return ipa_rc;
	case IPA_RC_L2_UNSUPPORTED_CMD:
	case IPA_RC_NOTSUPP:
		rc = -EOPNOTSUPP;
		break;
	case IPA_RC_VNICC_OOSEQ:
		rc = -EALREADY;
		break;
	case IPA_RC_VNICC_VNICBP:
		rc = -EBUSY;
		break;
	case IPA_RC_L2_ADDR_TABLE_FULL:
		rc = -ENOSPC;
		break;
	case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
		rc = -EACCES;
		break;
	default:
		rc = -EIO;
	}

	QETH_CARD_TEXT_(card, 2, "err%04x", ipa_rc);
	return rc;
}

/* generic VNICC request call back */
static int qeth_l2_vnicc_request_cb(struct qeth_card *card,
				    struct qeth_reply *reply,
				    unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipacmd_vnicc *rep = &cmd->data.vnicc;
	u32 sub_cmd = cmd->data.vnicc.hdr.sub_command;

	QETH_CARD_TEXT(card, 2, "vniccrcb");
	if (cmd->hdr.return_code)
		return qeth_l2_vnicc_makerc(card, cmd->hdr.return_code);
	/* return results to caller */
	card->options.vnicc.sup_chars = rep->vnicc_cmds.supported;
	card->options.vnicc.cur_chars = rep->vnicc_cmds.enabled;

	if (sub_cmd == IPA_VNICC_QUERY_CMDS)
		*(u32 *)reply->param = rep->data.query_cmds.sup_cmds;
	else if (sub_cmd == IPA_VNICC_GET_TIMEOUT)
		*(u32 *)reply->param = rep->data.getset_timeout.timeout;

	return 0;
}

static struct qeth_cmd_buffer *qeth_l2_vnicc_build_cmd(struct qeth_card *card,
						       u32 vnicc_cmd,
						       unsigned int data_length)
{
	struct qeth_ipacmd_vnicc_hdr *hdr;
	struct qeth_cmd_buffer *iob;

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_VNICC, QETH_PROT_NONE,
				 data_length +
				 offsetof(struct qeth_ipacmd_vnicc, data));
	if (!iob)
		return NULL;

	hdr = &__ipa_cmd(iob)->data.vnicc.hdr;
	hdr->data_length = sizeof(*hdr) + data_length;
	hdr->sub_command = vnicc_cmd;
	return iob;
}

/* VNICC query VNIC characteristics request */
static int qeth_l2_vnicc_query_chars(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "vniccqch");
	iob = qeth_l2_vnicc_build_cmd(card, IPA_VNICC_QUERY_CHARS, 0);
	if (!iob)
		return -ENOMEM;

	return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, NULL);
}

/* VNICC query sub commands request */
static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char,
				    u32 *sup_cmds)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "vniccqcm");
	iob = qeth_l2_vnicc_build_cmd(card, IPA_VNICC_QUERY_CMDS,
				      VNICC_DATA_SIZEOF(query_cmds));
	if (!iob)
		return -ENOMEM;

	__ipa_cmd(iob)->data.vnicc.data.query_cmds.vnic_char = vnic_char;

	return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, sup_cmds);
}

/* VNICC enable/disable characteristic request */
static int qeth_l2_vnicc_set_char(struct qeth_card *card, u32 vnic_char,
				  u32 cmd)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "vniccedc");
	iob = qeth_l2_vnicc_build_cmd(card, cmd, VNICC_DATA_SIZEOF(set_char));
	if (!iob)
		return -ENOMEM;

	__ipa_cmd(iob)->data.vnicc.data.set_char.vnic_char = vnic_char;

	return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, NULL);
}

/* VNICC get/set timeout for characteristic request */
static int qeth_l2_vnicc_getset_timeout(struct qeth_card *card, u32 vnicc,
					u32 cmd, u32 *timeout)
{
	struct qeth_vnicc_getset_timeout *getset_timeout;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "vniccgst");
	iob = qeth_l2_vnicc_build_cmd(card, cmd,
				      VNICC_DATA_SIZEOF(getset_timeout));
	if (!iob)
		return -ENOMEM;

	getset_timeout = &__ipa_cmd(iob)->data.vnicc.data.getset_timeout;
	getset_timeout->vnic_char = vnicc;

	if (cmd == IPA_VNICC_SET_TIMEOUT)
		getset_timeout->timeout = *timeout;

	return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, timeout);
}

/* recover user timeout setting */
static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
					  u32 *timeout)
{
	if (card->options.vnicc.sup_chars & vnicc &&
	    card->options.vnicc.getset_timeout_sup & vnicc &&
	    !qeth_l2_vnicc_getset_timeout(card, vnicc, IPA_VNICC_SET_TIMEOUT,
					  timeout))
		return false;
	*timeout = QETH_VNICC_DEFAULT_TIMEOUT;
	return true;
}

/* set current VNICC flag state; called from sysfs store function */
int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state)
{
	int rc = 0;
	u32 cmd;

	QETH_CARD_TEXT(card, 2, "vniccsch");

	/* check if characteristic and enable/disable are supported */
	if (!(card->options.vnicc.sup_chars & vnicc) ||
	    !(card->options.vnicc.set_char_sup & vnicc))
		return -EOPNOTSUPP;

	if (qeth_bridgeport_is_in_use(card))
		return -EBUSY;

	/* set enable/disable command and store wanted characteristic */
	if (state) {
		cmd = IPA_VNICC_ENABLE;
		card->options.vnicc.wanted_chars |= vnicc;
	} else {
		cmd = IPA_VNICC_DISABLE;
		card->options.vnicc.wanted_chars &= ~vnicc;
	}

	/* do we need to do anything? */
	if (card->options.vnicc.cur_chars == card->options.vnicc.wanted_chars)
		return rc;

	/* if card is not ready, simply stop here */
	if (!qeth_card_hw_is_reachable(card)) {
		if (state)
			card->options.vnicc.cur_chars |= vnicc;
		else
			card->options.vnicc.cur_chars &= ~vnicc;
		return rc;
	}

	rc = qeth_l2_vnicc_set_char(card, vnicc, cmd);
	if (rc)
		card->options.vnicc.wanted_chars =
			card->options.vnicc.cur_chars;
	else {
		/* successful online VNICC change; handle special cases */
		if (state && vnicc == QETH_VNICC_RX_BCAST)
			card->options.vnicc.rx_bcast_enabled = true;
		if (!state && vnicc == QETH_VNICC_LEARNING)
			qeth_l2_vnicc_recover_timeout(card, vnicc,
					&card->options.vnicc.learning_timeout);
	}

	return rc;
}
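
/*
 * Example (illustrative only, not built): a minimal sysfs store handler that
 * toggles a VNIC characteristic via qeth_l2_vnicc_set_state(). The handler
 * name and input parsing are assumptions; the real attribute handlers live
 * in the qeth layer-2 sysfs support.
 */
#if 0
static ssize_t example_vnicc_learning_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);
	bool enable;
	int rc;

	rc = kstrtobool(buf, &enable);
	if (rc)
		return rc;
	rc = qeth_l2_vnicc_set_state(card, QETH_VNICC_LEARNING, enable);
	return rc ? rc : count;
}
#endif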

/* get current VNICC flag state; called from sysfs show function */
int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "vniccgch");

	/* check if characteristic is supported */
	if (!(card->options.vnicc.sup_chars & vnicc))
		return -EOPNOTSUPP;

	if (qeth_bridgeport_is_in_use(card))
		return -EBUSY;

	/* if card is ready, query current VNICC state */
	if (qeth_card_hw_is_reachable(card))
		rc = qeth_l2_vnicc_query_chars(card);

	*state = (card->options.vnicc.cur_chars & vnicc) ? true : false;
	return rc;
}
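
/*
 * Example (illustrative only, not built): the matching show path, reporting
 * the current state of a VNIC characteristic via qeth_l2_vnicc_get_state().
 * The handler name is an assumption.
 */
#if 0
static ssize_t example_vnicc_learning_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);
	bool state;
	int rc;

	rc = qeth_l2_vnicc_get_state(card, QETH_VNICC_LEARNING, &state);
	if (rc)
		return rc;
	return sysfs_emit(buf, "%d\n", state);
}
#endif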

/* set VNICC timeout; called from sysfs store function. Currently, only learning
 * supports timeout
 */
int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "vniccsto");

	/* check if characteristic and set_timeout are supported */
	if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) ||
	    !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING))
		return -EOPNOTSUPP;

	if (qeth_bridgeport_is_in_use(card))
		return -EBUSY;

	/* do we need to do anything? */
	if (card->options.vnicc.learning_timeout == timeout)
		return rc;

	/* if card is not ready, simply store the value internally and return */
	if (!qeth_card_hw_is_reachable(card)) {
		card->options.vnicc.learning_timeout = timeout;
		return rc;
	}

	/* send timeout value to card; if successful, store value internally */
	rc = qeth_l2_vnicc_getset_timeout(card, QETH_VNICC_LEARNING,
					  IPA_VNICC_SET_TIMEOUT, &timeout);
	if (!rc)
		card->options.vnicc.learning_timeout = timeout;

	return rc;
}
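
/*
 * Example (illustrative only, not built): storing a new learning timeout
 * value. The kstrtou32() parsing is an assumption about the sysfs input
 * format; qeth_l2_vnicc_set_timeout() itself handles the offline and online
 * cases as shown above.
 */
#if 0
static ssize_t example_learning_timeout_store(struct device *dev,
					      struct device_attribute *attr,
					      const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);
	u32 timeout;
	int rc;

	rc = kstrtou32(buf, 10, &timeout);
	if (rc)
		return rc;
	rc = qeth_l2_vnicc_set_timeout(card, timeout);
	return rc ? rc : count;
}
#endif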

/* get current VNICC timeout; called from sysfs show function. Currently, only
 * learning supports timeout
 */
int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "vniccgto");

	/* check if characteristic and get_timeout are supported */
	if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) ||
	    !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING))
		return -EOPNOTSUPP;

	if (qeth_bridgeport_is_in_use(card))
		return -EBUSY;

	/* if card is ready, get timeout. Otherwise, just return stored value */
	*timeout = card->options.vnicc.learning_timeout;
	if (qeth_card_hw_is_reachable(card))
		rc = qeth_l2_vnicc_getset_timeout(card, QETH_VNICC_LEARNING,
						  IPA_VNICC_GET_TIMEOUT,
						  timeout);

	return rc;
}

/* check if VNICC is currently enabled */
static bool _qeth_l2_vnicc_is_in_use(struct qeth_card *card)
{
	if (!card->options.vnicc.sup_chars)
		return false;
	/* default values are only OK if rx_bcast was not enabled by user
	 * or the card is offline.
	 */
	if (card->options.vnicc.cur_chars == QETH_VNICC_DEFAULT) {
		if (!card->options.vnicc.rx_bcast_enabled ||
		    !qeth_card_hw_is_reachable(card))
			return false;
	}
	return true;
}

/**
 * qeth_bridgeport_allowed - are any qeth_bridgeport functions allowed?
 * @card: qeth_card structure pointer
 *
 * qeth_bridgeport functionality is mutually exclusive with usage of the
 * VNIC Characteristics and dev2br address notifications
 */
bool qeth_bridgeport_allowed(struct qeth_card *card)
{
	struct qeth_priv *priv = netdev_priv(card->dev);

	return (!_qeth_l2_vnicc_is_in_use(card) &&
		!(priv->brport_features & BR_LEARNING_SYNC));
}
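
/*
 * Example (illustrative only, not built): Bridge Port requests are expected
 * to be gated on qeth_bridgeport_allowed(), since VNICC and dev2br
 * learning_sync are mutually exclusive with the bridgeport functions.
 */
#if 0
static int example_try_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
{
	if (!qeth_bridgeport_allowed(card))
		return -EBUSY;	/* VNICC or BR_LEARNING_SYNC is active */
	return qeth_bridgeport_setrole(card, role);
}
#endif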

/* recover user characteristic setting */
static bool qeth_l2_vnicc_recover_char(struct qeth_card *card, u32 vnicc,
				       bool enable)
{
	u32 cmd = enable ? IPA_VNICC_ENABLE : IPA_VNICC_DISABLE;

	if (card->options.vnicc.sup_chars & vnicc &&
	    card->options.vnicc.set_char_sup & vnicc &&
	    !qeth_l2_vnicc_set_char(card, vnicc, cmd))
		return false;
	card->options.vnicc.wanted_chars &= ~vnicc;
	card->options.vnicc.wanted_chars |= QETH_VNICC_DEFAULT & vnicc;
	return true;
}

/* (re-)initialize VNICC */
static void qeth_l2_vnicc_init(struct qeth_card *card)
{
	u32 *timeout = &card->options.vnicc.learning_timeout;
	bool enable, error = false;
	unsigned int chars_len, i;
	unsigned long chars_tmp;
	u32 sup_cmds, vnicc;

	QETH_CARD_TEXT(card, 2, "vniccini");
	/* reset rx_bcast */
	card->options.vnicc.rx_bcast_enabled = 0;
	/* initial query and storage of VNIC characteristics */
	if (qeth_l2_vnicc_query_chars(card)) {
		if (card->options.vnicc.wanted_chars != QETH_VNICC_DEFAULT ||
		    *timeout != QETH_VNICC_DEFAULT_TIMEOUT)
			dev_err(&card->gdev->dev, "Configuring the VNIC characteristics failed\n");
		/* fail quietly if user didn't change the default config */
		card->options.vnicc.sup_chars = 0;
		card->options.vnicc.cur_chars = 0;
		card->options.vnicc.wanted_chars = QETH_VNICC_DEFAULT;
		return;
	}
	/* get supported commands for each supported characteristic */
	chars_tmp = card->options.vnicc.sup_chars;
	chars_len = sizeof(card->options.vnicc.sup_chars) * BITS_PER_BYTE;
	for_each_set_bit(i, &chars_tmp, chars_len) {
		vnicc = BIT(i);
		if (qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds)) {
			sup_cmds = 0;
			error = true;
		}
		if ((sup_cmds & IPA_VNICC_SET_TIMEOUT) &&
		    (sup_cmds & IPA_VNICC_GET_TIMEOUT))
			card->options.vnicc.getset_timeout_sup |= vnicc;
		else
			card->options.vnicc.getset_timeout_sup &= ~vnicc;
		if ((sup_cmds & IPA_VNICC_ENABLE) &&
		    (sup_cmds & IPA_VNICC_DISABLE))
			card->options.vnicc.set_char_sup |= vnicc;
		else
			card->options.vnicc.set_char_sup &= ~vnicc;
	}
	/* enforce assumed default values and recover settings, if changed */
	error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
					       timeout);
	/* Change chars, if necessary */
	chars_tmp = card->options.vnicc.wanted_chars ^
		    card->options.vnicc.cur_chars;
	chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE;
	for_each_set_bit(i, &chars_tmp, chars_len) {
		vnicc = BIT(i);
		enable = card->options.vnicc.wanted_chars & vnicc;
		error |= qeth_l2_vnicc_recover_char(card, vnicc, enable);
	}
	if (error)
		dev_err(&card->gdev->dev, "Configuring the VNIC characteristics failed\n");
}

/* configure default values of VNIC characteristics */
static void qeth_l2_vnicc_set_defaults(struct qeth_card *card)
{
	/* characteristics values */
	card->options.vnicc.sup_chars = QETH_VNICC_ALL;
	card->options.vnicc.cur_chars = QETH_VNICC_DEFAULT;
	card->options.vnicc.learning_timeout = QETH_VNICC_DEFAULT_TIMEOUT;
	/* supported commands */
	card->options.vnicc.set_char_sup = QETH_VNICC_ALL;
	card->options.vnicc.getset_timeout_sup = QETH_VNICC_LEARNING;
	/* settings wanted by users */
	card->options.vnicc.wanted_chars = QETH_VNICC_DEFAULT;
}

static const struct device_type qeth_l2_devtype = {
	.name = "qeth_layer2",
	.groups = qeth_l2_attr_groups,
};

static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc;

	qeth_l2_vnicc_set_defaults(card);
	mutex_init(&card->sbp_lock);

	if (gdev->dev.type) {
		rc = device_add_groups(&gdev->dev, qeth_l2_attr_groups);
		if (rc)
			return rc;
	} else {
		gdev->dev.type = &qeth_l2_devtype;
	}

	INIT_WORK(&card->rx_mode_work, qeth_l2_rx_mode_work);
	return 0;
}

static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	struct qeth_priv *priv;

	if (gdev->dev.type != &qeth_l2_devtype)
		device_remove_groups(&gdev->dev, qeth_l2_attr_groups);

	qeth_set_allowed_threads(card, 0, 1);
	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);

	if (gdev->state == CCWGROUP_ONLINE)
		qeth_set_offline(card, card->discipline, false);

	if (card->dev->reg_state == NETREG_REGISTERED) {
		priv = netdev_priv(card->dev);
		if (priv->brport_features & BR_LEARNING_SYNC) {
			rtnl_lock();
			qeth_l2_br2dev_put();
			rtnl_unlock();
		}
		unregister_netdev(card->dev);
	}
}

static int qeth_l2_set_online(struct qeth_card *card, bool carrier_ok)
{
	struct net_device *dev = card->dev;
	int rc = 0;

	qeth_l2_detect_dev2br_support(card);

	mutex_lock(&card->sbp_lock);
	qeth_bridgeport_query_support(card);
	if (card->options.sbp.supported_funcs) {
		qeth_l2_setup_bridgeport_attrs(card);
		dev_info(&card->gdev->dev,
			 "The device represents a Bridge Capable Port\n");
	}
	mutex_unlock(&card->sbp_lock);

	qeth_l2_register_dev_addr(card);

	/* for the rx_bcast characteristic, init VNICC after setmac */
	qeth_l2_vnicc_init(card);

	qeth_l2_trace_features(card);

	/* softsetup */
	QETH_CARD_TEXT(card, 2, "softsetp");

	card->state = CARD_STATE_SOFTSETUP;

	qeth_set_allowed_threads(card, 0xffffffff, 0);

	if (dev->reg_state != NETREG_REGISTERED) {
		rc = qeth_l2_setup_netdev(card);
		if (rc)
			goto err_setup;

		if (carrier_ok)
			netif_carrier_on(dev);
	} else {
		rtnl_lock();
		rc = qeth_set_real_num_tx_queues(card,
						 qeth_tx_actual_queues(card));
		if (rc) {
			rtnl_unlock();
			goto err_set_queues;
		}

		if (carrier_ok)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);

		netif_device_attach(dev);
		qeth_enable_hw_features(dev);
		qeth_l2_enable_brport_features(card);

		if (netif_running(dev)) {
			local_bh_disable();
			napi_schedule(&card->napi);
			/* kick-start the NAPI softirq: */
			local_bh_enable();
			qeth_l2_set_rx_mode(dev);
		}
		rtnl_unlock();
	}
	return 0;

err_set_queues:
err_setup:
	qeth_set_allowed_threads(card, 0, 1);
	card->state = CARD_STATE_DOWN;
	return rc;
}

static void qeth_l2_set_offline(struct qeth_card *card)
{
	struct qeth_priv *priv = netdev_priv(card->dev);

	qeth_set_allowed_threads(card, 0, 1);
	qeth_l2_drain_rx_mode_cache(card);

	if (card->state == CARD_STATE_SOFTSETUP)
		card->state = CARD_STATE_DOWN;

	qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
	if (priv->brport_features & BR_LEARNING_SYNC)
		qeth_l2_dev2br_fdb_flush(card);
}

/* Returns zero if the command is successfully "consumed" */
static int qeth_l2_control_event(struct qeth_card *card,
				 struct qeth_ipa_cmd *cmd)
{
	switch (cmd->hdr.command) {
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_SETBRIDGEPORT_IQD:
		if (cmd->data.sbp.hdr.command_code ==
		    IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
			qeth_bridge_state_change(card, cmd);
			return 0;
		}

		return 1;
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		qeth_addr_change_event(card, cmd);
		return 0;
	default:
		return 1;
	}
}

const struct qeth_discipline qeth_l2_discipline = {
	.setup = qeth_l2_probe_device,
	.remove = qeth_l2_remove_device,
	.set_online = qeth_l2_set_online,
	.set_offline = qeth_l2_set_offline,
	.control_event_handler = qeth_l2_control_event,
};
EXPORT_SYMBOL_GPL(qeth_l2_discipline);

static int __init qeth_l2_init(void)
{
	pr_info("register layer 2 discipline\n");
	refcount_set(&qeth_l2_switchdev_notify_refcnt, 0);
	return 0;
}

static void __exit qeth_l2_exit(void)
{
	pr_info("unregister layer 2 discipline\n");
}

module_init(qeth_l2_init);
module_exit(qeth_l2_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth layer 2 discipline");
MODULE_LICENSE("GPL");