1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright IBM Corp. 2007, 2009
4 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
5 * Frank Pavlic <fpavlic@de.ibm.com>,
6 * Thomas Spatzier <tspat@de.ibm.com>,
7 * Frank Blaschka <frank.blaschka@de.ibm.com>
8 */
9
10 #define KMSG_COMPONENT "qeth"
11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12
13 #include <linux/export.h>
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/string.h>
17 #include <linux/errno.h>
18 #include <linux/kernel.h>
19 #include <linux/slab.h>
20 #include <linux/etherdevice.h>
21 #include <linux/if_bridge.h>
22 #include <linux/list.h>
23 #include <linux/hash.h>
24 #include <linux/hashtable.h>
25 #include <net/switchdev.h>
26 #include <asm/machine.h>
27 #include <asm/chsc.h>
28 #include <asm/css_chars.h>
29 #include <asm/setup.h>
30 #include "qeth_core.h"
31 #include "qeth_l2.h"
32
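/* Map the IPA return code of a SETVMAC/DELVMAC (or group MAC) command to a
 * Linux errno, tracing non-zero codes first.
 */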
33 static int qeth_l2_setdelmac_makerc(struct qeth_card *card, u16 retcode)
34 {
35 int rc;
36
37 if (retcode)
38 QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
39 switch (retcode) {
40 case IPA_RC_SUCCESS:
41 rc = 0;
42 break;
43 case IPA_RC_L2_UNSUPPORTED_CMD:
44 rc = -EOPNOTSUPP;
45 break;
46 case IPA_RC_L2_ADDR_TABLE_FULL:
47 rc = -ENOSPC;
48 break;
49 case IPA_RC_L2_DUP_MAC:
50 case IPA_RC_L2_DUP_LAYER3_MAC:
51 rc = -EADDRINUSE;
52 break;
53 case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
54 case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
55 rc = -EADDRNOTAVAIL;
56 break;
57 case IPA_RC_L2_MAC_NOT_FOUND:
58 rc = -ENOENT;
59 break;
60 default:
61 rc = -EIO;
62 break;
63 }
64 return rc;
65 }
66
67 static int qeth_l2_send_setdelmac_cb(struct qeth_card *card,
68 struct qeth_reply *reply,
69 unsigned long data)
70 {
71 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
72
73 return qeth_l2_setdelmac_makerc(card, cmd->hdr.return_code);
74 }
75
76 static int qeth_l2_send_setdelmac(struct qeth_card *card, const __u8 *mac,
77 enum qeth_ipa_cmds ipacmd)
78 {
79 struct qeth_ipa_cmd *cmd;
80 struct qeth_cmd_buffer *iob;
81
82 QETH_CARD_TEXT(card, 2, "L2sdmac");
83 iob = qeth_ipa_alloc_cmd(card, ipacmd, QETH_PROT_IPV4,
84 IPA_DATA_SIZEOF(setdelmac));
85 if (!iob)
86 return -ENOMEM;
87 cmd = __ipa_cmd(iob);
88 cmd->data.setdelmac.mac_length = ETH_ALEN;
89 ether_addr_copy(cmd->data.setdelmac.mac, mac);
90 return qeth_send_ipa_cmd(card, iob, qeth_l2_send_setdelmac_cb, NULL);
91 }
92
93 static int qeth_l2_send_setmac(struct qeth_card *card, const __u8 *mac)
94 {
95 int rc;
96
97 QETH_CARD_TEXT(card, 2, "L2Setmac");
98 rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
99 if (rc == 0) {
100 dev_info(&card->gdev->dev,
101 "MAC address %pM successfully registered\n", mac);
102 } else {
103 switch (rc) {
104 case -EADDRINUSE:
105 dev_warn(&card->gdev->dev,
106 "MAC address %pM already exists\n", mac);
107 break;
108 case -EADDRNOTAVAIL:
109 dev_warn(&card->gdev->dev,
110 "MAC address %pM is not authorized\n", mac);
111 break;
112 }
113 }
114 return rc;
115 }
116
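/* Register a MAC address with the card, using SETGMAC for multicast and
 * SETVMAC for unicast addresses, and trace any failure.
 */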
117 static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
118 {
119 enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
120 IPA_CMD_SETGMAC : IPA_CMD_SETVMAC;
121 int rc;
122
123 QETH_CARD_TEXT(card, 2, "L2Wmac");
124 rc = qeth_l2_send_setdelmac(card, mac, cmd);
125 if (rc == -EADDRINUSE)
126 QETH_DBF_MESSAGE(2, "MAC address %012llx is already registered on device %x\n",
127 ether_addr_to_u64(mac), CARD_DEVID(card));
128 else if (rc)
129 QETH_DBF_MESSAGE(2, "Failed to register MAC address %012llx on device %x: %d\n",
130 ether_addr_to_u64(mac), CARD_DEVID(card), rc);
131 return rc;
132 }
133
134 static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
135 {
136 enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
137 IPA_CMD_DELGMAC : IPA_CMD_DELVMAC;
138 int rc;
139
140 QETH_CARD_TEXT(card, 2, "L2Rmac");
141 rc = qeth_l2_send_setdelmac(card, mac, cmd);
142 if (rc)
143 QETH_DBF_MESSAGE(2, "Failed to delete MAC address %012llx on device %x: %d\n",
144 ether_addr_to_u64(mac), CARD_DEVID(card), rc);
145 return rc;
146 }
147
148 static void qeth_l2_drain_rx_mode_cache(struct qeth_card *card)
149 {
150 struct qeth_mac *mac;
151 struct hlist_node *tmp;
152 int i;
153
154 hash_for_each_safe(card->rx_mode_addrs, i, tmp, mac, hnode) {
155 hash_del(&mac->hnode);
156 kfree(mac);
157 }
158 }
159
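/* Build the L2 QDIO transmit header: packet length, header type (TSO vs.
 * plain layer 2), TX checksum offload flags for CHECKSUM_PARTIAL skbs, the
 * cast type and - if present - the VLAN tag.
 */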
160 static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue,
161 struct qeth_hdr *hdr, struct sk_buff *skb,
162 __be16 proto, unsigned int data_len)
163 {
164 int cast_type = qeth_get_ether_cast_type(skb);
165 struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
166
167 hdr->hdr.l2.pkt_length = data_len;
168
169 if (skb_is_gso(skb)) {
170 hdr->hdr.l2.id = QETH_HEADER_TYPE_L2_TSO;
171 } else {
172 hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
173 if (skb->ip_summed == CHECKSUM_PARTIAL)
174 qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], proto);
175 }
176
177 /* set byte 3 to the casting flags */
178 if (cast_type == RTN_MULTICAST)
179 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST;
180 else if (cast_type == RTN_BROADCAST)
181 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_BROADCAST;
182 else
183 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST;
184
185 /* VSWITCH relies on the VLAN information to be present
186  * in the QDIO header:
187  */
188 if (veth->h_vlan_proto == htons(ETH_P_8021Q)) {
189 hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_VLAN;
190 hdr->hdr.l2.vlan_id = ntohs(veth->h_vlan_TCI);
191 }
192 }
193
194 static int qeth_l2_setdelvlan_makerc(struct qeth_card *card, u16 retcode)
195 {
196 if (retcode)
197 QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
198
199 switch (retcode) {
200 case IPA_RC_SUCCESS:
201 return 0;
202 case IPA_RC_L2_INVALID_VLAN_ID:
203 return -EINVAL;
204 case IPA_RC_L2_DUP_VLAN_ID:
205 return -EEXIST;
206 case IPA_RC_L2_VLAN_ID_NOT_FOUND:
207 return -ENOENT;
208 case IPA_RC_L2_VLAN_ID_NOT_ALLOWED:
209 return -EPERM;
210 default:
211 return -EIO;
212 }
213 }
214
215 static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
216 struct qeth_reply *reply,
217 unsigned long data)
218 {
219 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
220
221 QETH_CARD_TEXT(card, 2, "L2sdvcb");
222 if (cmd->hdr.return_code) {
223 QETH_DBF_MESSAGE(2, "Error in processing VLAN %u on device %x: %#x.\n",
224 cmd->data.setdelvlan.vlan_id,
225 CARD_DEVID(card), cmd->hdr.return_code);
226 QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command);
227 }
228 return qeth_l2_setdelvlan_makerc(card, cmd->hdr.return_code);
229 }
230
231 static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
232 enum qeth_ipa_cmds ipacmd)
233 {
234 struct qeth_ipa_cmd *cmd;
235 struct qeth_cmd_buffer *iob;
236
237 QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd);
238 iob = qeth_ipa_alloc_cmd(card, ipacmd, QETH_PROT_IPV4,
239 IPA_DATA_SIZEOF(setdelvlan));
240 if (!iob)
241 return -ENOMEM;
242 cmd = __ipa_cmd(iob);
243 cmd->data.setdelvlan.vlan_id = i;
244 return qeth_send_ipa_cmd(card, iob, qeth_l2_send_setdelvlan_cb, NULL);
245 }
246
247 static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
248 __be16 proto, u16 vid)
249 {
250 struct qeth_card *card = dev->ml_priv;
251
252 QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
253 if (!vid)
254 return 0;
255
256 return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
257 }
258
259 static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
260 __be16 proto, u16 vid)
261 {
262 struct qeth_card *card = dev->ml_priv;
263
264 QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
265 if (!vid)
266 return 0;
267
268 return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
269 }
270
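/* Update card->info.pnso_mode under the read-device ccw lock so concurrent
 * event handling sees a consistent value; when notifications are switched
 * off, drain the event workqueue to flush already queued address events.
 */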
271 static void qeth_l2_set_pnso_mode(struct qeth_card *card,
272 enum qeth_pnso_mode mode)
273 {
274 spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
275 WRITE_ONCE(card->info.pnso_mode, mode);
276 spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
277
278 if (mode == QETH_PNSO_NONE)
279 drain_workqueue(card->event_wq);
280 }
281
282 static void qeth_l2_dev2br_fdb_flush(struct qeth_card *card)
283 {
284 struct switchdev_notifier_fdb_info info = {};
285
286 QETH_CARD_TEXT(card, 2, "fdbflush");
287
288 info.addr = NULL;
289 /* flush all VLANs: */
290 info.vid = 0;
291 info.added_by_user = false;
292 info.offloaded = true;
293
294 call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
295 card->dev, &info.info, NULL);
296 }
297
298 static int qeth_l2_request_initial_mac(struct qeth_card *card)
299 {
300 int rc = 0;
301
302 QETH_CARD_TEXT(card, 2, "l2reqmac");
303
304 if (machine_is_vm()) {
305 rc = qeth_vm_request_mac(card);
306 if (!rc)
307 goto out;
308 QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %x: %#x\n",
309 CARD_DEVID(card), rc);
310 QETH_CARD_TEXT_(card, 2, "err%04x", rc);
311 /* fall back to alternative mechanism: */
312 }
313
314 rc = qeth_setadpparms_change_macaddr(card);
315 if (!rc)
316 goto out;
317 QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n",
318 CARD_DEVID(card), rc);
319 QETH_CARD_TEXT_(card, 2, "1err%04x", rc);
320
321 /* Fall back once more, but some devices don't support a custom MAC
322 * address:
323 */
324 if (IS_OSM(card) || IS_OSX(card))
325 return (rc) ? rc : -EADDRNOTAVAIL;
326 eth_hw_addr_random(card->dev);
327
328 out:
329 QETH_CARD_HEX(card, 2, card->dev->dev_addr, card->dev->addr_len);
330 return 0;
331 }
332
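/* Ensure the netdevice has a usable MAC address (querying z/VM or the
 * adapter, falling back to a random address where allowed) and register it
 * with the card, remembering whether registration succeeded.
 */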
333 static void qeth_l2_register_dev_addr(struct qeth_card *card)
334 {
335 if (!is_valid_ether_addr(card->dev->dev_addr))
336 qeth_l2_request_initial_mac(card);
337
338 if (!qeth_l2_send_setmac(card, card->dev->dev_addr))
339 card->info.dev_addr_is_registered = 1;
340 else
341 card->info.dev_addr_is_registered = 0;
342 }
343
344 static int qeth_l2_validate_addr(struct net_device *dev)
345 {
346 struct qeth_card *card = dev->ml_priv;
347
348 if (card->info.dev_addr_is_registered)
349 return eth_validate_addr(dev);
350
351 QETH_CARD_TEXT(card, 4, "nomacadr");
352 return -EPERM;
353 }
354
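/* .ndo_set_mac_address handler: OSM/OSX devices cannot change their MAC;
 * otherwise register the new address first, switch the netdevice over and
 * only then deregister the old address.
 */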
355 static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
356 {
357 struct sockaddr *addr = p;
358 struct qeth_card *card = dev->ml_priv;
359 u8 old_addr[ETH_ALEN];
360 int rc = 0;
361
362 QETH_CARD_TEXT(card, 3, "setmac");
363
364 if (IS_OSM(card) || IS_OSX(card)) {
365 QETH_CARD_TEXT(card, 3, "setmcTYP");
366 return -EOPNOTSUPP;
367 }
368 QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN);
369 if (!is_valid_ether_addr(addr->sa_data))
370 return -EADDRNOTAVAIL;
371
372 /* don't register the same address twice */
373 if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
374 card->info.dev_addr_is_registered)
375 return 0;
376
377 /* add the new address, switch over, drop the old */
378 rc = qeth_l2_send_setmac(card, addr->sa_data);
379 if (rc)
380 return rc;
381 ether_addr_copy(old_addr, dev->dev_addr);
382 eth_hw_addr_set(dev, addr->sa_data);
383
384 if (card->info.dev_addr_is_registered)
385 qeth_l2_remove_mac(card, old_addr);
386 card->info.dev_addr_is_registered = 1;
387 return 0;
388 }
389
390 static void qeth_l2_promisc_to_bridge(struct qeth_card *card, bool enable)
391 {
392 int role;
393 int rc;
394
395 QETH_CARD_TEXT(card, 3, "pmisc2br");
396
397 if (enable) {
398 if (card->options.sbp.reflect_promisc_primary)
399 role = QETH_SBP_ROLE_PRIMARY;
400 else
401 role = QETH_SBP_ROLE_SECONDARY;
402 } else
403 role = QETH_SBP_ROLE_NONE;
404
405 rc = qeth_bridgeport_setrole(card, role);
406 QETH_CARD_TEXT_(card, 2, "bpm%c%04x", enable ? '+' : '-', rc);
407 if (!rc) {
408 card->options.sbp.role = role;
409 card->info.promisc_mode = enable;
410 }
411 }
412
413 static void qeth_l2_set_promisc_mode(struct qeth_card *card)
414 {
415 bool enable = card->dev->flags & IFF_PROMISC;
416
417 if (card->info.promisc_mode == enable)
418 return;
419
420 if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) {
421 qeth_setadp_promisc_mode(card, enable);
422 } else {
423 mutex_lock(&card->sbp_lock);
424 if (card->options.sbp.reflect_promisc)
425 qeth_l2_promisc_to_bridge(card, enable);
426 mutex_unlock(&card->sbp_lock);
427 }
428 }
429
430 /* A new MAC address is added to the hash table and marked to be written to
431  * the card only if it is not already present in the hash table.
432  *
433  */
434 static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha)
435 {
436 u32 mac_hash = get_unaligned((u32 *)(&ha->addr[2]));
437 struct qeth_mac *mac;
438
439 hash_for_each_possible(card->rx_mode_addrs, mac, hnode, mac_hash) {
440 if (ether_addr_equal_64bits(ha->addr, mac->mac_addr)) {
441 mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
442 return;
443 }
444 }
445
446 mac = kzalloc(sizeof(struct qeth_mac), GFP_ATOMIC);
447 if (!mac)
448 return;
449
450 ether_addr_copy(mac->mac_addr, ha->addr);
451 mac->disp_flag = QETH_DISP_ADDR_ADD;
452
453 hash_add(card->rx_mode_addrs, &mac->hnode, mac_hash);
454 }
455
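/* Worker behind .ndo_set_rx_mode: merge the current UC/MC lists into the
 * rx_mode_addrs hash, program newly added addresses on the card, remove
 * stale entries and finally update the promiscuous mode.
 */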
456 static void qeth_l2_rx_mode_work(struct work_struct *work)
457 {
458 struct qeth_card *card = container_of(work, struct qeth_card,
459 rx_mode_work);
460 struct net_device *dev = card->dev;
461 struct netdev_hw_addr *ha;
462 struct qeth_mac *mac;
463 struct hlist_node *tmp;
464 int i;
465 int rc;
466
467 QETH_CARD_TEXT(card, 3, "setmulti");
468
469 netif_addr_lock_bh(dev);
470 netdev_for_each_mc_addr(ha, dev)
471 qeth_l2_add_mac(card, ha);
472 netdev_for_each_uc_addr(ha, dev)
473 qeth_l2_add_mac(card, ha);
474 netif_addr_unlock_bh(dev);
475
476 hash_for_each_safe(card->rx_mode_addrs, i, tmp, mac, hnode) {
477 switch (mac->disp_flag) {
478 case QETH_DISP_ADDR_DELETE:
479 qeth_l2_remove_mac(card, mac->mac_addr);
480 hash_del(&mac->hnode);
481 kfree(mac);
482 break;
483 case QETH_DISP_ADDR_ADD:
484 rc = qeth_l2_write_mac(card, mac->mac_addr);
485 if (rc) {
486 hash_del(&mac->hnode);
487 kfree(mac);
488 break;
489 }
490 fallthrough;
491 default:
492 /* for next call to set_rx_mode(): */
493 mac->disp_flag = QETH_DISP_ADDR_DELETE;
494 }
495 }
496
497 qeth_l2_set_promisc_mode(card);
498 }
499
500 static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
501 struct net_device *dev)
502 {
503 struct qeth_card *card = dev->ml_priv;
504 u16 txq = skb_get_queue_mapping(skb);
505 struct qeth_qdio_out_q *queue;
506 int rc;
507
508 if (!skb_is_gso(skb))
509 qdisc_skb_cb(skb)->pkt_len = skb->len;
510 if (IS_IQD(card))
511 txq = qeth_iqd_translate_txq(dev, txq);
512 queue = card->qdio.out_qs[txq];
513
514 rc = qeth_xmit(card, skb, queue, vlan_get_protocol(skb),
515 qeth_l2_fill_header);
516 if (!rc)
517 return NETDEV_TX_OK;
518
519 QETH_TXQ_STAT_INC(queue, tx_dropped);
520 kfree_skb(skb);
521 return NETDEV_TX_OK;
522 }
523
524 static u16 qeth_l2_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
525 struct net_device *sb_dev)
526 {
527 return qeth_iqd_select_queue(dev, skb, qeth_get_ether_cast_type(skb),
528 sb_dev);
529 }
530
531 static void qeth_l2_set_rx_mode(struct net_device *dev)
532 {
533 struct qeth_card *card = dev->ml_priv;
534
535 schedule_work(&card->rx_mode_work);
536 }
537
538 /**
539 * qeth_l2_pnso() - perform network subchannel operation
540 * @card: qeth_card structure pointer
541 * @oc: Operation Code
542 * @cnc: Boolean Change-Notification Control
543 * @cb: Callback function will be executed for each element
544 * of the address list
545 * @priv: Pointer to pass to the callback function.
546 *
547 * Collects network information in a network address list and calls the
548 * callback function for every entry in the list. If "change-notification-
549 * control" is set, further changes in the address list will be reported
550 * via the IPA command.
551 */
552 static int qeth_l2_pnso(struct qeth_card *card, u8 oc, int cnc,
553 void (*cb)(void *priv, struct chsc_pnso_naid_l2 *entry),
554 void *priv)
555 {
556 struct ccw_device *ddev = CARD_DDEV(card);
557 struct chsc_pnso_area *rr;
558 u32 prev_instance = 0;
559 int isfirstblock = 1;
560 int i, size, elems;
561 int rc;
562
563 rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
564 if (rr == NULL)
565 return -ENOMEM;
566 do {
567 QETH_CARD_TEXT(card, 2, "PNSO");
568 /* on the first iteration, naihdr.resume_token will be zero */
569 rc = ccw_device_pnso(ddev, rr, oc, rr->naihdr.resume_token,
570 cnc);
571 if (rc)
572 continue;
573 if (cb == NULL)
574 continue;
575
576 size = rr->naihdr.naids;
577 if (size != sizeof(struct chsc_pnso_naid_l2)) {
578 WARN_ON_ONCE(1);
579 continue;
580 }
581
582 elems = (rr->response.length - sizeof(struct chsc_header) -
583 sizeof(struct chsc_pnso_naihdr)) / size;
584
585 if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
586 /* Inform the caller that they need to scrap */
587 /* the data that was already reported via cb */
588 rc = -EAGAIN;
589 break;
590 }
591 isfirstblock = 0;
592 prev_instance = rr->naihdr.instance;
593 for (i = 0; i < elems; i++)
594 (*cb)(priv, &rr->entries[i]);
595 } while ((rc == -EBUSY) || (!rc && /* list stored */
596 /* resume token is non-zero => list incomplete */
597 (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
598
599 if (rc)
600 QETH_CARD_TEXT_(card, 2, "PNrp%04x", rr->response.code);
601
602 free_page((unsigned long)rr);
603 return rc;
604 }
605
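/* Check whether a network-interface token reported by PNSO refers to this
 * card's own subchannel/CHPID, so that locally owned addresses can be
 * filtered out.
 */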
606 static bool qeth_is_my_net_if_token(struct qeth_card *card,
607 struct net_if_token *token)
608 {
609 return ((card->info.ddev_devno == token->devnum) &&
610 (card->info.cssid == token->cssid) &&
611 (card->info.iid == token->iid) &&
612 (card->info.ssid == token->ssid) &&
613 (card->info.chpid == token->chpid) &&
614 (card->info.chid == token->chid));
615 }
616
617 /**
618 * qeth_l2_dev2br_fdb_notify() - update fdb of master bridge
619 * @card: qeth_card structure pointer
620 * @code: event bitmask: high order bit 0x80 set to
621 * 1 - removal of an object
622 * 0 - addition of an object
623 * Object type(s):
624 * 0x01 - VLAN, 0x02 - MAC, 0x03 - VLAN and MAC
625 * @token: "network token" structure identifying 'physical' location
626 * of the target
627 * @addr_lnid: structure with MAC address and VLAN ID of the target
628 */
629 static void qeth_l2_dev2br_fdb_notify(struct qeth_card *card, u8 code,
630 struct net_if_token *token,
631 struct mac_addr_lnid *addr_lnid)
632 {
633 struct switchdev_notifier_fdb_info info = {};
634 u8 ntfy_mac[ETH_ALEN];
635
636 ether_addr_copy(ntfy_mac, addr_lnid->mac);
637 /* Ignore VLAN only changes */
638 if (!(code & IPA_ADDR_CHANGE_CODE_MACADDR))
639 return;
640 /* Ignore mcast entries */
641 if (is_multicast_ether_addr(ntfy_mac))
642 return;
643 /* Ignore my own addresses */
644 if (qeth_is_my_net_if_token(card, token))
645 return;
646
647 info.addr = ntfy_mac;
648 /* don't report VLAN IDs */
649 info.vid = 0;
650 info.added_by_user = false;
651 info.offloaded = true;
652
653 if (code & IPA_ADDR_CHANGE_CODE_REMOVAL) {
654 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
655 card->dev, &info.info, NULL);
656 QETH_CARD_TEXT(card, 4, "andelmac");
657 QETH_CARD_TEXT_(card, 4,
658 "mc%012llx", ether_addr_to_u64(ntfy_mac));
659 } else {
660 call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
661 card->dev, &info.info, NULL);
662 QETH_CARD_TEXT(card, 4, "anaddmac");
663 QETH_CARD_TEXT_(card, 4,
664 "mc%012llx", ether_addr_to_u64(ntfy_mac));
665 }
666 }
667
668 static void qeth_l2_dev2br_an_set_cb(void *priv,
669 struct chsc_pnso_naid_l2 *entry)
670 {
671 u8 code = IPA_ADDR_CHANGE_CODE_MACADDR;
672 struct qeth_card *card = priv;
673
674 if (entry->addr_lnid.lnid < VLAN_N_VID)
675 code |= IPA_ADDR_CHANGE_CODE_VLANID;
676 qeth_l2_dev2br_fdb_notify(card, code,
677 (struct net_if_token *)&entry->nit,
678 (struct mac_addr_lnid *)&entry->addr_lnid);
679 }
680
681 /**
682 * qeth_l2_dev2br_an_set() -
683 * Enable or disable 'dev to bridge network address notification'
684 * @card: qeth_card structure pointer
685 * @enable: Enable or disable 'dev to bridge network address notification'
686 *
687 * Returns negative errno-compatible error indication or 0 on success.
688 *
689 * On enable, emits a series of address notifications for all
690 * currently registered hosts.
691 */
692 static int qeth_l2_dev2br_an_set(struct qeth_card *card, bool enable)
693 {
694 int rc;
695
696 if (enable) {
697 QETH_CARD_TEXT(card, 2, "anseton");
698 rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 1,
699 qeth_l2_dev2br_an_set_cb, card);
700 if (rc == -EAGAIN)
701 /* address notification enabled, but inconsistent
702 * addresses reported -> disable address notification
703 */
704 qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0,
705 NULL, NULL);
706 } else {
707 QETH_CARD_TEXT(card, 2, "ansetoff");
708 rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0, NULL, NULL);
709 }
710
711 return rc;
712 }
713
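/* One deferred bridge-to-device FDB update; the queueing path takes
 * references on the involved netdevices, which the worker drops again.
 */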
714 struct qeth_l2_br2dev_event_work {
715 struct work_struct work;
716 struct net_device *br_dev;
717 struct net_device *lsync_dev;
718 struct net_device *dst_dev;
719 unsigned long event;
720 unsigned char addr[ETH_ALEN];
721 };
722
723 static const struct net_device_ops qeth_l2_iqd_netdev_ops;
724 static const struct net_device_ops qeth_l2_osa_netdev_ops;
725
726 static bool qeth_l2_must_learn(struct net_device *netdev,
727 struct net_device *dstdev)
728 {
729 struct qeth_priv *priv;
730
731 priv = netdev_priv(netdev);
732 return (netdev != dstdev &&
733 (priv->brport_features & BR_LEARNING_SYNC) &&
734 !(br_port_flag_is_set(netdev, BR_ISOLATED) &&
735 br_port_flag_is_set(dstdev, BR_ISOLATED)) &&
736 (netdev->netdev_ops == &qeth_l2_iqd_netdev_ops ||
737 netdev->netdev_ops == &qeth_l2_osa_netdev_ops));
738 }
739
740 /**
741 * qeth_l2_br2dev_worker() - update local MACs
742 * @work: bridge to device FDB update
743 *
744 * Update local MACs of a learning_sync bridgeport so it can receive
745 * messages for a destination port.
746 * In case of an isolated learning_sync port, also update its isolated
747 * siblings.
748 */
749 static void qeth_l2_br2dev_worker(struct work_struct *work)
750 {
751 struct qeth_l2_br2dev_event_work *br2dev_event_work =
752 container_of(work, struct qeth_l2_br2dev_event_work, work);
753 struct net_device *lsyncdev = br2dev_event_work->lsync_dev;
754 struct net_device *dstdev = br2dev_event_work->dst_dev;
755 struct net_device *brdev = br2dev_event_work->br_dev;
756 unsigned long event = br2dev_event_work->event;
757 unsigned char *addr = br2dev_event_work->addr;
758 struct qeth_card *card = lsyncdev->ml_priv;
759 struct net_device *lowerdev;
760 struct list_head *iter;
761 int err = 0;
762
763 QETH_CARD_TEXT_(card, 4, "b2dw%04lx", event);
764 QETH_CARD_TEXT_(card, 4, "ma%012llx", ether_addr_to_u64(addr));
765
766 rcu_read_lock();
767 /* Verify preconditions are still valid: */
768 if (!netif_is_bridge_port(lsyncdev) ||
769 brdev != netdev_master_upper_dev_get_rcu(lsyncdev))
770 goto unlock;
771 if (!qeth_l2_must_learn(lsyncdev, dstdev))
772 goto unlock;
773
774 if (br_port_flag_is_set(lsyncdev, BR_ISOLATED)) {
775 /* Update lsyncdev and its isolated sibling(s): */
776 iter = &brdev->adj_list.lower;
777 lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
778 while (lowerdev) {
779 if (br_port_flag_is_set(lowerdev, BR_ISOLATED)) {
780 switch (event) {
781 case SWITCHDEV_FDB_ADD_TO_DEVICE:
782 err = dev_uc_add(lowerdev, addr);
783 break;
784 case SWITCHDEV_FDB_DEL_TO_DEVICE:
785 err = dev_uc_del(lowerdev, addr);
786 break;
787 default:
788 break;
789 }
790 if (err) {
791 QETH_CARD_TEXT(card, 2, "b2derris");
792 QETH_CARD_TEXT_(card, 2,
793 "err%02lx%03d", event,
794 lowerdev->ifindex);
795 }
796 }
797 lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
798 }
799 } else {
800 switch (event) {
801 case SWITCHDEV_FDB_ADD_TO_DEVICE:
802 err = dev_uc_add(lsyncdev, addr);
803 break;
804 case SWITCHDEV_FDB_DEL_TO_DEVICE:
805 err = dev_uc_del(lsyncdev, addr);
806 break;
807 default:
808 break;
809 }
810 if (err)
811 QETH_CARD_TEXT_(card, 2, "b2derr%02lx", event);
812 }
813
814 unlock:
815 rcu_read_unlock();
816 dev_put(brdev);
817 dev_put(lsyncdev);
818 dev_put(dstdev);
819 kfree(br2dev_event_work);
820 }
821
822 static int qeth_l2_br2dev_queue_work(struct net_device *brdev,
823 struct net_device *lsyncdev,
824 struct net_device *dstdev,
825 unsigned long event,
826 const unsigned char *addr)
827 {
828 struct qeth_l2_br2dev_event_work *worker_data;
829 struct qeth_card *card;
830
831 worker_data = kzalloc(sizeof(*worker_data), GFP_ATOMIC);
832 if (!worker_data)
833 return -ENOMEM;
834 INIT_WORK(&worker_data->work, qeth_l2_br2dev_worker);
835 worker_data->br_dev = brdev;
836 worker_data->lsync_dev = lsyncdev;
837 worker_data->dst_dev = dstdev;
838 worker_data->event = event;
839 ether_addr_copy(worker_data->addr, addr);
840
841 card = lsyncdev->ml_priv;
842 /* Take a reference on the sw port devices and the bridge */
843 dev_hold(brdev);
844 dev_hold(lsyncdev);
845 dev_hold(dstdev);
846 queue_work(card->event_wq, &worker_data->work);
847 return 0;
848 }
849
850 /* Called under rtnl_lock */
851 static int qeth_l2_switchdev_event(struct notifier_block *unused,
852 unsigned long event, void *ptr)
853 {
854 struct net_device *dstdev, *brdev, *lowerdev;
855 struct switchdev_notifier_fdb_info *fdb_info;
856 struct switchdev_notifier_info *info = ptr;
857 struct list_head *iter;
858 struct qeth_card *card;
859 int rc;
860
861 if (!(event == SWITCHDEV_FDB_ADD_TO_DEVICE ||
862 event == SWITCHDEV_FDB_DEL_TO_DEVICE))
863 return NOTIFY_DONE;
864
865 dstdev = switchdev_notifier_info_to_dev(info);
866 brdev = netdev_master_upper_dev_get_rcu(dstdev);
867 if (!brdev || !netif_is_bridge_master(brdev))
868 return NOTIFY_DONE;
869 fdb_info = container_of(info,
870 struct switchdev_notifier_fdb_info,
871 info);
872 iter = &brdev->adj_list.lower;
873 lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
874 while (lowerdev) {
875 if (qeth_l2_must_learn(lowerdev, dstdev)) {
876 card = lowerdev->ml_priv;
877 QETH_CARD_TEXT_(card, 4, "b2dqw%03lx", event);
878 rc = qeth_l2_br2dev_queue_work(brdev, lowerdev,
879 dstdev, event,
880 fdb_info->addr);
881 if (rc) {
882 QETH_CARD_TEXT(card, 2, "b2dqwerr");
883 return NOTIFY_BAD;
884 }
885 }
886 lowerdev = netdev_next_lower_dev_rcu(brdev, &iter);
887 }
888 return NOTIFY_DONE;
889 }
890
891 static struct notifier_block qeth_l2_sw_notifier = {
892 .notifier_call = qeth_l2_switchdev_event,
893 };
894
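/* Number of learning_sync bridgeports relying on the global switchdev
 * notifier; the notifier is registered for the first user and unregistered
 * with the last one.
 */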
895 static refcount_t qeth_l2_switchdev_notify_refcnt;
896
897 /* Called under rtnl_lock */
898 static void qeth_l2_br2dev_get(void)
899 {
900 int rc;
901
902 if (!refcount_inc_not_zero(&qeth_l2_switchdev_notify_refcnt)) {
903 rc = register_switchdev_notifier(&qeth_l2_sw_notifier);
904 if (rc) {
905 QETH_DBF_MESSAGE(2,
906 "failed to register qeth_l2_sw_notifier: %d\n",
907 rc);
908 } else {
909 refcount_set(&qeth_l2_switchdev_notify_refcnt, 1);
910 QETH_DBF_MESSAGE(2, "qeth_l2_sw_notifier registered\n");
911 }
912 }
913 QETH_DBF_TEXT_(SETUP, 2, "b2d+%04d",
914 qeth_l2_switchdev_notify_refcnt.refs.counter);
915 }
916
917 /* Called under rtnl_lock */
918 static void qeth_l2_br2dev_put(void)
919 {
920 int rc;
921
922 if (refcount_dec_and_test(&qeth_l2_switchdev_notify_refcnt)) {
923 rc = unregister_switchdev_notifier(&qeth_l2_sw_notifier);
924 if (rc) {
925 QETH_DBF_MESSAGE(2,
926 "failed to unregister qeth_l2_sw_notifier: %d\n",
927 rc);
928 } else {
929 QETH_DBF_MESSAGE(2,
930 "qeth_l2_sw_notifier unregistered\n");
931 }
932 }
933 QETH_DBF_TEXT_(SETUP, 2, "b2d-%04d",
934 qeth_l2_switchdev_notify_refcnt.refs.counter);
935 }
936
937 static int qeth_l2_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
938 struct net_device *dev, u32 filter_mask,
939 int nlflags)
940 {
941 struct qeth_priv *priv = netdev_priv(dev);
942 struct qeth_card *card = dev->ml_priv;
943 u16 mode = BRIDGE_MODE_UNDEF;
944
945 /* Do not even show qeth devs that cannot do bridge_setlink */
946 if (!priv->brport_hw_features || !netif_device_present(dev) ||
947 qeth_bridgeport_is_in_use(card))
948 return -EOPNOTSUPP;
949
950 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
951 mode, priv->brport_features,
952 priv->brport_hw_features,
953 nlflags, filter_mask, NULL);
954 }
955
956 static const struct nla_policy qeth_brport_policy[IFLA_BRPORT_MAX + 1] = {
957 [IFLA_BRPORT_LEARNING_SYNC] = { .type = NLA_U8 },
958 };
959
960 /**
961 * qeth_l2_bridge_setlink() - set bridgeport attributes
962 * @dev: netdevice
963 * @nlh: netlink message header
964 * @flags: bridge flags (here: BRIDGE_FLAGS_SELF)
965 * @extack: extended ACK report struct
966 *
967 * Called under rtnl_lock
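 *
 * A learning_sync request typically arrives here as an IFLA_PROTINFO nest
 * carrying IFLA_BRPORT_LEARNING_SYNC, e.g. when user space runs something
 * like "bridge link set dev <qeth-if> learning_sync on self" (illustrative
 * command, exact iproute2 syntax may differ).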
968 */
969 static int qeth_l2_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
970 u16 flags, struct netlink_ext_ack *extack)
971 {
972 struct qeth_priv *priv = netdev_priv(dev);
973 struct nlattr *bp_tb[IFLA_BRPORT_MAX + 1];
974 struct qeth_card *card = dev->ml_priv;
975 struct nlattr *attr, *nested_attr;
976 bool enable, has_protinfo = false;
977 int rem1, rem2;
978 int rc;
979
980 if (!netif_device_present(dev))
981 return -ENODEV;
982
983 nlmsg_for_each_attr(attr, nlh, sizeof(struct ifinfomsg), rem1) {
984 if (nla_type(attr) == IFLA_PROTINFO) {
985 rc = nla_parse_nested(bp_tb, IFLA_BRPORT_MAX, attr,
986 qeth_brport_policy, extack);
987 if (rc)
988 return rc;
989 has_protinfo = true;
990 } else if (nla_type(attr) == IFLA_AF_SPEC) {
991 nla_for_each_nested(nested_attr, attr, rem2) {
992 if (nla_type(nested_attr) == IFLA_BRIDGE_FLAGS)
993 continue;
994 NL_SET_ERR_MSG_ATTR(extack, nested_attr,
995 "Unsupported attribute");
996 return -EINVAL;
997 }
998 } else {
999 NL_SET_ERR_MSG_ATTR(extack, attr, "Unsupported attribute");
1000 return -EINVAL;
1001 }
1002 }
1003 if (!has_protinfo)
1004 return 0;
1005 if (!bp_tb[IFLA_BRPORT_LEARNING_SYNC])
1006 return -EINVAL;
1007 if (!(priv->brport_hw_features & BR_LEARNING_SYNC)) {
1008 NL_SET_ERR_MSG_ATTR(extack, bp_tb[IFLA_BRPORT_LEARNING_SYNC],
1009 "Operation not supported by HW");
1010 return -EOPNOTSUPP;
1011 }
1012 if (!IS_ENABLED(CONFIG_NET_SWITCHDEV)) {
1013 NL_SET_ERR_MSG_ATTR(extack, bp_tb[IFLA_BRPORT_LEARNING_SYNC],
1014 "Requires NET_SWITCHDEV");
1015 return -EOPNOTSUPP;
1016 }
1017 enable = !!nla_get_u8(bp_tb[IFLA_BRPORT_LEARNING_SYNC]);
1018
1019 if (enable == !!(priv->brport_features & BR_LEARNING_SYNC))
1020 return 0;
1021
1022 mutex_lock(&card->sbp_lock);
1023 /* do not change anything if BridgePort is enabled */
1024 if (qeth_bridgeport_is_in_use(card)) {
1025 NL_SET_ERR_MSG(extack, "n/a (BridgePort)");
1026 rc = -EBUSY;
1027 } else if (enable) {
1028 qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
1029 rc = qeth_l2_dev2br_an_set(card, true);
1030 if (rc) {
1031 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
1032 } else {
1033 priv->brport_features |= BR_LEARNING_SYNC;
1034 qeth_l2_br2dev_get();
1035 }
1036 } else {
1037 rc = qeth_l2_dev2br_an_set(card, false);
1038 if (!rc) {
1039 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
1040 priv->brport_features ^= BR_LEARNING_SYNC;
1041 qeth_l2_dev2br_fdb_flush(card);
1042 qeth_l2_br2dev_put();
1043 }
1044 }
1045 mutex_unlock(&card->sbp_lock);
1046
1047 return rc;
1048 }
1049
1050 static const struct net_device_ops qeth_l2_iqd_netdev_ops = {
1051 .ndo_open = qeth_open,
1052 .ndo_stop = qeth_stop,
1053 .ndo_get_stats64 = qeth_get_stats64,
1054 .ndo_start_xmit = qeth_l2_hard_start_xmit,
1055 .ndo_features_check = qeth_features_check,
1056 .ndo_select_queue = qeth_l2_iqd_select_queue,
1057 .ndo_validate_addr = qeth_l2_validate_addr,
1058 .ndo_set_rx_mode = qeth_l2_set_rx_mode,
1059 .ndo_eth_ioctl = qeth_do_ioctl,
1060 .ndo_siocdevprivate = qeth_siocdevprivate,
1061 .ndo_set_mac_address = qeth_l2_set_mac_address,
1062 .ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid,
1063 .ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid,
1064 .ndo_tx_timeout = qeth_tx_timeout,
1065 .ndo_fix_features = qeth_fix_features,
1066 .ndo_set_features = qeth_set_features,
1067 .ndo_bridge_getlink = qeth_l2_bridge_getlink,
1068 .ndo_bridge_setlink = qeth_l2_bridge_setlink,
1069 };
1070
1071 static const struct net_device_ops qeth_l2_osa_netdev_ops = {
1072 .ndo_open = qeth_open,
1073 .ndo_stop = qeth_stop,
1074 .ndo_get_stats64 = qeth_get_stats64,
1075 .ndo_start_xmit = qeth_l2_hard_start_xmit,
1076 .ndo_features_check = qeth_features_check,
1077 .ndo_select_queue = qeth_osa_select_queue,
1078 .ndo_validate_addr = qeth_l2_validate_addr,
1079 .ndo_set_rx_mode = qeth_l2_set_rx_mode,
1080 .ndo_eth_ioctl = qeth_do_ioctl,
1081 .ndo_siocdevprivate = qeth_siocdevprivate,
1082 .ndo_set_mac_address = qeth_l2_set_mac_address,
1083 .ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid,
1084 .ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid,
1085 .ndo_tx_timeout = qeth_tx_timeout,
1086 .ndo_fix_features = qeth_fix_features,
1087 .ndo_set_features = qeth_set_features,
1088 };
1089
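/* Wire up the L2 netdev ops (IQD vs. OSA), advertise the offloads that the
 * card's IPA assists support (VLAN filtering, TX/RX checksumming, TSO) and
 * register the net_device.
 */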
1090 static int qeth_l2_setup_netdev(struct qeth_card *card)
1091 {
1092 card->dev->netdev_ops = IS_IQD(card) ? &qeth_l2_iqd_netdev_ops :
1093 &qeth_l2_osa_netdev_ops;
1094 card->dev->needed_headroom = sizeof(struct qeth_hdr);
1095 card->dev->priv_flags |= IFF_UNICAST_FLT;
1096
1097 if (IS_OSM(card)) {
1098 card->dev->features |= NETIF_F_VLAN_CHALLENGED;
1099 } else {
1100 if (!IS_VM_NIC(card))
1101 card->dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1102 card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1103 }
1104
1105 if (IS_OSD(card) && !IS_VM_NIC(card)) {
1106 card->dev->features |= NETIF_F_SG;
1107 /* OSA 3S and earlier have no RX/TX checksum offload support */
1108 if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) {
1109 card->dev->hw_features |= NETIF_F_IP_CSUM;
1110 card->dev->vlan_features |= NETIF_F_IP_CSUM;
1111 }
1112 }
1113 if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) {
1114 card->dev->hw_features |= NETIF_F_IPV6_CSUM;
1115 card->dev->vlan_features |= NETIF_F_IPV6_CSUM;
1116 }
1117 if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM) ||
1118 qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) {
1119 card->dev->hw_features |= NETIF_F_RXCSUM;
1120 card->dev->vlan_features |= NETIF_F_RXCSUM;
1121 }
1122 if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
1123 card->dev->hw_features |= NETIF_F_TSO;
1124 card->dev->vlan_features |= NETIF_F_TSO;
1125 }
1126 if (qeth_is_supported6(card, IPA_OUTBOUND_TSO)) {
1127 card->dev->hw_features |= NETIF_F_TSO6;
1128 card->dev->vlan_features |= NETIF_F_TSO6;
1129 }
1130
1131 if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1132 card->dev->needed_headroom = sizeof(struct qeth_hdr_tso);
1133 netif_keep_dst(card->dev);
1134 netif_set_tso_max_size(card->dev,
1135 PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1));
1136 }
1137
1138 netif_napi_add(card->dev, &card->napi, qeth_poll);
1139 return register_netdev(card->dev);
1140 }
1141
1142 static void qeth_l2_trace_features(struct qeth_card *card)
1143 {
1144 /* Set BridgePort features */
1145 QETH_CARD_TEXT(card, 2, "featuSBP");
1146 QETH_CARD_HEX(card, 2, &card->options.sbp.supported_funcs,
1147 sizeof(card->options.sbp.supported_funcs));
1148 /* VNIC Characteristics features */
1149 QETH_CARD_TEXT(card, 2, "feaVNICC");
1150 QETH_CARD_HEX(card, 2, &card->options.vnicc.sup_chars,
1151 sizeof(card->options.vnicc.sup_chars));
1152 }
1153
1154 static void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
1155 {
1156 if (!card->options.sbp.reflect_promisc &&
1157 card->options.sbp.role != QETH_SBP_ROLE_NONE) {
1158 /* Conditional to avoid spurious error messages */
1159 qeth_bridgeport_setrole(card, card->options.sbp.role);
1160 /* Let the callback function refresh the stored role value. */
1161 qeth_bridgeport_query_ports(card, &card->options.sbp.role,
1162 NULL);
1163 }
1164 if (card->options.sbp.hostnotification) {
1165 if (qeth_bridgeport_an_set(card, 1))
1166 card->options.sbp.hostnotification = 0;
1167 }
1168 }
1169
1170 /**
1171 * qeth_l2_detect_dev2br_support() -
1172 * Detect whether this card supports 'dev to bridge fdb network address
1173 * change notification' and thus can support the learning_sync bridgeport
1174 * attribute
1175 * @card: qeth_card structure pointer
1176 */
1177 static void qeth_l2_detect_dev2br_support(struct qeth_card *card)
1178 {
1179 struct qeth_priv *priv = netdev_priv(card->dev);
1180 bool dev2br_supported;
1181
1182 QETH_CARD_TEXT(card, 2, "d2brsup");
1183 if (!IS_IQD(card))
1184 return;
1185
1186 /* dev2br requires valid cssid,iid,chid */
1187 dev2br_supported = card->info.ids_valid &&
1188 css_general_characteristics.enarf;
1189 QETH_CARD_TEXT_(card, 2, "D2Bsup%02x", dev2br_supported);
1190
1191 if (dev2br_supported)
1192 priv->brport_hw_features |= BR_LEARNING_SYNC;
1193 else
1194 priv->brport_hw_features &= ~BR_LEARNING_SYNC;
1195 }
1196
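/* Re-enable 'dev to bridge' address notification for a bridgeport that had
 * learning_sync active, retrying once on -EAGAIN and clearing the feature
 * again if the hardware no longer supports it or setup keeps failing.
 */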
1197 static void qeth_l2_enable_brport_features(struct qeth_card *card)
1198 {
1199 struct qeth_priv *priv = netdev_priv(card->dev);
1200 int rc;
1201
1202 if (priv->brport_features & BR_LEARNING_SYNC) {
1203 if (priv->brport_hw_features & BR_LEARNING_SYNC) {
1204 qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
1205 rc = qeth_l2_dev2br_an_set(card, true);
1206 if (rc == -EAGAIN) {
1207 /* Recoverable error, retry once */
1208 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
1209 qeth_l2_dev2br_fdb_flush(card);
1210 qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
1211 rc = qeth_l2_dev2br_an_set(card, true);
1212 }
1213 if (rc) {
1214 netdev_err(card->dev,
1215 "failed to enable bridge learning_sync: %d\n",
1216 rc);
1217 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
1218 qeth_l2_dev2br_fdb_flush(card);
1219 priv->brport_features ^= BR_LEARNING_SYNC;
1220 }
1221 } else {
1222 dev_warn(&card->gdev->dev,
1223 "bridge learning_sync not supported\n");
1224 priv->brport_features ^= BR_LEARNING_SYNC;
1225 }
1226 }
1227 }
1228
1229 /* SETBRIDGEPORT support, async notifications */
1230
1231 enum qeth_an_event_type {anev_reg_unreg, anev_abort, anev_reset};
1232
1233 /**
1234 * qeth_bridge_emit_host_event() - bridgeport address change notification
1235 * @card: qeth_card structure pointer, for udev events.
1236 * @evtype: "normal" register/unregister, or abort, or reset. For abort
1237 * and reset, token and addr_lnid are unused and may be NULL.
1238 * @code: event bitmask: high order bit 0x80 value 1 means removal of an
1239 * object, 0 - addition of an object.
1240 * 0x01 - VLAN, 0x02 - MAC, 0x03 - VLAN and MAC.
1241 * @token: "network token" structure identifying physical address of the port.
1242 * @addr_lnid: pointer to structure with MAC address and VLAN ID.
1243 *
1244 * This function is called when registrations and deregistrations are
1245 * reported by the hardware, and also when notifications are enabled -
1246 * for all currently registered addresses.
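 *
 * The resulting uevent environment consists of strings such as
 * "BRIDGEDHOST=register", "VLAN=<id>", "MAC=<addr>" and the NTOK_* values
 * assembled below (illustrative examples, derived from the scnprintf()
 * formats in this function).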
1247 */
1248 static void qeth_bridge_emit_host_event(struct qeth_card *card,
1249 enum qeth_an_event_type evtype,
1250 u8 code,
1251 struct net_if_token *token,
1252 struct mac_addr_lnid *addr_lnid)
1253 {
1254 char str[7][32];
1255 char *env[8];
1256 int i = 0;
1257
1258 switch (evtype) {
1259 case anev_reg_unreg:
1260 scnprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=%s",
1261 (code & IPA_ADDR_CHANGE_CODE_REMOVAL)
1262 ? "deregister" : "register");
1263 env[i] = str[i]; i++;
1264 if (code & IPA_ADDR_CHANGE_CODE_VLANID) {
1265 scnprintf(str[i], sizeof(str[i]), "VLAN=%d",
1266 addr_lnid->lnid);
1267 env[i] = str[i]; i++;
1268 }
1269 if (code & IPA_ADDR_CHANGE_CODE_MACADDR) {
1270 scnprintf(str[i], sizeof(str[i]), "MAC=%pM",
1271 addr_lnid->mac);
1272 env[i] = str[i]; i++;
1273 }
1274 scnprintf(str[i], sizeof(str[i]), "NTOK_BUSID=%x.%x.%04x",
1275 token->cssid, token->ssid, token->devnum);
1276 env[i] = str[i]; i++;
1277 scnprintf(str[i], sizeof(str[i]), "NTOK_IID=%02x", token->iid);
1278 env[i] = str[i]; i++;
1279 scnprintf(str[i], sizeof(str[i]), "NTOK_CHPID=%02x",
1280 token->chpid);
1281 env[i] = str[i]; i++;
1282 scnprintf(str[i], sizeof(str[i]), "NTOK_CHID=%04x",
1283 token->chid);
1284 env[i] = str[i]; i++;
1285 break;
1286 case anev_abort:
1287 scnprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=abort");
1288 env[i] = str[i]; i++;
1289 break;
1290 case anev_reset:
1291 scnprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=reset");
1292 env[i] = str[i]; i++;
1293 break;
1294 }
1295 env[i] = NULL;
1296 kobject_uevent_env(&card->gdev->dev.kobj, KOBJ_CHANGE, env);
1297 }
1298
1299 struct qeth_bridge_state_data {
1300 struct work_struct worker;
1301 struct qeth_card *card;
1302 u8 role;
1303 u8 state;
1304 };
1305
1306 static void qeth_bridge_state_change_worker(struct work_struct *work)
1307 {
1308 struct qeth_bridge_state_data *data =
1309 container_of(work, struct qeth_bridge_state_data, worker);
1310 char env_locrem[32];
1311 char env_role[32];
1312 char env_state[32];
1313 char *env[] = {
1314 env_locrem,
1315 env_role,
1316 env_state,
1317 NULL
1318 };
1319
1320 scnprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
1321 scnprintf(env_role, sizeof(env_role), "ROLE=%s",
1322 (data->role == QETH_SBP_ROLE_NONE) ? "none" :
1323 (data->role == QETH_SBP_ROLE_PRIMARY) ? "primary" :
1324 (data->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" :
1325 "<INVALID>");
1326 scnprintf(env_state, sizeof(env_state), "STATE=%s",
1327 (data->state == QETH_SBP_STATE_INACTIVE) ? "inactive" :
1328 (data->state == QETH_SBP_STATE_STANDBY) ? "standby" :
1329 (data->state == QETH_SBP_STATE_ACTIVE) ? "active" :
1330 "<INVALID>");
1331 kobject_uevent_env(&data->card->gdev->dev.kobj,
1332 KOBJ_CHANGE, env);
1333 kfree(data);
1334 }
1335
1336 static void qeth_bridge_state_change(struct qeth_card *card,
1337 struct qeth_ipa_cmd *cmd)
1338 {
1339 struct qeth_sbp_port_data *qports = &cmd->data.sbp.data.port_data;
1340 struct qeth_bridge_state_data *data;
1341
1342 QETH_CARD_TEXT(card, 2, "brstchng");
1343 if (qports->num_entries == 0) {
1344 QETH_CARD_TEXT(card, 2, "BPempty");
1345 return;
1346 }
1347 if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
1348 QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length);
1349 return;
1350 }
1351
1352 data = kzalloc(sizeof(*data), GFP_ATOMIC);
1353 if (!data) {
1354 QETH_CARD_TEXT(card, 2, "BPSalloc");
1355 return;
1356 }
1357 INIT_WORK(&data->worker, qeth_bridge_state_change_worker);
1358 data->card = card;
1359 /* Information for the local port: */
1360 data->role = qports->entry[0].role;
1361 data->state = qports->entry[0].state;
1362
1363 queue_work(card->event_wq, &data->worker);
1364 }
1365
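/* Copy of an address-change event, processed from (delayed) work context;
 * the variable-sized entry[] array of ac_event is stored in extra space
 * allocated directly behind this struct (see qeth_addr_change_event()).
 */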
1366 struct qeth_addr_change_data {
1367 struct delayed_work dwork;
1368 struct qeth_card *card;
1369 struct qeth_ipacmd_addr_change ac_event;
1370 };
1371
1372 static void qeth_l2_dev2br_worker(struct work_struct *work)
1373 {
1374 struct delayed_work *dwork = to_delayed_work(work);
1375 struct qeth_addr_change_data *data;
1376 struct qeth_card *card;
1377 struct qeth_priv *priv;
1378 unsigned int i;
1379 int rc;
1380
1381 data = container_of(dwork, struct qeth_addr_change_data, dwork);
1382 card = data->card;
1383 priv = netdev_priv(card->dev);
1384
1385 QETH_CARD_TEXT(card, 4, "dev2brew");
1386
1387 if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE)
1388 goto free;
1389
1390 if (data->ac_event.lost_event_mask) {
1391 /* Potential re-config in progress, try again later: */
1392 if (!rtnl_trylock()) {
1393 queue_delayed_work(card->event_wq, dwork,
1394 msecs_to_jiffies(100));
1395 return;
1396 }
1397
1398 if (!netif_device_present(card->dev)) {
1399 rtnl_unlock();
1400 goto free;
1401 }
1402
1403 QETH_DBF_MESSAGE(3,
1404 "Address change notification overflow on device %x\n",
1405 CARD_DEVID(card));
1406 /* Card fdb and bridge fdb are out of sync, card has stopped
1407 * notifications (no need to drain_workqueue). Purge all
1408 * 'extern_learn' entries from the parent bridge and restart
1409 * the notifications.
1410 */
1411 qeth_l2_dev2br_fdb_flush(card);
1412 rc = qeth_l2_dev2br_an_set(card, true);
1413 if (rc) {
1414 /* TODO: if we want to retry after -EAGAIN, be
1415 * aware there could be stale entries in the
1416 * workqueue now, that need to be drained.
1417 * For now we give up:
1418 */
1419 netdev_err(card->dev,
1420 "bridge learning_sync failed to recover: %d\n",
1421 rc);
1422 WRITE_ONCE(card->info.pnso_mode,
1423 QETH_PNSO_NONE);
1424 /* To remove fdb entries reported by an_set: */
1425 qeth_l2_dev2br_fdb_flush(card);
1426 priv->brport_features ^= BR_LEARNING_SYNC;
1427 } else {
1428 QETH_DBF_MESSAGE(3,
1429 "Address Notification resynced on device %x\n",
1430 CARD_DEVID(card));
1431 }
1432
1433 rtnl_unlock();
1434 } else {
1435 for (i = 0; i < data->ac_event.num_entries; i++) {
1436 struct qeth_ipacmd_addr_change_entry *entry =
1437 &data->ac_event.entry[i];
1438 qeth_l2_dev2br_fdb_notify(card,
1439 entry->change_code,
1440 &entry->token,
1441 &entry->addr_lnid);
1442 }
1443 }
1444
1445 free:
1446 kfree(data);
1447 }
1448
1449 static void qeth_addr_change_event_worker(struct work_struct *work)
1450 {
1451 struct delayed_work *dwork = to_delayed_work(work);
1452 struct qeth_addr_change_data *data;
1453 struct qeth_card *card;
1454 int i;
1455
1456 data = container_of(dwork, struct qeth_addr_change_data, dwork);
1457 card = data->card;
1458
1459 QETH_CARD_TEXT(data->card, 4, "adrchgew");
1460
1461 if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE)
1462 goto free;
1463
1464 if (data->ac_event.lost_event_mask) {
1465 /* Potential re-config in progress, try again later: */
1466 if (!mutex_trylock(&card->sbp_lock)) {
1467 queue_delayed_work(card->event_wq, dwork,
1468 msecs_to_jiffies(100));
1469 return;
1470 }
1471
1472 dev_info(&data->card->gdev->dev,
1473 "Address change notification stopped on %s (%s)\n",
1474 netdev_name(card->dev),
1475 (data->ac_event.lost_event_mask == 0x01)
1476 ? "Overflow"
1477 : (data->ac_event.lost_event_mask == 0x02)
1478 ? "Bridge port state change"
1479 : "Unknown reason");
1480
1481 data->card->options.sbp.hostnotification = 0;
1482 card->info.pnso_mode = QETH_PNSO_NONE;
1483 mutex_unlock(&data->card->sbp_lock);
1484 qeth_bridge_emit_host_event(data->card, anev_abort,
1485 0, NULL, NULL);
1486 } else
1487 for (i = 0; i < data->ac_event.num_entries; i++) {
1488 struct qeth_ipacmd_addr_change_entry *entry =
1489 &data->ac_event.entry[i];
1490 qeth_bridge_emit_host_event(data->card,
1491 anev_reg_unreg,
1492 entry->change_code,
1493 &entry->token,
1494 &entry->addr_lnid);
1495 }
1496
1497 free:
1498 kfree(data);
1499 }
1500
1501 static void qeth_addr_change_event(struct qeth_card *card,
1502 struct qeth_ipa_cmd *cmd)
1503 {
1504 struct qeth_ipacmd_addr_change *hostevs =
1505 &cmd->data.addrchange;
1506 struct qeth_addr_change_data *data;
1507 int extrasize;
1508
1509 if (card->info.pnso_mode == QETH_PNSO_NONE)
1510 return;
1511
1512 QETH_CARD_TEXT(card, 4, "adrchgev");
1513 if (cmd->hdr.return_code != 0x0000) {
1514 if (cmd->hdr.return_code == 0x0010) {
1515 if (hostevs->lost_event_mask == 0x00)
1516 hostevs->lost_event_mask = 0xff;
1517 } else {
1518 QETH_CARD_TEXT_(card, 2, "ACHN%04x",
1519 cmd->hdr.return_code);
1520 return;
1521 }
1522 }
1523 extrasize = sizeof(struct qeth_ipacmd_addr_change_entry) *
1524 hostevs->num_entries;
1525 data = kzalloc(sizeof(struct qeth_addr_change_data) + extrasize,
1526 GFP_ATOMIC);
1527 if (!data) {
1528 QETH_CARD_TEXT(card, 2, "ACNalloc");
1529 return;
1530 }
1531 if (card->info.pnso_mode == QETH_PNSO_BRIDGEPORT)
1532 INIT_DELAYED_WORK(&data->dwork, qeth_addr_change_event_worker);
1533 else
1534 INIT_DELAYED_WORK(&data->dwork, qeth_l2_dev2br_worker);
1535 data->card = card;
1536 data->ac_event = *hostevs;
1537 memcpy(data->ac_event.entry, hostevs->entry, extrasize);
1538 queue_delayed_work(card->event_wq, &data->dwork, 0);
1539 }
1540
1541 /* SETBRIDGEPORT support; sending commands */
1542
1543 struct _qeth_sbp_cbctl {
1544 union {
1545 u32 supported;
1546 struct {
1547 enum qeth_sbp_roles *role;
1548 enum qeth_sbp_states *state;
1549 } qports;
1550 } data;
1551 };
1552
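/* Translate the combined IPA/SETBRIDGEPORT return codes into an errno and
 * emit a user-visible error message for the well-known failure cases.
 */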
1553 static int qeth_bridgeport_makerc(struct qeth_card *card,
1554 struct qeth_ipa_cmd *cmd)
1555 {
1556 struct qeth_ipacmd_setbridgeport *sbp = &cmd->data.sbp;
1557 enum qeth_ipa_sbp_cmd setcmd = sbp->hdr.command_code;
1558 u16 ipa_rc = cmd->hdr.return_code;
1559 u16 sbp_rc = sbp->hdr.return_code;
1560 int rc;
1561
1562 if (ipa_rc == IPA_RC_SUCCESS && sbp_rc == IPA_RC_SUCCESS)
1563 return 0;
1564
1565 if ((IS_IQD(card) && ipa_rc == IPA_RC_SUCCESS) ||
1566 (!IS_IQD(card) && ipa_rc == sbp_rc)) {
1567 switch (sbp_rc) {
1568 case IPA_RC_SUCCESS:
1569 rc = 0;
1570 break;
1571 case IPA_RC_L2_UNSUPPORTED_CMD:
1572 case IPA_RC_UNSUPPORTED_COMMAND:
1573 rc = -EOPNOTSUPP;
1574 break;
1575 case IPA_RC_SBP_OSA_NOT_CONFIGURED:
1576 case IPA_RC_SBP_IQD_NOT_CONFIGURED:
1577 rc = -ENODEV; /* maybe not the best code here? */
1578 dev_err(&card->gdev->dev,
1579 "The device is not configured as a Bridge Port\n");
1580 break;
1581 case IPA_RC_SBP_OSA_OS_MISMATCH:
1582 case IPA_RC_SBP_IQD_OS_MISMATCH:
1583 rc = -EPERM;
1584 dev_err(&card->gdev->dev,
1585 "A Bridge Port is already configured by a different operating system\n");
1586 break;
1587 case IPA_RC_SBP_OSA_ANO_DEV_PRIMARY:
1588 case IPA_RC_SBP_IQD_ANO_DEV_PRIMARY:
1589 switch (setcmd) {
1590 case IPA_SBP_SET_PRIMARY_BRIDGE_PORT:
1591 rc = -EEXIST;
1592 dev_err(&card->gdev->dev,
1593 "The LAN already has a primary Bridge Port\n");
1594 break;
1595 case IPA_SBP_SET_SECONDARY_BRIDGE_PORT:
1596 rc = -EBUSY;
1597 dev_err(&card->gdev->dev,
1598 "The device is already a primary Bridge Port\n");
1599 break;
1600 default:
1601 rc = -EIO;
1602 }
1603 break;
1604 case IPA_RC_SBP_OSA_CURRENT_SECOND:
1605 case IPA_RC_SBP_IQD_CURRENT_SECOND:
1606 rc = -EBUSY;
1607 dev_err(&card->gdev->dev,
1608 "The device is already a secondary Bridge Port\n");
1609 break;
1610 case IPA_RC_SBP_OSA_LIMIT_SECOND:
1611 case IPA_RC_SBP_IQD_LIMIT_SECOND:
1612 rc = -EEXIST;
1613 dev_err(&card->gdev->dev,
1614 "The LAN cannot have more secondary Bridge Ports\n");
1615 break;
1616 case IPA_RC_SBP_OSA_CURRENT_PRIMARY:
1617 case IPA_RC_SBP_IQD_CURRENT_PRIMARY:
1618 rc = -EBUSY;
1619 dev_err(&card->gdev->dev,
1620 "The device is already a primary Bridge Port\n");
1621 break;
1622 case IPA_RC_SBP_OSA_NOT_AUTHD_BY_ZMAN:
1623 case IPA_RC_SBP_IQD_NOT_AUTHD_BY_ZMAN:
1624 rc = -EACCES;
1625 dev_err(&card->gdev->dev,
1626 "The device is not authorized to be a Bridge Port\n");
1627 break;
1628 default:
1629 rc = -EIO;
1630 }
1631 } else {
1632 switch (ipa_rc) {
1633 case IPA_RC_NOTSUPP:
1634 rc = -EOPNOTSUPP;
1635 break;
1636 case IPA_RC_UNSUPPORTED_COMMAND:
1637 rc = -EOPNOTSUPP;
1638 break;
1639 default:
1640 rc = -EIO;
1641 }
1642 }
1643
1644 if (rc) {
1645 QETH_CARD_TEXT_(card, 2, "SBPi%04x", ipa_rc);
1646 QETH_CARD_TEXT_(card, 2, "SBPc%04x", sbp_rc);
1647 }
1648 return rc;
1649 }
1650
1651 static struct qeth_cmd_buffer *qeth_sbp_build_cmd(struct qeth_card *card,
1652 enum qeth_ipa_sbp_cmd sbp_cmd,
1653 unsigned int data_length)
1654 {
1655 enum qeth_ipa_cmds ipa_cmd = IS_IQD(card) ? IPA_CMD_SETBRIDGEPORT_IQD :
1656 IPA_CMD_SETBRIDGEPORT_OSA;
1657 struct qeth_ipacmd_sbp_hdr *hdr;
1658 struct qeth_cmd_buffer *iob;
1659
1660 iob = qeth_ipa_alloc_cmd(card, ipa_cmd, QETH_PROT_NONE,
1661 data_length +
1662 offsetof(struct qeth_ipacmd_setbridgeport,
1663 data));
1664 if (!iob)
1665 return iob;
1666
1667 hdr = &__ipa_cmd(iob)->data.sbp.hdr;
1668 hdr->cmdlength = sizeof(*hdr) + data_length;
1669 hdr->command_code = sbp_cmd;
1670 hdr->used_total = 1;
1671 hdr->seq_no = 1;
1672 return iob;
1673 }
1674
1675 static int qeth_bridgeport_query_support_cb(struct qeth_card *card,
1676 struct qeth_reply *reply, unsigned long data)
1677 {
1678 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
1679 struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
1680 int rc;
1681
1682 QETH_CARD_TEXT(card, 2, "brqsupcb");
1683 rc = qeth_bridgeport_makerc(card, cmd);
1684 if (rc)
1685 return rc;
1686
1687 cbctl->data.supported =
1688 cmd->data.sbp.data.query_cmds_supp.supported_cmds;
1689 return 0;
1690 }
1691
1692 /**
1693 * qeth_bridgeport_query_support() - store bitmask of supported subfunctions.
1694 * @card: qeth_card structure pointer.
1695 *
1696 * Sets bitmask of supported setbridgeport subfunctions in the qeth_card
1697 * structure: card->options.sbp.supported_funcs.
1698 */
1699 static void qeth_bridgeport_query_support(struct qeth_card *card)
1700 {
1701 struct qeth_cmd_buffer *iob;
1702 struct _qeth_sbp_cbctl cbctl;
1703
1704 QETH_CARD_TEXT(card, 2, "brqsuppo");
1705 iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_COMMANDS_SUPPORTED,
1706 SBP_DATA_SIZEOF(query_cmds_supp));
1707 if (!iob)
1708 return;
1709
1710 if (qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_support_cb,
1711 &cbctl)) {
1712 card->options.sbp.role = QETH_SBP_ROLE_NONE;
1713 card->options.sbp.supported_funcs = 0;
1714 return;
1715 }
1716 card->options.sbp.supported_funcs = cbctl.data.supported;
1717 }
1718
1719 static int qeth_bridgeport_query_ports_cb(struct qeth_card *card,
1720 struct qeth_reply *reply, unsigned long data)
1721 {
1722 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
1723 struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
1724 struct qeth_sbp_port_data *qports;
1725 int rc;
1726
1727 QETH_CARD_TEXT(card, 2, "brqprtcb");
1728 rc = qeth_bridgeport_makerc(card, cmd);
1729 if (rc)
1730 return rc;
1731
1732 qports = &cmd->data.sbp.data.port_data;
1733 if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
1734 QETH_CARD_TEXT_(card, 2, "SBPs%04x", qports->entry_length);
1735 return -EINVAL;
1736 }
1737 /* first entry contains the state of the local port */
1738 if (qports->num_entries > 0) {
1739 if (cbctl->data.qports.role)
1740 *cbctl->data.qports.role = qports->entry[0].role;
1741 if (cbctl->data.qports.state)
1742 *cbctl->data.qports.state = qports->entry[0].state;
1743 }
1744 return 0;
1745 }
1746
1747 /**
1748 * qeth_bridgeport_query_ports() - query local bridgeport status.
1749 * @card: qeth_card structure pointer.
1750 * @role: Role of the port: 0-none, 1-primary, 2-secondary.
1751 * @state: State of the port: 0-inactive, 1-standby, 2-active.
1752 *
1753 * Returns negative errno-compatible error indication or 0 on success.
1754 *
1755 * 'role' and 'state' are not updated in case of hardware operation failure.
1756 */
1757 int qeth_bridgeport_query_ports(struct qeth_card *card,
1758 enum qeth_sbp_roles *role, enum qeth_sbp_states *state)
1759 {
1760 struct qeth_cmd_buffer *iob;
1761 struct _qeth_sbp_cbctl cbctl = {
1762 .data = {
1763 .qports = {
1764 .role = role,
1765 .state = state,
1766 },
1767 },
1768 };
1769
1770 QETH_CARD_TEXT(card, 2, "brqports");
1771 if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS))
1772 return -EOPNOTSUPP;
1773 iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_BRIDGE_PORTS, 0);
1774 if (!iob)
1775 return -ENOMEM;
1776
1777 return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb,
1778 &cbctl);
1779 }
1780
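/* Completion callback for the set/reset role sub-commands; only the IPA
 * return code needs to be translated.
 */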
1781 static int qeth_bridgeport_set_cb(struct qeth_card *card,
1782 struct qeth_reply *reply, unsigned long data)
1783 {
1784 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
1785
1786 QETH_CARD_TEXT(card, 2, "brsetrcb");
1787 return qeth_bridgeport_makerc(card, cmd);
1788 }
1789
1790 /**
1791 * qeth_bridgeport_setrole() - Set the Bridge Port role of the port.
1792 * @card: qeth_card structure pointer.
1793 * @role: Role to assign.
1794 *
1795 * Returns negative errno-compatible error indication or 0 on success.
1796 */
1797 int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
1798 {
1799 struct qeth_cmd_buffer *iob;
1800 enum qeth_ipa_sbp_cmd setcmd;
1801 unsigned int cmdlength = 0;
1802
1803 QETH_CARD_TEXT(card, 2, "brsetrol");
1804 switch (role) {
1805 case QETH_SBP_ROLE_NONE:
1806 setcmd = IPA_SBP_RESET_BRIDGE_PORT_ROLE;
1807 break;
1808 case QETH_SBP_ROLE_PRIMARY:
1809 setcmd = IPA_SBP_SET_PRIMARY_BRIDGE_PORT;
1810 cmdlength = SBP_DATA_SIZEOF(set_primary);
1811 break;
1812 case QETH_SBP_ROLE_SECONDARY:
1813 setcmd = IPA_SBP_SET_SECONDARY_BRIDGE_PORT;
1814 break;
1815 default:
1816 return -EINVAL;
1817 }
1818 if (!(card->options.sbp.supported_funcs & setcmd))
1819 return -EOPNOTSUPP;
1820 iob = qeth_sbp_build_cmd(card, setcmd, cmdlength);
1821 if (!iob)
1822 return -ENOMEM;
1823
1824 return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb, NULL);
1825 }
1826
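/* Emit one udev host-registration event per address reported by PNSO; the
 * VLAN ID is only included when it is within the valid range.
 */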
1827 static void qeth_bridgeport_an_set_cb(void *priv,
1828 struct chsc_pnso_naid_l2 *entry)
1829 {
1830 struct qeth_card *card = (struct qeth_card *)priv;
1831 u8 code;
1832
1833 code = IPA_ADDR_CHANGE_CODE_MACADDR;
1834 if (entry->addr_lnid.lnid < VLAN_N_VID)
1835 code |= IPA_ADDR_CHANGE_CODE_VLANID;
1836 qeth_bridge_emit_host_event(card, anev_reg_unreg, code,
1837 (struct net_if_token *)&entry->nit,
1838 (struct mac_addr_lnid *)&entry->addr_lnid);
1839 }
1840
1841 /**
1842 * qeth_bridgeport_an_set() - Enable or disable bridgeport address notification
1843 * @card: qeth_card structure pointer.
1844 * @enable: 0 - disable, non-zero - enable notifications
1845 *
1846 * Returns negative errno-compatible error indication or 0 on success.
1847 *
1848 * On enable, emits a series of address notification udev events for all
1849 * currently registered hosts.
1850 */
1851 int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
1852 {
1853 int rc;
1854
1855 if (!card->options.sbp.supported_funcs)
1856 return -EOPNOTSUPP;
1857
1858 if (enable) {
1859 qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL);
1860 qeth_l2_set_pnso_mode(card, QETH_PNSO_BRIDGEPORT);
1861 rc = qeth_l2_pnso(card, PNSO_OC_NET_BRIDGE_INFO, 1,
1862 qeth_bridgeport_an_set_cb, card);
1863 if (rc)
1864 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
1865 } else {
1866 rc = qeth_l2_pnso(card, PNSO_OC_NET_BRIDGE_INFO, 0, NULL, NULL);
1867 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
1868 }
1869 return rc;
1870 }
1871
1872 /* VNIC Characteristics support */
1873
1874 /* handle VNICC IPA command return codes; convert to error codes */
1875 static int qeth_l2_vnicc_makerc(struct qeth_card *card, u16 ipa_rc)
1876 {
1877 int rc;
1878
1879 switch (ipa_rc) {
1880 case IPA_RC_SUCCESS:
1881 return ipa_rc;
1882 case IPA_RC_L2_UNSUPPORTED_CMD:
1883 case IPA_RC_NOTSUPP:
1884 rc = -EOPNOTSUPP;
1885 break;
1886 case IPA_RC_VNICC_OOSEQ:
1887 rc = -EALREADY;
1888 break;
1889 case IPA_RC_VNICC_VNICBP:
1890 rc = -EBUSY;
1891 break;
1892 case IPA_RC_L2_ADDR_TABLE_FULL:
1893 rc = -ENOSPC;
1894 break;
1895 case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
1896 rc = -EACCES;
1897 break;
1898 default:
1899 rc = -EIO;
1900 }
1901
1902 QETH_CARD_TEXT_(card, 2, "err%04x", ipa_rc);
1903 return rc;
1904 }
1905
1906 /* generic VNICC request call back */
1907 static int qeth_l2_vnicc_request_cb(struct qeth_card *card,
1908 struct qeth_reply *reply,
1909 unsigned long data)
1910 {
1911 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
1912 struct qeth_ipacmd_vnicc *rep = &cmd->data.vnicc;
1913 u32 sub_cmd = cmd->data.vnicc.hdr.sub_command;
1914
1915 QETH_CARD_TEXT(card, 2, "vniccrcb");
1916 if (cmd->hdr.return_code)
1917 return qeth_l2_vnicc_makerc(card, cmd->hdr.return_code);
1918 /* return results to caller */
1919 card->options.vnicc.sup_chars = rep->vnicc_cmds.supported;
1920 card->options.vnicc.cur_chars = rep->vnicc_cmds.enabled;
1921
1922 if (sub_cmd == IPA_VNICC_QUERY_CMDS)
1923 *(u32 *)reply->param = rep->data.query_cmds.sup_cmds;
1924 else if (sub_cmd == IPA_VNICC_GET_TIMEOUT)
1925 *(u32 *)reply->param = rep->data.getset_timeout.timeout;
1926
1927 return 0;
1928 }
1929
1930 static struct qeth_cmd_buffer *qeth_l2_vnicc_build_cmd(struct qeth_card *card,
1931 u32 vnicc_cmd,
1932 unsigned int data_length)
1933 {
1934 struct qeth_ipacmd_vnicc_hdr *hdr;
1935 struct qeth_cmd_buffer *iob;
1936
1937 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_VNICC, QETH_PROT_NONE,
1938 data_length +
1939 offsetof(struct qeth_ipacmd_vnicc, data));
1940 if (!iob)
1941 return NULL;
1942
1943 hdr = &__ipa_cmd(iob)->data.vnicc.hdr;
1944 hdr->data_length = sizeof(*hdr) + data_length;
1945 hdr->sub_command = vnicc_cmd;
1946 return iob;
1947 }
1948
1949 /* VNICC query VNIC characteristics request */
1950 static int qeth_l2_vnicc_query_chars(struct qeth_card *card)
1951 {
1952 struct qeth_cmd_buffer *iob;
1953
1954 QETH_CARD_TEXT(card, 2, "vniccqch");
1955 iob = qeth_l2_vnicc_build_cmd(card, IPA_VNICC_QUERY_CHARS, 0);
1956 if (!iob)
1957 return -ENOMEM;
1958
1959 return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, NULL);
1960 }
1961
1962 /* VNICC query sub commands request */
1963 static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char,
1964 u32 *sup_cmds)
1965 {
1966 struct qeth_cmd_buffer *iob;
1967
1968 QETH_CARD_TEXT(card, 2, "vniccqcm");
1969 iob = qeth_l2_vnicc_build_cmd(card, IPA_VNICC_QUERY_CMDS,
1970 VNICC_DATA_SIZEOF(query_cmds));
1971 if (!iob)
1972 return -ENOMEM;
1973
1974 __ipa_cmd(iob)->data.vnicc.data.query_cmds.vnic_char = vnic_char;
1975
1976 return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, sup_cmds);
1977 }
1978
1979 /* VNICC enable/disable characteristic request */
1980 static int qeth_l2_vnicc_set_char(struct qeth_card *card, u32 vnic_char,
1981 u32 cmd)
1982 {
1983 struct qeth_cmd_buffer *iob;
1984
1985 QETH_CARD_TEXT(card, 2, "vniccedc");
1986 iob = qeth_l2_vnicc_build_cmd(card, cmd, VNICC_DATA_SIZEOF(set_char));
1987 if (!iob)
1988 return -ENOMEM;
1989
1990 __ipa_cmd(iob)->data.vnicc.data.set_char.vnic_char = vnic_char;
1991
1992 return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, NULL);
1993 }
1994
1995 /* VNICC get/set timeout for characteristic request */
1996 static int qeth_l2_vnicc_getset_timeout(struct qeth_card *card, u32 vnicc,
1997 u32 cmd, u32 *timeout)
1998 {
1999 struct qeth_vnicc_getset_timeout *getset_timeout;
2000 struct qeth_cmd_buffer *iob;
2001
2002 QETH_CARD_TEXT(card, 2, "vniccgst");
2003 iob = qeth_l2_vnicc_build_cmd(card, cmd,
2004 VNICC_DATA_SIZEOF(getset_timeout));
2005 if (!iob)
2006 return -ENOMEM;
2007
2008 getset_timeout = &__ipa_cmd(iob)->data.vnicc.data.getset_timeout;
2009 getset_timeout->vnic_char = vnicc;
2010
2011 if (cmd == IPA_VNICC_SET_TIMEOUT)
2012 getset_timeout->timeout = *timeout;
2013
2014 return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, timeout);
2015 }
2016
2017 /* recover user timeout setting */
2018 static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
2019 u32 *timeout)
2020 {
2021 if (card->options.vnicc.sup_chars & vnicc &&
2022 card->options.vnicc.getset_timeout_sup & vnicc &&
2023 !qeth_l2_vnicc_getset_timeout(card, vnicc, IPA_VNICC_SET_TIMEOUT,
2024 timeout))
2025 return false;
2026 *timeout = QETH_VNICC_DEFAULT_TIMEOUT;
2027 return true;
2028 }
2029
2030 /* set current VNICC flag state; called from sysfs store function */
2031 int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state)
2032 {
2033 int rc = 0;
2034 u32 cmd;
2035
2036 QETH_CARD_TEXT(card, 2, "vniccsch");
2037
2038 /* check if characteristic and enable/disable are supported */
2039 if (!(card->options.vnicc.sup_chars & vnicc) ||
2040 !(card->options.vnicc.set_char_sup & vnicc))
2041 return -EOPNOTSUPP;
2042
2043 if (qeth_bridgeport_is_in_use(card))
2044 return -EBUSY;
2045
2046 /* set enable/disable command and store wanted characteristic */
2047 if (state) {
2048 cmd = IPA_VNICC_ENABLE;
2049 card->options.vnicc.wanted_chars |= vnicc;
2050 } else {
2051 cmd = IPA_VNICC_DISABLE;
2052 card->options.vnicc.wanted_chars &= ~vnicc;
2053 }
2054
2055 /* do we need to do anything? */
2056 if (card->options.vnicc.cur_chars == card->options.vnicc.wanted_chars)
2057 return rc;
2058
2059 /* if the card is not ready, just track the new state and stop here */
2060 if (!qeth_card_hw_is_reachable(card)) {
2061 if (state)
2062 card->options.vnicc.cur_chars |= vnicc;
2063 else
2064 card->options.vnicc.cur_chars &= ~vnicc;
2065 return rc;
2066 }
2067
2068 rc = qeth_l2_vnicc_set_char(card, vnicc, cmd);
2069 if (rc)
2070 card->options.vnicc.wanted_chars =
2071 card->options.vnicc.cur_chars;
2072 else {
2073 /* successful online VNICC change; handle special cases */
2074 if (state && vnicc == QETH_VNICC_RX_BCAST)
2075 card->options.vnicc.rx_bcast_enabled = true;
2076 if (!state && vnicc == QETH_VNICC_LEARNING)
2077 qeth_l2_vnicc_recover_timeout(card, vnicc,
2078 &card->options.vnicc.learning_timeout);
2079 }
2080
2081 return rc;
2082 }
2083
2084 /* get current VNICC flag state; called from sysfs show function */
2085 int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state)
2086 {
2087 int rc = 0;
2088
2089 QETH_CARD_TEXT(card, 2, "vniccgch");
2090
2091 /* check if characteristic is supported */
2092 if (!(card->options.vnicc.sup_chars & vnicc))
2093 return -EOPNOTSUPP;
2094
2095 if (qeth_bridgeport_is_in_use(card))
2096 return -EBUSY;
2097
2098 /* if card is ready, query current VNICC state */
2099 if (qeth_card_hw_is_reachable(card))
2100 rc = qeth_l2_vnicc_query_chars(card);
2101
2102 *state = (card->options.vnicc.cur_chars & vnicc) ? true : false;
2103 return rc;
2104 }
2105
2106 /* set VNICC timeout; called from sysfs store function. Currently, only the
2107 * learning characteristic supports a timeout.
2108 */
2109 int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout)
2110 {
2111 int rc = 0;
2112
2113 QETH_CARD_TEXT(card, 2, "vniccsto");
2114
2115 /* check if characteristic and set_timeout are supported */
2116 if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) ||
2117 !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING))
2118 return -EOPNOTSUPP;
2119
2120 if (qeth_bridgeport_is_in_use(card))
2121 return -EBUSY;
2122
2123 /* do we need to do anything? */
2124 if (card->options.vnicc.learning_timeout == timeout)
2125 return rc;
2126
2127 /* if card is not ready, simply store the value internally and return */
2128 if (!qeth_card_hw_is_reachable(card)) {
2129 card->options.vnicc.learning_timeout = timeout;
2130 return rc;
2131 }
2132
2133 /* send timeout value to card; if successful, store value internally */
2134 rc = qeth_l2_vnicc_getset_timeout(card, QETH_VNICC_LEARNING,
2135 IPA_VNICC_SET_TIMEOUT, &timeout);
2136 if (!rc)
2137 card->options.vnicc.learning_timeout = timeout;
2138
2139 return rc;
2140 }
2141
2142 /* get current VNICC timeout; called from sysfs show function. Currently, only
2143 * the learning characteristic supports a timeout.
2144 */
2145 int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout)
2146 {
2147 int rc = 0;
2148
2149 QETH_CARD_TEXT(card, 2, "vniccgto");
2150
2151 /* check if characteristic and get_timeout are supported */
2152 if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) ||
2153 !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING))
2154 return -EOPNOTSUPP;
2155
2156 if (qeth_bridgeport_is_in_use(card))
2157 return -EBUSY;
2158
2159 /* if card is ready, get timeout. Otherwise, just return stored value */
2160 *timeout = card->options.vnicc.learning_timeout;
2161 if (qeth_card_hw_is_reachable(card))
2162 rc = qeth_l2_vnicc_getset_timeout(card, QETH_VNICC_LEARNING,
2163 IPA_VNICC_GET_TIMEOUT,
2164 timeout);
2165
2166 return rc;
2167 }
2168
2169 /* check if VNICC is currently enabled */
2170 static bool _qeth_l2_vnicc_is_in_use(struct qeth_card *card)
2171 {
2172 if (!card->options.vnicc.sup_chars)
2173 return false;
2174 /* default values are only OK if rx_bcast was not enabled by the user
2175 * or the card is offline.
2176 */
2177 if (card->options.vnicc.cur_chars == QETH_VNICC_DEFAULT) {
2178 if (!card->options.vnicc.rx_bcast_enabled ||
2179 !qeth_card_hw_is_reachable(card))
2180 return false;
2181 }
2182 return true;
2183 }
2184
2185 /**
2186 * qeth_bridgeport_allowed() - are any qeth_bridgeport functions allowed?
2187 * @card: qeth_card structure pointer
2188 *
2189 * qeth_bridgeport functionality is mutually exclusive with usage of the
2190 * VNIC Characteristics and dev2br address notifications
2191 */
2192 bool qeth_bridgeport_allowed(struct qeth_card *card)
2193 {
2194 struct qeth_priv *priv = netdev_priv(card->dev);
2195
2196 return (!_qeth_l2_vnicc_is_in_use(card) &&
2197 !(priv->brport_features & BR_LEARNING_SYNC));
2198 }
2199
2200 /* recover user characteristic setting */
2201 static bool qeth_l2_vnicc_recover_char(struct qeth_card *card, u32 vnicc,
2202 bool enable)
2203 {
2204 u32 cmd = enable ? IPA_VNICC_ENABLE : IPA_VNICC_DISABLE;
2205
2206 if (card->options.vnicc.sup_chars & vnicc &&
2207 card->options.vnicc.set_char_sup & vnicc &&
2208 !qeth_l2_vnicc_set_char(card, vnicc, cmd))
2209 return false;
2210 card->options.vnicc.wanted_chars &= ~vnicc;
2211 card->options.vnicc.wanted_chars |= QETH_VNICC_DEFAULT & vnicc;
2212 return true;
2213 }
2214
2215 /* (re-)initialize VNICC */
2216 static void qeth_l2_vnicc_init(struct qeth_card *card)
2217 {
2218 u32 *timeout = &card->options.vnicc.learning_timeout;
2219 bool enable, error = false;
2220 unsigned int chars_len, i;
2221 unsigned long chars_tmp;
2222 u32 sup_cmds, vnicc;
2223
2224 QETH_CARD_TEXT(card, 2, "vniccini");
2225 /* reset rx_bcast */
2226 card->options.vnicc.rx_bcast_enabled = 0;
2227 /* initial query and storage of VNIC characteristics */
2228 if (qeth_l2_vnicc_query_chars(card)) {
2229 if (card->options.vnicc.wanted_chars != QETH_VNICC_DEFAULT ||
2230 *timeout != QETH_VNICC_DEFAULT_TIMEOUT)
2231 dev_err(&card->gdev->dev, "Configuring the VNIC characteristics failed\n");
2232 /* fail quietly if user didn't change the default config */
2233 card->options.vnicc.sup_chars = 0;
2234 card->options.vnicc.cur_chars = 0;
2235 card->options.vnicc.wanted_chars = QETH_VNICC_DEFAULT;
2236 return;
2237 }
2238 /* get supported commands for each supported characteristic */
2239 chars_tmp = card->options.vnicc.sup_chars;
2240 chars_len = sizeof(card->options.vnicc.sup_chars) * BITS_PER_BYTE;
2241 for_each_set_bit(i, &chars_tmp, chars_len) {
2242 vnicc = BIT(i);
2243 if (qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds)) {
2244 sup_cmds = 0;
2245 error = true;
2246 }
2247 if ((sup_cmds & IPA_VNICC_SET_TIMEOUT) &&
2248 (sup_cmds & IPA_VNICC_GET_TIMEOUT))
2249 card->options.vnicc.getset_timeout_sup |= vnicc;
2250 else
2251 card->options.vnicc.getset_timeout_sup &= ~vnicc;
2252 if ((sup_cmds & IPA_VNICC_ENABLE) &&
2253 (sup_cmds & IPA_VNICC_DISABLE))
2254 card->options.vnicc.set_char_sup |= vnicc;
2255 else
2256 card->options.vnicc.set_char_sup &= ~vnicc;
2257 }
2258 /* enforce assumed default values and recover settings, if changed */
2259 error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
2260 timeout);
2261 /* Change chars, if necessary */
2262 chars_tmp = card->options.vnicc.wanted_chars ^
2263 card->options.vnicc.cur_chars;
2264 chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE;
2265 for_each_set_bit(i, &chars_tmp, chars_len) {
2266 vnicc = BIT(i);
2267 enable = card->options.vnicc.wanted_chars & vnicc;
2268 error |= qeth_l2_vnicc_recover_char(card, vnicc, enable);
2269 }
2270 if (error)
2271 dev_err(&card->gdev->dev, "Configuring the VNIC characteristics failed\n");
2272 }
2273
2274 /* configure default values of VNIC characteristics */
2275 static void qeth_l2_vnicc_set_defaults(struct qeth_card *card)
2276 {
2277 /* characteristics values */
2278 card->options.vnicc.sup_chars = QETH_VNICC_ALL;
2279 card->options.vnicc.cur_chars = QETH_VNICC_DEFAULT;
2280 card->options.vnicc.learning_timeout = QETH_VNICC_DEFAULT_TIMEOUT;
2281 /* supported commands */
2282 card->options.vnicc.set_char_sup = QETH_VNICC_ALL;
2283 card->options.vnicc.getset_timeout_sup = QETH_VNICC_LEARNING;
2284 /* settings wanted by users */
2285 card->options.vnicc.wanted_chars = QETH_VNICC_DEFAULT;
2286 }
2287
2288 static const struct device_type qeth_l2_devtype = {
2289 .name = "qeth_layer2",
2290 .groups = qeth_l2_attr_groups,
2291 };
2292
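/* Discipline setup: initialize VNICC defaults, the Bridge Port lock and the
 * layer-2 sysfs attributes, and prepare the RX mode worker.
 */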
2293 static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
2294 {
2295 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
2296 int rc;
2297
2298 qeth_l2_vnicc_set_defaults(card);
2299 mutex_init(&card->sbp_lock);
2300
2301 if (gdev->dev.type) {
2302 rc = device_add_groups(&gdev->dev, qeth_l2_attr_groups);
2303 if (rc)
2304 return rc;
2305 } else {
2306 gdev->dev.type = &qeth_l2_devtype;
2307 }
2308
2309 INIT_WORK(&card->rx_mode_work, qeth_l2_rx_mode_work);
2310 return 0;
2311 }
2312
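/* Discipline removal: drop the sysfs attributes, stop all worker threads,
 * take the card offline and unregister the net_device (releasing the br2dev
 * reference if learning_sync was active).
 */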
2313 static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
2314 {
2315 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
2316 struct qeth_priv *priv;
2317
2318 if (gdev->dev.type != &qeth_l2_devtype)
2319 device_remove_groups(&gdev->dev, qeth_l2_attr_groups);
2320
2321 qeth_set_allowed_threads(card, 0, 1);
2322 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
2323
2324 if (gdev->state == CCWGROUP_ONLINE)
2325 qeth_set_offline(card, card->discipline, false);
2326
2327 if (card->dev->reg_state == NETREG_REGISTERED) {
2328 priv = netdev_priv(card->dev);
2329 if (priv->brport_features & BR_LEARNING_SYNC) {
2330 rtnl_lock();
2331 qeth_l2_br2dev_put();
2332 rtnl_unlock();
2333 }
2334 unregister_netdev(card->dev);
2335 }
2336 }
2337
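/* Bring the card online: detect Bridge Port and dev2br support, register the
 * MAC address, initialize VNICC, and register or re-attach the net_device.
 */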
2338 static int qeth_l2_set_online(struct qeth_card *card, bool carrier_ok)
2339 {
2340 struct net_device *dev = card->dev;
2341 int rc = 0;
2342
2343 qeth_l2_detect_dev2br_support(card);
2344
2345 mutex_lock(&card->sbp_lock);
2346 qeth_bridgeport_query_support(card);
2347 if (card->options.sbp.supported_funcs) {
2348 qeth_l2_setup_bridgeport_attrs(card);
2349 dev_info(&card->gdev->dev,
2350 "The device represents a Bridge Capable Port\n");
2351 }
2352 mutex_unlock(&card->sbp_lock);
2353
2354 qeth_l2_register_dev_addr(card);
2355
2356 /* for the rx_bcast characteristic, init VNICC after setmac */
2357 qeth_l2_vnicc_init(card);
2358
2359 qeth_l2_trace_features(card);
2360
2361 /* softsetup */
2362 QETH_CARD_TEXT(card, 2, "softsetp");
2363
2364 card->state = CARD_STATE_SOFTSETUP;
2365
2366 qeth_set_allowed_threads(card, 0xffffffff, 0);
2367
2368 if (dev->reg_state != NETREG_REGISTERED) {
2369 rc = qeth_l2_setup_netdev(card);
2370 if (rc)
2371 goto err_setup;
2372
2373 if (carrier_ok)
2374 netif_carrier_on(dev);
2375 } else {
2376 rtnl_lock();
2377 rc = qeth_set_real_num_tx_queues(card,
2378 qeth_tx_actual_queues(card));
2379 if (rc) {
2380 rtnl_unlock();
2381 goto err_set_queues;
2382 }
2383
2384 if (carrier_ok)
2385 netif_carrier_on(dev);
2386 else
2387 netif_carrier_off(dev);
2388
2389 netif_device_attach(dev);
2390 qeth_enable_hw_features(dev);
2391 qeth_l2_enable_brport_features(card);
2392
2393 if (netif_running(dev)) {
2394 local_bh_disable();
2395 napi_schedule(&card->napi);
2396 /* kick-start the NAPI softirq: */
2397 local_bh_enable();
2398 qeth_l2_set_rx_mode(dev);
2399 }
2400 rtnl_unlock();
2401 }
2402 return 0;
2403
2404 err_set_queues:
2405 err_setup:
2406 qeth_set_allowed_threads(card, 0, 1);
2407 card->state = CARD_STATE_DOWN;
2408 return rc;
2409 }
2410
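/* Take the card offline: stop worker threads, drain pending RX mode work,
 * reset the PNSO mode and flush learned FDB entries if learning_sync was
 * active.
 */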
2411 static void qeth_l2_set_offline(struct qeth_card *card)
2412 {
2413 struct qeth_priv *priv = netdev_priv(card->dev);
2414
2415 qeth_set_allowed_threads(card, 0, 1);
2416 qeth_l2_drain_rx_mode_cache(card);
2417
2418 if (card->state == CARD_STATE_SOFTSETUP)
2419 card->state = CARD_STATE_DOWN;
2420
2421 qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
2422 if (priv->brport_features & BR_LEARNING_SYNC)
2423 qeth_l2_dev2br_fdb_flush(card);
2424 }
2425
2426 /* Returns zero if the command is successfully "consumed" */
2427 static int qeth_l2_control_event(struct qeth_card *card,
2428 struct qeth_ipa_cmd *cmd)
2429 {
2430 switch (cmd->hdr.command) {
2431 case IPA_CMD_SETBRIDGEPORT_OSA:
2432 case IPA_CMD_SETBRIDGEPORT_IQD:
2433 if (cmd->data.sbp.hdr.command_code ==
2434 IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
2435 qeth_bridge_state_change(card, cmd);
2436 return 0;
2437 }
2438
2439 return 1;
2440 case IPA_CMD_ADDRESS_CHANGE_NOTIF:
2441 qeth_addr_change_event(card, cmd);
2442 return 0;
2443 default:
2444 return 1;
2445 }
2446 }
2447
2448 const struct qeth_discipline qeth_l2_discipline = {
2449 .setup = qeth_l2_probe_device,
2450 .remove = qeth_l2_remove_device,
2451 .set_online = qeth_l2_set_online,
2452 .set_offline = qeth_l2_set_offline,
2453 .control_event_handler = qeth_l2_control_event,
2454 };
2455 EXPORT_SYMBOL_GPL(qeth_l2_discipline);
2456
2457 static int __init qeth_l2_init(void)
2458 {
2459 pr_info("register layer 2 discipline\n");
2460 refcount_set(&qeth_l2_switchdev_notify_refcnt, 0);
2461 return 0;
2462 }
2463
2464 static void __exit qeth_l2_exit(void)
2465 {
2466 pr_info("unregister layer 2 discipline\n");
2467 }
2468
2469 module_init(qeth_l2_init);
2470 module_exit(qeth_l2_exit);
2471 MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
2472 MODULE_DESCRIPTION("qeth layer 2 discipline");
2473 MODULE_LICENSE("GPL");
2474