// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */

#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <net/ipv6.h>

#include "fbnic.h"
#include "fbnic_netdev.h"
#include "fbnic_rpc.h"

void fbnic_reset_indir_tbl(struct fbnic_net *fbn)
{
	unsigned int num_rx = fbn->num_rx_queues;
	unsigned int i;

	if (netif_is_rxfh_configured(fbn->netdev))
		return;

	for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
		fbn->indir_tbl[0][i] = ethtool_rxfh_indir_default(i, num_rx);
}

void fbnic_rss_key_fill(u32 *buffer)
{
	static u32 rss_key[FBNIC_RPC_RSS_KEY_DWORD_LEN];

	net_get_random_once(rss_key, sizeof(rss_key));
	rss_key[FBNIC_RPC_RSS_KEY_LAST_IDX] &= FBNIC_RPC_RSS_KEY_LAST_MASK;

	memcpy(buffer, rss_key, sizeof(rss_key));
}

#define RX_HASH_OPT_L4 \
	(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
#define RX_HASH_OPT_L3 \
	(RXH_IP_SRC | RXH_IP_DST)
#define RX_HASH_OPT_L2 RXH_L2DA

void fbnic_rss_init_en_mask(struct fbnic_net *fbn)
{
	fbn->rss_flow_hash[FBNIC_TCP4_HASH_OPT] = RX_HASH_OPT_L4;
	fbn->rss_flow_hash[FBNIC_TCP6_HASH_OPT] = RX_HASH_OPT_L4;

	fbn->rss_flow_hash[FBNIC_UDP4_HASH_OPT] = RX_HASH_OPT_L3;
	fbn->rss_flow_hash[FBNIC_UDP6_HASH_OPT] = RX_HASH_OPT_L3;
	fbn->rss_flow_hash[FBNIC_IPV4_HASH_OPT] = RX_HASH_OPT_L3;
	fbn->rss_flow_hash[FBNIC_IPV6_HASH_OPT] = RX_HASH_OPT_L3;

	fbn->rss_flow_hash[FBNIC_ETHER_HASH_OPT] = RX_HASH_OPT_L2;
}

void fbnic_rss_disable_hw(struct fbnic_dev *fbd)
{
	/* Disable RPC by clearing enable bit and configuration */
	if (!fbnic_bmc_present(fbd))
		wr32(fbd, FBNIC_RPC_RMI_CONFIG,
		     FIELD_PREP(FBNIC_RPC_RMI_CONFIG_OH_BYTES, 20));
}

#define FBNIC_FH_2_RSSEM_BIT(_fh, _rssem, _val)		\
	FIELD_PREP(FBNIC_RPC_ACT_TBL1_RSS_ENA_##_rssem,	\
		   FIELD_GET(RXH_##_fh, _val))
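/* Translate the ethtool RXH_* flow hash bits for a flow type into the
 * hardware RSS field enable bits. For example, the default L4 hash
 * (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3) enables the
 * IP_SRC, IP_DST, L4_SRC and L4_DST fields, while an L3-only hash
 * leaves the two L4 bits clear.
 */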
u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type)
{
	u32 flow_hash = fbn->rss_flow_hash[flow_type];
	u32 rss_en_mask = 0;

	rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L2DA, L2_DA, flow_hash);
	rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP_SRC, IP_SRC, flow_hash);
	rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP_DST, IP_DST, flow_hash);
	rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L4_B_0_1, L4_SRC, flow_hash);
	rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L4_B_2_3, L4_DST, flow_hash);

	return rss_en_mask;
}

void fbnic_rss_reinit_hw(struct fbnic_dev *fbd, struct fbnic_net *fbn)
{
	unsigned int i;

	for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
		wr32(fbd, FBNIC_RPC_RSS_TBL(0, i), fbn->indir_tbl[0][i]);
		wr32(fbd, FBNIC_RPC_RSS_TBL(1, i), fbn->indir_tbl[1][i]);
	}

	for (i = 0; i < FBNIC_RPC_RSS_KEY_DWORD_LEN; i++)
		wr32(fbd, FBNIC_RPC_RSS_KEY(i), fbn->rss_key[i]);

	/* Default action for this to drop w/ no destination */
	wr32(fbd, FBNIC_RPC_ACT_TBL0_DEFAULT, FBNIC_RPC_ACT_TBL0_DROP);
	wrfl(fbd);

	wr32(fbd, FBNIC_RPC_ACT_TBL1_DEFAULT, 0);

	/* If it isn't already enabled set the RMI Config value to enable RPC */
	wr32(fbd, FBNIC_RPC_RMI_CONFIG,
	     FIELD_PREP(FBNIC_RPC_RMI_CONFIG_MTU, FBNIC_MAX_JUMBO_FRAME_SIZE) |
	     FIELD_PREP(FBNIC_RPC_RMI_CONFIG_OH_BYTES, 20) |
	     FBNIC_RPC_RMI_CONFIG_ENABLE);
}

void fbnic_bmc_rpc_all_multi_config(struct fbnic_dev *fbd,
				    bool enable_host)
{
	struct fbnic_act_tcam *act_tcam;
	struct fbnic_mac_addr *mac_addr;
	int j;

	/* We need to add the all multicast filter at the end of the
	 * multicast address list. This way if there are any that are
	 * shared between the host and the BMC they can be directed to
	 * both. Otherwise the remainder just get sent directly to the
	 * BMC.
	 */
	mac_addr = &fbd->mac_addr[fbd->mac_addr_boundary - 1];
	if (fbnic_bmc_present(fbd) && fbd->fw_cap.all_multi) {
		if (mac_addr->state != FBNIC_TCAM_S_VALID) {
			eth_zero_addr(mac_addr->value.addr8);
			eth_broadcast_addr(mac_addr->mask.addr8);
			mac_addr->value.addr8[0] ^= 1;
			mac_addr->mask.addr8[0] ^= 1;
			set_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam);
			mac_addr->state = FBNIC_TCAM_S_ADD;
		}
		if (enable_host)
			set_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
				mac_addr->act_tcam);
		else
			clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
				  mac_addr->act_tcam);
	} else if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam) &&
		   !is_zero_ether_addr(mac_addr->mask.addr8) &&
		   mac_addr->state == FBNIC_TCAM_S_VALID) {
		clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI, mac_addr->act_tcam);
		clear_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam);
		mac_addr->state = FBNIC_TCAM_S_DELETE;
	}

	/* We have to add a special handler for multicast as the
	 * BMC may have an all-multi rule already in place. As such
	 * adding a rule ourselves won't do any good so we will have
	 * to modify the rules for the ALL MULTI below if the BMC
	 * already has the rule in place.
	 */
	act_tcam = &fbd->act_tcam[FBNIC_RPC_ACT_TBL_BMC_ALL_MULTI_OFFSET];

	/* If we are not enabling the rule just delete it. We will fall
	 * back to the RSS rules that support the multicast addresses.
	 */
	if (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi || enable_host) {
		if (act_tcam->state == FBNIC_TCAM_S_VALID)
			act_tcam->state = FBNIC_TCAM_S_DELETE;
		return;
	}

	/* Rewrite TCAM rule 23 to handle BMC all-multi traffic */
	act_tcam->dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
				    FBNIC_RPC_ACT_TBL0_DEST_BMC);
	act_tcam->mask.tcam[0] = 0xffff;

	/* MACDA 0 - 3 is reserved for the BMC MAC address */
	act_tcam->value.tcam[1] =
		FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
			   fbd->mac_addr_boundary - 1) |
		FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
	act_tcam->mask.tcam[1] = 0xffff &
				 ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX &
				 ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;

	for (j = 2; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
		act_tcam->mask.tcam[j] = 0xffff;

	act_tcam->state = FBNIC_TCAM_S_UPDATE;
}
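/* Record the MACDA and action TCAM entries needed to steer traffic to
 * the BMC: the BMC unicast addresses reported by firmware, the
 * broadcast address, and the all-multi filter managed by
 * fbnic_bmc_rpc_all_multi_config() above.
 */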
void fbnic_bmc_rpc_init(struct fbnic_dev *fbd)
{
	int i = FBNIC_RPC_TCAM_MACDA_BMC_ADDR_IDX;
	struct fbnic_act_tcam *act_tcam;
	struct fbnic_mac_addr *mac_addr;
	int j;

	/* Check if BMC is present */
	if (!fbnic_bmc_present(fbd))
		return;

	/* Fetch BMC MAC addresses from firmware capabilities */
	for (j = 0; j < 4; j++) {
		u8 *bmc_mac = fbd->fw_cap.bmc_mac_addr[j];

		/* Validate BMC MAC addresses */
		if (is_zero_ether_addr(bmc_mac))
			continue;

		if (is_multicast_ether_addr(bmc_mac))
			mac_addr = __fbnic_mc_sync(fbd, bmc_mac);
		else
			mac_addr = &fbd->mac_addr[i++];

		if (!mac_addr) {
			netdev_err(fbd->netdev,
				   "No slot for BMC MAC address[%d]\n", j);
			continue;
		}

		ether_addr_copy(mac_addr->value.addr8, bmc_mac);
		eth_zero_addr(mac_addr->mask.addr8);

		set_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam);
		mac_addr->state = FBNIC_TCAM_S_ADD;
	}

	/* Validate Broadcast is also present, record it and tag it */
	mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX];
	eth_broadcast_addr(mac_addr->value.addr8);
	set_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam);
	mac_addr->state = FBNIC_TCAM_S_ADD;

	/* Rewrite TCAM rule 0 if it isn't present to relocate BMC rules */
	act_tcam = &fbd->act_tcam[FBNIC_RPC_ACT_TBL_BMC_OFFSET];
	act_tcam->dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
				    FBNIC_RPC_ACT_TBL0_DEST_BMC);
	act_tcam->mask.tcam[0] = 0xffff;

	/* MACDA 0 - 3 is reserved for the BMC MAC address.
	 * To account for that we have to mask out the lower 2 bits
	 * of the MACDA index by performing an &= with 0x1c.
	 */
	act_tcam->value.tcam[1] = FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
	act_tcam->mask.tcam[1] = 0xffff &
			~FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX, 0x1c) &
			~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;

	for (j = 2; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
		act_tcam->mask.tcam[j] = 0xffff;

	act_tcam->state = FBNIC_TCAM_S_UPDATE;

	fbnic_bmc_rpc_all_multi_config(fbd, false);
}

#define FBNIC_ACT1_INIT(_l4, _udp, _ip, _v6)		\
	(((_l4) ? FBNIC_RPC_TCAM_ACT1_L4_VALID : 0) |	\
	 ((_udp) ? FBNIC_RPC_TCAM_ACT1_L4_IS_UDP : 0) |	\
	 ((_ip) ? FBNIC_RPC_TCAM_ACT1_IP_VALID : 0) |	\
	 ((_v6) ? FBNIC_RPC_TCAM_ACT1_IP_IS_V6 : 0))

#define FBNIC_TSTAMP_MASK(_all, _udp, _ether)			\
	(((_all) ? ((1u << FBNIC_NUM_HASH_OPT) - 1) : 0) |	\
	 ((_udp) ? (1u << FBNIC_UDP6_HASH_OPT) |		\
		   (1u << FBNIC_UDP4_HASH_OPT) : 0) |		\
	 ((_ether) ? (1u << FBNIC_ETHER_HASH_OPT) : 0))
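/* For example, FBNIC_ACT1_INIT(1, 1, 1, 1) builds the ACT1 match value
 * for UDP over IPv6 (L4 valid, L4 is UDP, IP valid, IP is v6), and
 * FBNIC_TSTAMP_MASK(0, 1, 0) sets only the UDP4/UDP6 bits used for the
 * PTP-over-UDP Rx timestamp filters below.
 */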
void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn)
{
	static const u32 act1_value[FBNIC_NUM_HASH_OPT] = {
		FBNIC_ACT1_INIT(1, 1, 1, 1),	/* UDP6 */
		FBNIC_ACT1_INIT(1, 1, 1, 0),	/* UDP4 */
		FBNIC_ACT1_INIT(1, 0, 1, 1),	/* TCP6 */
		FBNIC_ACT1_INIT(1, 0, 1, 0),	/* TCP4 */
		FBNIC_ACT1_INIT(0, 0, 1, 1),	/* IP6 */
		FBNIC_ACT1_INIT(0, 0, 1, 0),	/* IP4 */
		0				/* Ether */
	};
	u32 tstamp_mask = 0;
	unsigned int i;

	/* To support scenarios where a BMC is present we must write the
	 * rules twice, once for the unicast cases, and once again for
	 * the broadcast/multicast cases as we have to support 2 destinations.
	 */
	BUILD_BUG_ON(FBNIC_RSS_EN_NUM_UNICAST * 2 != FBNIC_RSS_EN_NUM_ENTRIES);
	BUILD_BUG_ON(ARRAY_SIZE(act1_value) != FBNIC_NUM_HASH_OPT);

	/* Set timestamp mask with 1b per flow type */
	if (fbn->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
		switch (fbn->hwtstamp_config.rx_filter) {
		case HWTSTAMP_FILTER_ALL:
			tstamp_mask = FBNIC_TSTAMP_MASK(1, 1, 1);
			break;
		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			tstamp_mask = FBNIC_TSTAMP_MASK(0, 1, 1);
			break;
		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			tstamp_mask = FBNIC_TSTAMP_MASK(0, 1, 0);
			break;
		case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
			tstamp_mask = FBNIC_TSTAMP_MASK(0, 0, 1);
			break;
		default:
			netdev_warn(fbn->netdev, "Unsupported hwtstamp_rx_filter\n");
			break;
		}
	}

	/* Program RSS hash enable mask for host in action TCAM/table. */
	for (i = fbnic_bmc_present(fbd) ? 0 : FBNIC_RSS_EN_NUM_UNICAST;
	     i < FBNIC_RSS_EN_NUM_ENTRIES; i++) {
		unsigned int idx = i + FBNIC_RPC_ACT_TBL_RSS_OFFSET;
		struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[idx];
		u32 flow_hash, dest, rss_en_mask;
		int flow_type, j;
		u16 value = 0;

		flow_type = i % FBNIC_RSS_EN_NUM_UNICAST;
		flow_hash = fbn->rss_flow_hash[flow_type];

		/* Set DEST_HOST based on absence of RXH_DISCARD */
		dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
				  !(RXH_DISCARD & flow_hash) ?
				  FBNIC_RPC_ACT_TBL0_DEST_HOST : 0);

		if (i >= FBNIC_RSS_EN_NUM_UNICAST && fbnic_bmc_present(fbd))
			dest |= FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
					   FBNIC_RPC_ACT_TBL0_DEST_BMC);

		if (!dest)
			dest = FBNIC_RPC_ACT_TBL0_DROP;
		else if (tstamp_mask & (1u << flow_type))
			dest |= FBNIC_RPC_ACT_TBL0_TS_ENA;

		if (act1_value[flow_type] & FBNIC_RPC_TCAM_ACT1_L4_VALID)
			dest |= FIELD_PREP(FBNIC_RPC_ACT_TBL0_DMA_HINT,
					   FBNIC_RCD_HDR_AL_DMA_HINT_L4);

		rss_en_mask = fbnic_flow_hash_2_rss_en_mask(fbn, flow_type);

		act_tcam->dest = dest;
		act_tcam->rss_en_mask = rss_en_mask;
		act_tcam->state = FBNIC_TCAM_S_UPDATE;

		act_tcam->mask.tcam[0] = 0xffff;

		/* We reserve the upper 8 MACDA TCAM entries for host
		 * unicast. So we set the value to 24 and mask off the
		 * lower bits so that the lower entries can be used as
		 * multicast or BMC addresses.
		 */
		if (i < FBNIC_RSS_EN_NUM_UNICAST)
			value = FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
					   fbd->mac_addr_boundary);
		value |= FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;

		flow_type = i % FBNIC_RSS_EN_NUM_UNICAST;
		value |= act1_value[flow_type];

		act_tcam->value.tcam[1] = value;
		act_tcam->mask.tcam[1] = ~value;

		for (j = 2; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
			act_tcam->mask.tcam[j] = 0xffff;

		act_tcam->state = FBNIC_TCAM_S_UPDATE;
	}
}

struct fbnic_mac_addr *__fbnic_uc_sync(struct fbnic_dev *fbd,
				       const unsigned char *addr)
{
	struct fbnic_mac_addr *avail_addr = NULL;
	unsigned int i;

	/* Scan from middle of list to bottom, filling bottom up.
	 * Skip the first entry which is reserved for dev_addr and
	 * leave the last entry to use for promiscuous filtering.
	 */
	for (i = fbd->mac_addr_boundary - 1;
	     i < FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX; i++) {
		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];

		if (mac_addr->state == FBNIC_TCAM_S_DISABLED) {
			avail_addr = mac_addr;
		} else if (ether_addr_equal(mac_addr->value.addr8, addr)) {
			avail_addr = mac_addr;
			break;
		}
	}

	if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
		ether_addr_copy(avail_addr->value.addr8, addr);
		eth_zero_addr(avail_addr->mask.addr8);
		avail_addr->state = FBNIC_TCAM_S_ADD;
	}

	return avail_addr;
}

struct fbnic_mac_addr *__fbnic_mc_sync(struct fbnic_dev *fbd,
				       const unsigned char *addr)
{
	struct fbnic_mac_addr *avail_addr = NULL;
	unsigned int i;

	/* Scan from middle of list to top, filling top down.
	 * Skip over the address reserved for the BMC MAC and
	 * exclude index 0 as that belongs to the broadcast address.
	 */
	for (i = fbd->mac_addr_boundary;
	     --i > FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX;) {
		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];

		if (mac_addr->state == FBNIC_TCAM_S_DISABLED) {
			avail_addr = mac_addr;
		} else if (ether_addr_equal(mac_addr->value.addr8, addr)) {
			avail_addr = mac_addr;
			break;
		}
	}

	/* Scan the BMC addresses to see if the BMC may have already
	 * reserved the address.
	 */
	while (--i) {
		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];

		if (!is_zero_ether_addr(mac_addr->mask.addr8))
			continue;

		/* Only move on if we find a match */
		if (!ether_addr_equal(mac_addr->value.addr8, addr))
			continue;

		/* We need to pull this address to the shared area */
		if (avail_addr) {
			memcpy(avail_addr, mac_addr, sizeof(*mac_addr));
			mac_addr->state = FBNIC_TCAM_S_DELETE;
			avail_addr->state = FBNIC_TCAM_S_ADD;
		}

		break;
	}

	if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
		ether_addr_copy(avail_addr->value.addr8, addr);
		eth_zero_addr(avail_addr->mask.addr8);
		avail_addr->state = FBNIC_TCAM_S_ADD;
	}

	return avail_addr;
}

int __fbnic_xc_unsync(struct fbnic_mac_addr *mac_addr, unsigned int tcam_idx)
{
	if (!test_and_clear_bit(tcam_idx, mac_addr->act_tcam))
		return -ENOENT;

	if (bitmap_empty(mac_addr->act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES))
		mac_addr->state = FBNIC_TCAM_S_DELETE;

	return 0;
}

void fbnic_sift_macda(struct fbnic_dev *fbd)
{
	int dest, src;

	/* Move BMC only addresses back into BMC region */
	for (dest = FBNIC_RPC_TCAM_MACDA_BMC_ADDR_IDX,
	     src = FBNIC_RPC_TCAM_MACDA_MULTICAST_IDX;
	     ++dest < FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX &&
	     src < fbd->mac_addr_boundary;) {
		struct fbnic_mac_addr *dest_addr = &fbd->mac_addr[dest];

		if (dest_addr->state != FBNIC_TCAM_S_DISABLED)
			continue;

		while (src < fbd->mac_addr_boundary) {
			struct fbnic_mac_addr *src_addr = &fbd->mac_addr[src++];

			/* Verify BMC bit is set */
			if (!test_bit(FBNIC_MAC_ADDR_T_BMC, src_addr->act_tcam))
				continue;

			/* Verify filter isn't already disabled */
			if (src_addr->state == FBNIC_TCAM_S_DISABLED ||
			    src_addr->state == FBNIC_TCAM_S_DELETE)
				continue;

			/* Verify only BMC bit is set */
			if (bitmap_weight(src_addr->act_tcam,
					  FBNIC_RPC_TCAM_ACT_NUM_ENTRIES) != 1)
				continue;

			/* Verify we are not moving wildcard address */
			if (!is_zero_ether_addr(src_addr->mask.addr8))
				continue;

			memcpy(dest_addr, src_addr, sizeof(*src_addr));
			src_addr->state = FBNIC_TCAM_S_DELETE;
			dest_addr->state = FBNIC_TCAM_S_ADD;
		}
	}
}
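/* Zero out every data word plus the trailing validate word (note the
 * <= in the loop bound) so the hardware entry is fully invalidated.
 */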
static void fbnic_clear_macda_entry(struct fbnic_dev *fbd, unsigned int idx)
{
	int i;

	/* Invalidate entry and clear addr state info */
	for (i = 0; i <= FBNIC_RPC_TCAM_MACDA_WORD_LEN; i++)
		wr32(fbd, FBNIC_RPC_TCAM_MACDA(idx, i), 0);
}

static void fbnic_clear_macda(struct fbnic_dev *fbd)
{
	int idx;

	for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];

		if (mac_addr->state == FBNIC_TCAM_S_DISABLED)
			continue;

		if (test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam)) {
			if (fbnic_bmc_present(fbd))
				continue;
			dev_warn_once(fbd->dev,
				      "Found BMC MAC address w/ BMC not present\n");
		}

		fbnic_clear_macda_entry(fbd, idx);

		/* If rule was already destined for deletion just wipe it now */
		if (mac_addr->state == FBNIC_TCAM_S_DELETE) {
			memset(mac_addr, 0, sizeof(*mac_addr));
			continue;
		}

		/* Change state to update so that we will rewrite
		 * this tcam the next time fbnic_write_macda is called.
		 */
		mac_addr->state = FBNIC_TCAM_S_UPDATE;
	}
}

static void fbnic_write_macda_entry(struct fbnic_dev *fbd, unsigned int idx,
				    struct fbnic_mac_addr *mac_addr)
{
	__be16 *mask, *value;
	int i;

	mask = &mac_addr->mask.addr16[FBNIC_RPC_TCAM_MACDA_WORD_LEN - 1];
	value = &mac_addr->value.addr16[FBNIC_RPC_TCAM_MACDA_WORD_LEN - 1];

	for (i = 0; i < FBNIC_RPC_TCAM_MACDA_WORD_LEN; i++)
		wr32(fbd, FBNIC_RPC_TCAM_MACDA(idx, i),
		     FIELD_PREP(FBNIC_RPC_TCAM_MACDA_MASK, ntohs(*mask--)) |
		     FIELD_PREP(FBNIC_RPC_TCAM_MACDA_VALUE, ntohs(*value--)));

	wrfl(fbd);

	wr32(fbd, FBNIC_RPC_TCAM_MACDA(idx, i), FBNIC_RPC_TCAM_VALIDATE);
}

void fbnic_write_macda(struct fbnic_dev *fbd)
{
	int idx;

	for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];

		/* Check if update flag is set else skip to the next entry. */
		if (!(mac_addr->state & FBNIC_TCAM_S_UPDATE))
			continue;

		/* Clear by writing 0s. */
		if (mac_addr->state == FBNIC_TCAM_S_DELETE) {
			/* Invalidate entry and clear addr state info */
			fbnic_clear_macda_entry(fbd, idx);
			memset(mac_addr, 0, sizeof(*mac_addr));

			continue;
		}

		fbnic_write_macda_entry(fbd, idx, mac_addr);

		mac_addr->state = FBNIC_TCAM_S_VALID;
	}
}

static void fbnic_clear_act_tcam(struct fbnic_dev *fbd, unsigned int idx)
{
	int i;

	/* Invalidate entry and clear addr state info */
	for (i = 0; i <= FBNIC_RPC_TCAM_ACT_WORD_LEN; i++)
		wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i), 0);
}

static void fbnic_clear_tce_tcam_entry(struct fbnic_dev *fbd, unsigned int idx)
{
	int i;

	/* Invalidate entry and clear addr state info */
	for (i = 0; i <= FBNIC_TCE_TCAM_WORD_LEN; i++)
		wr32(fbd, FBNIC_TCE_RAM_TCAM(idx, i), 0);
}

static void fbnic_write_tce_tcam_dest(struct fbnic_dev *fbd, unsigned int idx,
				      struct fbnic_mac_addr *mac_addr)
{
	u32 dest = FBNIC_TCE_TCAM_DEST_BMC;
	u32 idx2dest_map;

	if (is_multicast_ether_addr(mac_addr->value.addr8))
		dest |= FBNIC_TCE_TCAM_DEST_MAC;

	idx2dest_map = rd32(fbd, FBNIC_TCE_TCAM_IDX2DEST_MAP);
	idx2dest_map &= ~(FBNIC_TCE_TCAM_IDX2DEST_MAP_DEST_ID_0 << (4 * idx));
	idx2dest_map |= dest << (4 * idx);

	wr32(fbd, FBNIC_TCE_TCAM_IDX2DEST_MAP, idx2dest_map);
}

static void fbnic_write_tce_tcam_entry(struct fbnic_dev *fbd, unsigned int idx,
				       struct fbnic_mac_addr *mac_addr)
{
	__be16 *mask, *value;
	int i;

	mask = &mac_addr->mask.addr16[FBNIC_TCE_TCAM_WORD_LEN - 1];
	value = &mac_addr->value.addr16[FBNIC_TCE_TCAM_WORD_LEN - 1];

	for (i = 0; i < FBNIC_TCE_TCAM_WORD_LEN; i++)
		wr32(fbd, FBNIC_TCE_RAM_TCAM(idx, i),
		     FIELD_PREP(FBNIC_TCE_RAM_TCAM_MASK, ntohs(*mask--)) |
		     FIELD_PREP(FBNIC_TCE_RAM_TCAM_VALUE, ntohs(*value--)));

	wrfl(fbd);

	wr32(fbd, FBNIC_TCE_RAM_TCAM3(idx), FBNIC_TCE_RAM_TCAM3_MCQ_MASK |
					    FBNIC_TCE_RAM_TCAM3_DEST_MASK |
					    FBNIC_TCE_RAM_TCAM3_VALIDATE);
}
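/* Two helpers are provided for programming the BMC MACDA entries into
 * the TCE TCAM: this one fills the table from the last entry working
 * down, while __fbnic_write_tce_tcam() below fills it from entry 0
 * working up. fbnic_write_tce_tcam() alternates between them on
 * successive calls based on where the previous pass finished
 * (fbd->tce_tcam_last).
 */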
static void __fbnic_write_tce_tcam_rev(struct fbnic_dev *fbd)
{
	int tcam_idx = FBNIC_TCE_TCAM_NUM_ENTRIES;
	int mac_idx;

	for (mac_idx = ARRAY_SIZE(fbd->mac_addr); mac_idx--;) {
		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[mac_idx];

		/* Verify BMC bit is set */
		if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam))
			continue;

		if (!tcam_idx) {
			dev_err(fbd->dev, "TCE TCAM overflow\n");
			return;
		}

		tcam_idx--;
		fbnic_write_tce_tcam_dest(fbd, tcam_idx, mac_addr);
		fbnic_write_tce_tcam_entry(fbd, tcam_idx, mac_addr);
	}

	while (tcam_idx)
		fbnic_clear_tce_tcam_entry(fbd, --tcam_idx);

	fbd->tce_tcam_last = tcam_idx;
}

static void __fbnic_write_tce_tcam(struct fbnic_dev *fbd)
{
	int tcam_idx = 0;
	int mac_idx;

	for (mac_idx = 0; mac_idx < ARRAY_SIZE(fbd->mac_addr); mac_idx++) {
		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[mac_idx];

		/* Verify BMC bit is set */
		if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam))
			continue;

		if (tcam_idx == FBNIC_TCE_TCAM_NUM_ENTRIES) {
			dev_err(fbd->dev, "TCE TCAM overflow\n");
			return;
		}

		fbnic_write_tce_tcam_dest(fbd, tcam_idx, mac_addr);
		fbnic_write_tce_tcam_entry(fbd, tcam_idx, mac_addr);
		tcam_idx++;
	}

	while (tcam_idx < FBNIC_TCE_TCAM_NUM_ENTRIES)
		fbnic_clear_tce_tcam_entry(fbd, tcam_idx++);

	fbd->tce_tcam_last = tcam_idx;
}

void fbnic_write_tce_tcam(struct fbnic_dev *fbd)
{
	if (fbd->tce_tcam_last)
		__fbnic_write_tce_tcam_rev(fbd);
	else
		__fbnic_write_tce_tcam(fbd);
}

struct fbnic_ip_addr *__fbnic_ip4_sync(struct fbnic_dev *fbd,
				       struct fbnic_ip_addr *ip_addr,
				       const struct in_addr *addr,
				       const struct in_addr *mask)
{
	struct fbnic_ip_addr *avail_addr = NULL;
	unsigned int i;

	/* Scan from top of list to bottom, filling bottom up. */
	for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES; i++, ip_addr++) {
		struct in6_addr *m = &ip_addr->mask;

		if (ip_addr->state == FBNIC_TCAM_S_DISABLED) {
			avail_addr = ip_addr;
			continue;
		}

		if (ip_addr->version != 4)
			continue;

		/* Drop avail_addr if mask is a subset of our current mask.
		 * This prevents us from inserting a longer prefix behind a
		 * shorter one.
		 *
		 * The mask is stored as an inverted value, so as an example:
		 * m	ffff ffff ffff ffff ffff ffff ffff 0000 0000
		 * mask	0000 0000 0000 0000 0000 0000 0000 ffff ffff
		 *
		 * "m" and "mask" represent a typical IPv4 mask stored in
		 * the TCAM and one provided by the stack. The code below
		 * should return a non-zero result if there is a 0 stored
		 * anywhere in "m" where "mask" has a 0.
		 */
		if (~m->s6_addr32[3] & ~mask->s_addr) {
			avail_addr = NULL;
			continue;
		}

		/* Check to see if the mask actually contains fewer bits than
		 * our new mask "m". The XOR below should only result in 0 if
		 * "m" is masking a bit that we are looking for in our new
		 * "mask"; we eliminated the 0^0 case with the check above.
		 *
		 * If it contains fewer bits we need to stop here, otherwise
		 * we might be adding an unreachable rule.
		 */
		if (~(m->s6_addr32[3] ^ mask->s_addr))
			break;

		if (ip_addr->value.s6_addr32[3] == addr->s_addr) {
			avail_addr = ip_addr;
			break;
		}
	}

	if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
		ipv6_addr_set(&avail_addr->value, 0, 0, 0, addr->s_addr);
		ipv6_addr_set(&avail_addr->mask, htonl(~0), htonl(~0),
			      htonl(~0), ~mask->s_addr);
		avail_addr->version = 4;

		avail_addr->state = FBNIC_TCAM_S_ADD;
	}

	return avail_addr;
}

struct fbnic_ip_addr *__fbnic_ip6_sync(struct fbnic_dev *fbd,
				       struct fbnic_ip_addr *ip_addr,
				       const struct in6_addr *addr,
				       const struct in6_addr *mask)
{
	struct fbnic_ip_addr *avail_addr = NULL;
	unsigned int i;

	ip_addr = &ip_addr[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES - 1];

	/* Scan from bottom of list to top, filling top down. */
	for (i = FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES; i--; ip_addr--) {
		struct in6_addr *m = &ip_addr->mask;

		if (ip_addr->state == FBNIC_TCAM_S_DISABLED) {
			avail_addr = ip_addr;
			continue;
		}

		if (ip_addr->version != 6)
			continue;

		/* Drop avail_addr if mask is a superset of our current mask.
		 * This prevents us from inserting a longer prefix behind a
		 * shorter one.
		 *
		 * The mask is stored as an inverted value, so as an example:
		 * m	0000 0000 0000 0000 0000 0000 0000 0000 0000
		 * mask	ffff ffff ffff ffff ffff ffff ffff ffff ffff
		 *
		 * "m" and "mask" represent a typical IPv6 mask stored in
		 * the TCAM and one provided by the stack. The code below
		 * should return a non-zero result which will cause us
		 * to drop the avail_addr value that might be cached
		 * to prevent us from dropping a v6 address behind it.
		 */
		if ((m->s6_addr32[0] & mask->s6_addr32[0]) |
		    (m->s6_addr32[1] & mask->s6_addr32[1]) |
		    (m->s6_addr32[2] & mask->s6_addr32[2]) |
		    (m->s6_addr32[3] & mask->s6_addr32[3])) {
			avail_addr = NULL;
			continue;
		}

		/* The previous test eliminated any overlap between the
		 * two values so now we need to check for gaps.
		 *
		 * If the mask is equal to our current mask then it should
		 * result in m ^ mask = ffff ffff; if however the value
		 * stored in m is bigger then we should see a 0 appear
		 * somewhere in the mask.
		 */
		if (~(m->s6_addr32[0] ^ mask->s6_addr32[0]) |
		    ~(m->s6_addr32[1] ^ mask->s6_addr32[1]) |
		    ~(m->s6_addr32[2] ^ mask->s6_addr32[2]) |
		    ~(m->s6_addr32[3] ^ mask->s6_addr32[3]))
			break;

		if (ipv6_addr_cmp(&ip_addr->value, addr))
			continue;

		avail_addr = ip_addr;
		break;
	}

	if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
		memcpy(&avail_addr->value, addr, sizeof(*addr));
		ipv6_addr_set(&avail_addr->mask,
			      ~mask->s6_addr32[0], ~mask->s6_addr32[1],
			      ~mask->s6_addr32[2], ~mask->s6_addr32[3]);
		avail_addr->version = 6;

		avail_addr->state = FBNIC_TCAM_S_ADD;
	}

	return avail_addr;
}

int __fbnic_ip_unsync(struct fbnic_ip_addr *ip_addr, unsigned int tcam_idx)
{
	if (!test_and_clear_bit(tcam_idx, ip_addr->act_tcam))
		return -ENOENT;

	if (bitmap_empty(ip_addr->act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES))
		ip_addr->state = FBNIC_TCAM_S_DELETE;

	return 0;
}

static void fbnic_clear_ip_src_entry(struct fbnic_dev *fbd, unsigned int idx)
{
	int i;

	/* Invalidate entry and clear addr state info */
	for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
		wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i), 0);
}

static void fbnic_clear_ip_dst_entry(struct fbnic_dev *fbd, unsigned int idx)
{
	int i;

	/* Invalidate entry and clear addr state info */
	for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
		wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i), 0);
}

static void fbnic_clear_ip_outer_src_entry(struct fbnic_dev *fbd,
					   unsigned int idx)
{
	int i;

	/* Invalidate entry and clear addr state info */
	for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
		wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i), 0);
}

static void fbnic_clear_ip_outer_dst_entry(struct fbnic_dev *fbd,
					   unsigned int idx)
{
	int i;

	/* Invalidate entry and clear addr state info */
	for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
		wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i), 0);
}

static void fbnic_write_ip_src_entry(struct fbnic_dev *fbd, unsigned int idx,
				     struct fbnic_ip_addr *ip_addr)
{
	__be16 *mask, *value;
	int i;

	mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
	value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];

	for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
		wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i),
		     FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
		     FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
	wrfl(fbd);

	/* Bit 129 is used to flag for v4/v6 */
	wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i),
	     (ip_addr->version == 6) | FBNIC_RPC_TCAM_VALIDATE);
}

static void fbnic_write_ip_dst_entry(struct fbnic_dev *fbd, unsigned int idx,
				     struct fbnic_ip_addr *ip_addr)
{
	__be16 *mask, *value;
	int i;

	mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
	value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];

	for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
		wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i),
		     FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
		     FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
	wrfl(fbd);

	/* Bit 129 is used to flag for v4/v6 */
	wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i),
	     (ip_addr->version == 6) | FBNIC_RPC_TCAM_VALIDATE);
}

static void fbnic_write_ip_outer_src_entry(struct fbnic_dev *fbd,
					   unsigned int idx,
					   struct fbnic_ip_addr *ip_addr)
{
	__be16 *mask, *value;
	int i;

	mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
	value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];

	for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
		wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i),
		     FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
		     FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
	wrfl(fbd);

	wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i), FBNIC_RPC_TCAM_VALIDATE);
}

static void fbnic_write_ip_outer_dst_entry(struct fbnic_dev *fbd,
					   unsigned int idx,
					   struct fbnic_ip_addr *ip_addr)
{
	__be16 *mask, *value;
	int i;

	mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
	value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];

	for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
		wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i),
		     FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
		     FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
	wrfl(fbd);

	wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i), FBNIC_RPC_TCAM_VALIDATE);
}

void fbnic_write_ip_addr(struct fbnic_dev *fbd)
{
	int idx;

	for (idx = ARRAY_SIZE(fbd->ip_src); idx--;) {
		struct fbnic_ip_addr *ip_addr = &fbd->ip_src[idx];

		/* Check if update flag is set else skip. */
		if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
			continue;

		/* Clear by writing 0s. */
		if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
			/* Invalidate entry and clear addr state info */
			fbnic_clear_ip_src_entry(fbd, idx);
			memset(ip_addr, 0, sizeof(*ip_addr));

			continue;
		}

		fbnic_write_ip_src_entry(fbd, idx, ip_addr);

		ip_addr->state = FBNIC_TCAM_S_VALID;
	}

	/* Repeat the process for the other IP TCAMs */
	for (idx = ARRAY_SIZE(fbd->ip_dst); idx--;) {
		struct fbnic_ip_addr *ip_addr = &fbd->ip_dst[idx];

		if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
			continue;

		if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
			fbnic_clear_ip_dst_entry(fbd, idx);
			memset(ip_addr, 0, sizeof(*ip_addr));

			continue;
		}

		fbnic_write_ip_dst_entry(fbd, idx, ip_addr);

		ip_addr->state = FBNIC_TCAM_S_VALID;
	}

	for (idx = ARRAY_SIZE(fbd->ipo_src); idx--;) {
		struct fbnic_ip_addr *ip_addr = &fbd->ipo_src[idx];

		if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
			continue;

		if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
			fbnic_clear_ip_outer_src_entry(fbd, idx);
			memset(ip_addr, 0, sizeof(*ip_addr));

			continue;
		}

		fbnic_write_ip_outer_src_entry(fbd, idx, ip_addr);

		ip_addr->state = FBNIC_TCAM_S_VALID;
	}

	for (idx = ARRAY_SIZE(fbd->ipo_dst); idx--;) {
		struct fbnic_ip_addr *ip_addr = &fbd->ipo_dst[idx];

		if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
			continue;

		if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
			fbnic_clear_ip_outer_dst_entry(fbd, idx);
			memset(ip_addr, 0, sizeof(*ip_addr));

			continue;
		}

		fbnic_write_ip_outer_dst_entry(fbd, idx, ip_addr);

		ip_addr->state = FBNIC_TCAM_S_VALID;
	}
}
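/* Remove all rules from the hardware action table. If a BMC is present
 * the last rule, which steers traffic to the BMC, is rewritten in a
 * reduced form so BMC traffic keeps flowing; every other valid rule is
 * cleared and flagged for rewrite.
 */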
void fbnic_clear_rules(struct fbnic_dev *fbd)
{
	u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
			      FBNIC_RPC_ACT_TBL0_DEST_BMC);
	int i = FBNIC_RPC_TCAM_ACT_NUM_ENTRIES - 1;
	struct fbnic_act_tcam *act_tcam;

	/* Clear MAC rules */
	fbnic_clear_macda(fbd);

	/* If BMC is present we need to preserve the last rule which
	 * will be used to route traffic to the BMC if it is received.
	 *
	 * At this point it should be the only MAC address in the MACDA
	 * so any unicast or multicast traffic received should be routed
	 * to it. So leave the last rule in place.
	 *
	 * It will be rewritten to add the host again when we bring
	 * the interface back up.
	 */
	if (fbnic_bmc_present(fbd)) {
		act_tcam = &fbd->act_tcam[i];

		if (act_tcam->state == FBNIC_TCAM_S_VALID &&
		    (act_tcam->dest & dest)) {
			wr32(fbd, FBNIC_RPC_ACT_TBL0(i), dest);
			wr32(fbd, FBNIC_RPC_ACT_TBL1(i), 0);

			act_tcam->state = FBNIC_TCAM_S_UPDATE;

			i--;
		}
	}

	/* Work from the bottom up deleting all other rules from hardware */
	do {
		act_tcam = &fbd->act_tcam[i];

		if (act_tcam->state != FBNIC_TCAM_S_VALID)
			continue;

		fbnic_clear_act_tcam(fbd, i);
		act_tcam->state = FBNIC_TCAM_S_UPDATE;
	} while (i--);
}

static void fbnic_delete_act_tcam(struct fbnic_dev *fbd, unsigned int idx)
{
	fbnic_clear_act_tcam(fbd, idx);
	memset(&fbd->act_tcam[idx], 0, sizeof(struct fbnic_act_tcam));
}

static void fbnic_update_act_tcam(struct fbnic_dev *fbd, unsigned int idx)
{
	struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[idx];
	int i;

	/* Update entry by writing the destination and RSS mask */
	wr32(fbd, FBNIC_RPC_ACT_TBL0(idx), act_tcam->dest);
	wr32(fbd, FBNIC_RPC_ACT_TBL1(idx), act_tcam->rss_en_mask);

	/* Write new TCAM rule to hardware */
	for (i = 0; i < FBNIC_RPC_TCAM_ACT_WORD_LEN; i++)
		wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i),
		     FIELD_PREP(FBNIC_RPC_TCAM_ACT_MASK,
				act_tcam->mask.tcam[i]) |
		     FIELD_PREP(FBNIC_RPC_TCAM_ACT_VALUE,
				act_tcam->value.tcam[i]));

	wrfl(fbd);

	wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i), FBNIC_RPC_TCAM_VALIDATE);
	act_tcam->state = FBNIC_TCAM_S_VALID;
}

void fbnic_write_rules(struct fbnic_dev *fbd)
{
	int i;

	/* Flush any pending action table rules */
	for (i = 0; i < FBNIC_RPC_ACT_TBL_NUM_ENTRIES; i++) {
		struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[i];

		/* Check if update flag is set else skip to the next entry. */
		if (!(act_tcam->state & FBNIC_TCAM_S_UPDATE))
			continue;

		if (act_tcam->state == FBNIC_TCAM_S_DELETE)
			fbnic_delete_act_tcam(fbd, i);
		else
			fbnic_update_act_tcam(fbd, i);
	}
}