1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* /proc/net/ support for AF_RXRPC 3 * 4 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 5 * Written by David Howells (dhowells@redhat.com) 6 */ 7 8 #include <linux/module.h> 9 #include <net/sock.h> 10 #include <net/af_rxrpc.h> 11 #include "ar-internal.h" 12 13 static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = { 14 [RXRPC_CONN_UNUSED] = "Unused ", 15 [RXRPC_CONN_CLIENT_UNSECURED] = "ClUnsec ", 16 [RXRPC_CONN_CLIENT] = "Client ", 17 [RXRPC_CONN_SERVICE_PREALLOC] = "SvPrealc", 18 [RXRPC_CONN_SERVICE_UNSECURED] = "SvUnsec ", 19 [RXRPC_CONN_SERVICE_CHALLENGING] = "SvChall ", 20 [RXRPC_CONN_SERVICE] = "SvSecure", 21 [RXRPC_CONN_ABORTED] = "Aborted ", 22 }; 23 24 /* 25 * generate a list of extant and dead calls in /proc/net/rxrpc_calls 26 */ 27 static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos) 28 __acquires(rcu) 29 { 30 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 31 32 rcu_read_lock(); 33 return seq_list_start_head_rcu(&rxnet->calls, *_pos); 34 } 35 36 static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos) 37 { 38 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 39 40 return seq_list_next_rcu(v, &rxnet->calls, pos); 41 } 42 43 static void rxrpc_call_seq_stop(struct seq_file *seq, void *v) 44 __releases(rcu) 45 { 46 rcu_read_unlock(); 47 } 48 49 static int rxrpc_call_seq_show(struct seq_file *seq, void *v) 50 { 51 struct rxrpc_local *local; 52 struct rxrpc_call *call; 53 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 54 enum rxrpc_call_state state; 55 rxrpc_seq_t tx_bottom; 56 char lbuff[50], rbuff[50]; 57 long timeout = 0; 58 59 if (v == &rxnet->calls) { 60 seq_puts(seq, 61 "Proto Local " 62 " Remote " 63 " SvID ConnID CallID End Use State Abort " 64 " DebugId TxSeq TW RxSeq RW RxSerial CW RxTimo\n"); 65 return 0; 66 } 67 68 call = list_entry(v, struct rxrpc_call, link); 69 70 local = call->local; 71 if (local) 72 sprintf(lbuff, "%pISpc", &local->srx.transport); 73 else 74 strcpy(lbuff, "no_local"); 75 76 sprintf(rbuff, "%pISpc", &call->dest_srx.transport); 77 78 state = rxrpc_call_state(call); 79 if (state != RXRPC_CALL_SERVER_PREALLOC) 80 timeout = ktime_ms_delta(READ_ONCE(call->expect_rx_by), ktime_get_real()); 81 82 tx_bottom = READ_ONCE(call->tx_bottom); 83 seq_printf(seq, 84 "UDP %-47.47s %-47.47s %4x %08x %08x %s %3u" 85 " %-8.8s %08x %08x %08x %02x %08x %02x %08x %02x %06lx\n", 86 lbuff, 87 rbuff, 88 call->dest_srx.srx_service, 89 call->cid, 90 call->call_id, 91 rxrpc_is_service_call(call) ? "Svc" : "Clt", 92 refcount_read(&call->ref), 93 rxrpc_call_states[state], 94 call->abort_code, 95 call->debug_id, 96 tx_bottom, READ_ONCE(call->tx_top) - tx_bottom, 97 call->ackr_window, call->ackr_wtop - call->ackr_window, 98 call->rx_serial, 99 call->cong_cwnd, 100 timeout); 101 102 return 0; 103 } 104 105 const struct seq_operations rxrpc_call_seq_ops = { 106 .start = rxrpc_call_seq_start, 107 .next = rxrpc_call_seq_next, 108 .stop = rxrpc_call_seq_stop, 109 .show = rxrpc_call_seq_show, 110 }; 111 112 /* 113 * generate a list of extant virtual connections in /proc/net/rxrpc_conns 114 */ 115 static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos) 116 __acquires(rxnet->conn_lock) 117 { 118 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 119 120 read_lock(&rxnet->conn_lock); 121 return seq_list_start_head(&rxnet->conn_proc_list, *_pos); 122 } 123 124 static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v, 125 loff_t *pos) 126 { 127 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 128 129 return seq_list_next(v, &rxnet->conn_proc_list, pos); 130 } 131 132 static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v) 133 __releases(rxnet->conn_lock) 134 { 135 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 136 137 read_unlock(&rxnet->conn_lock); 138 } 139 140 static int rxrpc_connection_seq_show(struct seq_file *seq, void *v) 141 { 142 struct rxrpc_connection *conn; 143 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 144 const char *state; 145 char lbuff[50], rbuff[50]; 146 147 if (v == &rxnet->conn_proc_list) { 148 seq_puts(seq, 149 "Proto Local " 150 " Remote " 151 " SvID ConnID End Ref Act State Key " 152 " Serial ISerial CallId0 CallId1 CallId2 CallId3\n" 153 ); 154 return 0; 155 } 156 157 conn = list_entry(v, struct rxrpc_connection, proc_link); 158 if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) { 159 strcpy(lbuff, "no_local"); 160 strcpy(rbuff, "no_connection"); 161 goto print; 162 } 163 164 sprintf(lbuff, "%pISpc", &conn->local->srx.transport); 165 sprintf(rbuff, "%pISpc", &conn->peer->srx.transport); 166 print: 167 state = rxrpc_is_conn_aborted(conn) ? 168 rxrpc_call_completions[conn->completion] : 169 rxrpc_conn_states[conn->state]; 170 seq_printf(seq, 171 "UDP %-47.47s %-47.47s %4x %08x %s %3u %3d" 172 " %s %08x %08x %08x %08x %08x %08x %08x\n", 173 lbuff, 174 rbuff, 175 conn->service_id, 176 conn->proto.cid, 177 rxrpc_conn_is_service(conn) ? "Svc" : "Clt", 178 refcount_read(&conn->ref), 179 atomic_read(&conn->active), 180 state, 181 key_serial(conn->key), 182 conn->tx_serial, 183 conn->hi_serial, 184 conn->channels[0].call_id, 185 conn->channels[1].call_id, 186 conn->channels[2].call_id, 187 conn->channels[3].call_id); 188 189 return 0; 190 } 191 192 const struct seq_operations rxrpc_connection_seq_ops = { 193 .start = rxrpc_connection_seq_start, 194 .next = rxrpc_connection_seq_next, 195 .stop = rxrpc_connection_seq_stop, 196 .show = rxrpc_connection_seq_show, 197 }; 198 199 /* 200 * generate a list of extant virtual bundles in /proc/net/rxrpc/bundles 201 */ 202 static void *rxrpc_bundle_seq_start(struct seq_file *seq, loff_t *_pos) 203 __acquires(rxnet->conn_lock) 204 { 205 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 206 207 read_lock(&rxnet->conn_lock); 208 return seq_list_start_head(&rxnet->bundle_proc_list, *_pos); 209 } 210 211 static void *rxrpc_bundle_seq_next(struct seq_file *seq, void *v, 212 loff_t *pos) 213 { 214 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 215 216 return seq_list_next(v, &rxnet->bundle_proc_list, pos); 217 } 218 219 static void rxrpc_bundle_seq_stop(struct seq_file *seq, void *v) 220 __releases(rxnet->conn_lock) 221 { 222 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 223 224 read_unlock(&rxnet->conn_lock); 225 } 226 227 static int rxrpc_bundle_seq_show(struct seq_file *seq, void *v) 228 { 229 struct rxrpc_bundle *bundle; 230 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 231 char lbuff[50], rbuff[50]; 232 233 if (v == &rxnet->bundle_proc_list) { 234 seq_puts(seq, 235 "Proto Local " 236 " Remote " 237 " SvID Ref Act Flg Key |" 238 " Bundle Conn_0 Conn_1 Conn_2 Conn_3\n" 239 ); 240 return 0; 241 } 242 243 bundle = list_entry(v, struct rxrpc_bundle, proc_link); 244 245 sprintf(lbuff, "%pISpc", &bundle->local->srx.transport); 246 sprintf(rbuff, "%pISpc", &bundle->peer->srx.transport); 247 seq_printf(seq, 248 "UDP %-47.47s %-47.47s %4x %3u %3d" 249 " %c%c%c %08x | %08x %08x %08x %08x %08x\n", 250 lbuff, 251 rbuff, 252 bundle->service_id, 253 refcount_read(&bundle->ref), 254 atomic_read(&bundle->active), 255 bundle->try_upgrade ? 'U' : '-', 256 bundle->exclusive ? 'e' : '-', 257 bundle->upgrade ? 'u' : '-', 258 key_serial(bundle->key), 259 bundle->debug_id, 260 bundle->conn_ids[0], 261 bundle->conn_ids[1], 262 bundle->conn_ids[2], 263 bundle->conn_ids[3]); 264 265 return 0; 266 } 267 268 const struct seq_operations rxrpc_bundle_seq_ops = { 269 .start = rxrpc_bundle_seq_start, 270 .next = rxrpc_bundle_seq_next, 271 .stop = rxrpc_bundle_seq_stop, 272 .show = rxrpc_bundle_seq_show, 273 }; 274 275 /* 276 * generate a list of extant virtual peers in /proc/net/rxrpc/peers 277 */ 278 static int rxrpc_peer_seq_show(struct seq_file *seq, void *v) 279 { 280 struct rxrpc_peer *peer; 281 time64_t now; 282 char lbuff[50], rbuff[50]; 283 284 if (v == SEQ_START_TOKEN) { 285 seq_puts(seq, 286 "Proto Local Remote Use SST Maxd LastUse RTT RTO\n" 287 ); 288 return 0; 289 } 290 291 peer = list_entry(v, struct rxrpc_peer, hash_link); 292 293 sprintf(lbuff, "%pISpc", &peer->local->srx.transport); 294 295 sprintf(rbuff, "%pISpc", &peer->srx.transport); 296 297 now = ktime_get_seconds(); 298 seq_printf(seq, 299 "UDP %-47.47s %-47.47s %3u %4u %5u %6llus %8d %8d\n", 300 lbuff, 301 rbuff, 302 refcount_read(&peer->ref), 303 peer->cong_ssthresh, 304 peer->max_data, 305 now - peer->last_tx_at, 306 READ_ONCE(peer->recent_srtt_us), 307 READ_ONCE(peer->recent_rto_us)); 308 309 return 0; 310 } 311 312 static void *rxrpc_peer_seq_start(struct seq_file *seq, loff_t *_pos) 313 __acquires(rcu) 314 { 315 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 316 unsigned int bucket, n; 317 unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash); 318 void *p; 319 320 rcu_read_lock(); 321 322 if (*_pos >= UINT_MAX) 323 return NULL; 324 325 n = *_pos & ((1U << shift) - 1); 326 bucket = *_pos >> shift; 327 for (;;) { 328 if (bucket >= HASH_SIZE(rxnet->peer_hash)) { 329 *_pos = UINT_MAX; 330 return NULL; 331 } 332 if (n == 0) { 333 if (bucket == 0) 334 return SEQ_START_TOKEN; 335 *_pos += 1; 336 n++; 337 } 338 339 p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1); 340 if (p) 341 return p; 342 bucket++; 343 n = 1; 344 *_pos = (bucket << shift) | n; 345 } 346 } 347 348 static void *rxrpc_peer_seq_next(struct seq_file *seq, void *v, loff_t *_pos) 349 { 350 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 351 unsigned int bucket, n; 352 unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash); 353 void *p; 354 355 if (*_pos >= UINT_MAX) 356 return NULL; 357 358 bucket = *_pos >> shift; 359 360 p = seq_hlist_next_rcu(v, &rxnet->peer_hash[bucket], _pos); 361 if (p) 362 return p; 363 364 for (;;) { 365 bucket++; 366 n = 1; 367 *_pos = (bucket << shift) | n; 368 369 if (bucket >= HASH_SIZE(rxnet->peer_hash)) { 370 *_pos = UINT_MAX; 371 return NULL; 372 } 373 if (n == 0) { 374 *_pos += 1; 375 n++; 376 } 377 378 p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1); 379 if (p) 380 return p; 381 } 382 } 383 384 static void rxrpc_peer_seq_stop(struct seq_file *seq, void *v) 385 __releases(rcu) 386 { 387 rcu_read_unlock(); 388 } 389 390 391 const struct seq_operations rxrpc_peer_seq_ops = { 392 .start = rxrpc_peer_seq_start, 393 .next = rxrpc_peer_seq_next, 394 .stop = rxrpc_peer_seq_stop, 395 .show = rxrpc_peer_seq_show, 396 }; 397 398 /* 399 * Generate a list of extant virtual local endpoints in /proc/net/rxrpc/locals 400 */ 401 static int rxrpc_local_seq_show(struct seq_file *seq, void *v) 402 { 403 struct rxrpc_local *local; 404 char lbuff[50]; 405 406 if (v == SEQ_START_TOKEN) { 407 seq_puts(seq, 408 "Proto Local " 409 " Use Act RxQ\n"); 410 return 0; 411 } 412 413 local = hlist_entry(v, struct rxrpc_local, link); 414 415 sprintf(lbuff, "%pISpc", &local->srx.transport); 416 417 seq_printf(seq, 418 "UDP %-47.47s %3u %3u %3u\n", 419 lbuff, 420 refcount_read(&local->ref), 421 atomic_read(&local->active_users), 422 local->rx_queue.qlen); 423 424 return 0; 425 } 426 427 static void *rxrpc_local_seq_start(struct seq_file *seq, loff_t *_pos) 428 __acquires(rcu) 429 { 430 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 431 unsigned int n; 432 433 rcu_read_lock(); 434 435 if (*_pos >= UINT_MAX) 436 return NULL; 437 438 n = *_pos; 439 if (n == 0) 440 return SEQ_START_TOKEN; 441 442 return seq_hlist_start_rcu(&rxnet->local_endpoints, n - 1); 443 } 444 445 static void *rxrpc_local_seq_next(struct seq_file *seq, void *v, loff_t *_pos) 446 { 447 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 448 449 if (*_pos >= UINT_MAX) 450 return NULL; 451 452 return seq_hlist_next_rcu(v, &rxnet->local_endpoints, _pos); 453 } 454 455 static void rxrpc_local_seq_stop(struct seq_file *seq, void *v) 456 __releases(rcu) 457 { 458 rcu_read_unlock(); 459 } 460 461 const struct seq_operations rxrpc_local_seq_ops = { 462 .start = rxrpc_local_seq_start, 463 .next = rxrpc_local_seq_next, 464 .stop = rxrpc_local_seq_stop, 465 .show = rxrpc_local_seq_show, 466 }; 467 468 /* 469 * Display stats in /proc/net/rxrpc/stats 470 */ 471 int rxrpc_stats_show(struct seq_file *seq, void *v) 472 { 473 struct rxrpc_net *rxnet = rxrpc_net(seq_file_single_net(seq)); 474 475 seq_printf(seq, 476 "Data : send=%u sendf=%u fail=%u emsz=%u\n", 477 atomic_read(&rxnet->stat_tx_data_send), 478 atomic_read(&rxnet->stat_tx_data_send_frag), 479 atomic_read(&rxnet->stat_tx_data_send_fail), 480 atomic_read(&rxnet->stat_tx_data_send_msgsize)); 481 seq_printf(seq, 482 "Data-Tx : nr=%u retrans=%u uf=%u cwr=%u\n", 483 atomic_read(&rxnet->stat_tx_data), 484 atomic_read(&rxnet->stat_tx_data_retrans), 485 atomic_read(&rxnet->stat_tx_data_underflow), 486 atomic_read(&rxnet->stat_tx_data_cwnd_reset)); 487 seq_printf(seq, 488 "Data-Rx : nr=%u reqack=%u jumbo=%u\n", 489 atomic_read(&rxnet->stat_rx_data), 490 atomic_read(&rxnet->stat_rx_data_reqack), 491 atomic_read(&rxnet->stat_rx_data_jumbo)); 492 seq_printf(seq, 493 "Ack : fill=%u send=%u skip=%u\n", 494 atomic_read(&rxnet->stat_tx_ack_fill), 495 atomic_read(&rxnet->stat_tx_ack_send), 496 atomic_read(&rxnet->stat_tx_ack_skip)); 497 seq_printf(seq, 498 "Ack-Tx : req=%u dup=%u oos=%u exw=%u nos=%u png=%u prs=%u dly=%u idl=%u\n", 499 atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_REQUESTED]), 500 atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_DUPLICATE]), 501 atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_OUT_OF_SEQUENCE]), 502 atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_EXCEEDS_WINDOW]), 503 atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_NOSPACE]), 504 atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_PING]), 505 atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_PING_RESPONSE]), 506 atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_DELAY]), 507 atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_IDLE])); 508 seq_printf(seq, 509 "Ack-Rx : req=%u dup=%u oos=%u exw=%u nos=%u png=%u prs=%u dly=%u idl=%u z=%u\n", 510 atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_REQUESTED]), 511 atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_DUPLICATE]), 512 atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_OUT_OF_SEQUENCE]), 513 atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_EXCEEDS_WINDOW]), 514 atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_NOSPACE]), 515 atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_PING]), 516 atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_PING_RESPONSE]), 517 atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_DELAY]), 518 atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_IDLE]), 519 atomic_read(&rxnet->stat_rx_acks[0])); 520 seq_printf(seq, 521 "Why-Req-A: acklost=%u mrtt=%u ortt=%u stall=%u\n", 522 atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_ack_lost]), 523 atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_more_rtt]), 524 atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_old_rtt]), 525 atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_app_stall])); 526 seq_printf(seq, 527 "Why-Req-A: nolast=%u retx=%u slows=%u smtxw=%u\n", 528 atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_no_srv_last]), 529 atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_retrans]), 530 atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_slow_start]), 531 atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_small_txwin])); 532 seq_printf(seq, 533 "Jumbo-Tx : %u,%u,%u,%u,%u,%u,%u,%u,%u,%u\n", 534 atomic_read(&rxnet->stat_tx_jumbo[0]), 535 atomic_read(&rxnet->stat_tx_jumbo[1]), 536 atomic_read(&rxnet->stat_tx_jumbo[2]), 537 atomic_read(&rxnet->stat_tx_jumbo[3]), 538 atomic_read(&rxnet->stat_tx_jumbo[4]), 539 atomic_read(&rxnet->stat_tx_jumbo[5]), 540 atomic_read(&rxnet->stat_tx_jumbo[6]), 541 atomic_read(&rxnet->stat_tx_jumbo[7]), 542 atomic_read(&rxnet->stat_tx_jumbo[8]), 543 atomic_read(&rxnet->stat_tx_jumbo[9])); 544 seq_printf(seq, 545 "Jumbo-Rx : %u,%u,%u,%u,%u,%u,%u,%u,%u,%u\n", 546 atomic_read(&rxnet->stat_rx_jumbo[0]), 547 atomic_read(&rxnet->stat_rx_jumbo[1]), 548 atomic_read(&rxnet->stat_rx_jumbo[2]), 549 atomic_read(&rxnet->stat_rx_jumbo[3]), 550 atomic_read(&rxnet->stat_rx_jumbo[4]), 551 atomic_read(&rxnet->stat_rx_jumbo[5]), 552 atomic_read(&rxnet->stat_rx_jumbo[6]), 553 atomic_read(&rxnet->stat_rx_jumbo[7]), 554 atomic_read(&rxnet->stat_rx_jumbo[8]), 555 atomic_read(&rxnet->stat_rx_jumbo[9])); 556 seq_printf(seq, 557 "Buffers : txb=%u rxb=%u\n", 558 atomic_read(&rxrpc_nr_txbuf), 559 atomic_read(&rxrpc_n_rx_skbs)); 560 seq_printf(seq, 561 "IO-thread: loops=%u\n", 562 atomic_read(&rxnet->stat_io_loop)); 563 return 0; 564 } 565 566 /* 567 * Clear stats if /proc/net/rxrpc/stats is written to. 568 */ 569 int rxrpc_stats_clear(struct file *file, char *buf, size_t size) 570 { 571 struct seq_file *m = file->private_data; 572 struct rxrpc_net *rxnet = rxrpc_net(seq_file_single_net(m)); 573 574 if (size > 1 || (size == 1 && buf[0] != '\n')) 575 return -EINVAL; 576 577 atomic_set(&rxnet->stat_tx_data, 0); 578 atomic_set(&rxnet->stat_tx_data_retrans, 0); 579 atomic_set(&rxnet->stat_tx_data_underflow, 0); 580 atomic_set(&rxnet->stat_tx_data_cwnd_reset, 0); 581 atomic_set(&rxnet->stat_tx_data_send, 0); 582 atomic_set(&rxnet->stat_tx_data_send_frag, 0); 583 atomic_set(&rxnet->stat_tx_data_send_fail, 0); 584 atomic_set(&rxnet->stat_rx_data, 0); 585 atomic_set(&rxnet->stat_rx_data_reqack, 0); 586 atomic_set(&rxnet->stat_rx_data_jumbo, 0); 587 588 atomic_set(&rxnet->stat_tx_ack_fill, 0); 589 atomic_set(&rxnet->stat_tx_ack_send, 0); 590 atomic_set(&rxnet->stat_tx_ack_skip, 0); 591 memset(&rxnet->stat_tx_acks, 0, sizeof(rxnet->stat_tx_acks)); 592 memset(&rxnet->stat_rx_acks, 0, sizeof(rxnet->stat_rx_acks)); 593 memset(&rxnet->stat_tx_jumbo, 0, sizeof(rxnet->stat_tx_jumbo)); 594 memset(&rxnet->stat_rx_jumbo, 0, sizeof(rxnet->stat_rx_jumbo)); 595 596 memset(&rxnet->stat_why_req_ack, 0, sizeof(rxnet->stat_why_req_ack)); 597 598 atomic_set(&rxnet->stat_io_loop, 0); 599 return size; 600 } 601