1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * ipmi_msghandler.c 4 * 5 * Incoming and outgoing message routing for an IPMI interface. 6 * 7 * Author: MontaVista Software, Inc. 8 * Corey Minyard <minyard@mvista.com> 9 * source@mvista.com 10 * 11 * Copyright 2002 MontaVista Software Inc. 12 */ 13 14 #define pr_fmt(fmt) "%s" fmt, "IPMI message handler: " 15 #define dev_fmt pr_fmt 16 17 #include <linux/module.h> 18 #include <linux/errno.h> 19 #include <linux/panic_notifier.h> 20 #include <linux/poll.h> 21 #include <linux/sched.h> 22 #include <linux/seq_file.h> 23 #include <linux/spinlock.h> 24 #include <linux/mutex.h> 25 #include <linux/slab.h> 26 #include <linux/ipmi.h> 27 #include <linux/ipmi_smi.h> 28 #include <linux/notifier.h> 29 #include <linux/init.h> 30 #include <linux/proc_fs.h> 31 #include <linux/rcupdate.h> 32 #include <linux/interrupt.h> 33 #include <linux/moduleparam.h> 34 #include <linux/workqueue.h> 35 #include <linux/uuid.h> 36 #include <linux/nospec.h> 37 #include <linux/vmalloc.h> 38 #include <linux/delay.h> 39 40 #define IPMI_DRIVER_VERSION "39.2" 41 42 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); 43 static int ipmi_init_msghandler(void); 44 static void smi_recv_tasklet(struct tasklet_struct *t); 45 static void handle_new_recv_msgs(struct ipmi_smi *intf); 46 static void need_waiter(struct ipmi_smi *intf); 47 static int handle_one_recv_msg(struct ipmi_smi *intf, 48 struct ipmi_smi_msg *msg); 49 50 static bool initialized; 51 static bool drvregistered; 52 53 /* Numbers in this enumerator should be mapped to ipmi_panic_event_str */ 54 enum ipmi_panic_event_op { 55 IPMI_SEND_PANIC_EVENT_NONE, 56 IPMI_SEND_PANIC_EVENT, 57 IPMI_SEND_PANIC_EVENT_STRING, 58 IPMI_SEND_PANIC_EVENT_MAX 59 }; 60 61 /* Indices in this array should be mapped to enum ipmi_panic_event_op */ 62 static const char *const ipmi_panic_event_str[] = { "none", "event", "string", NULL }; 63 64 #ifdef CONFIG_IPMI_PANIC_STRING 65 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING 66 #elif defined(CONFIG_IPMI_PANIC_EVENT) 67 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT 68 #else 69 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE 70 #endif 71 72 static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT; 73 74 static int panic_op_write_handler(const char *val, 75 const struct kernel_param *kp) 76 { 77 char valcp[16]; 78 int e; 79 80 strscpy(valcp, val, sizeof(valcp)); 81 e = match_string(ipmi_panic_event_str, -1, strstrip(valcp)); 82 if (e < 0) 83 return e; 84 85 ipmi_send_panic_event = e; 86 return 0; 87 } 88 89 static int panic_op_read_handler(char *buffer, const struct kernel_param *kp) 90 { 91 const char *event_str; 92 93 if (ipmi_send_panic_event >= IPMI_SEND_PANIC_EVENT_MAX) 94 event_str = "???"; 95 else 96 event_str = ipmi_panic_event_str[ipmi_send_panic_event]; 97 98 return sprintf(buffer, "%s\n", event_str); 99 } 100 101 static const struct kernel_param_ops panic_op_ops = { 102 .set = panic_op_write_handler, 103 .get = panic_op_read_handler 104 }; 105 module_param_cb(panic_op, &panic_op_ops, NULL, 0600); 106 MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic. Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events."); 107 108 109 #define MAX_EVENTS_IN_QUEUE 25 110 111 /* Remain in auto-maintenance mode for this amount of time (in ms). */ 112 static unsigned long maintenance_mode_timeout_ms = 30000; 113 module_param(maintenance_mode_timeout_ms, ulong, 0644); 114 MODULE_PARM_DESC(maintenance_mode_timeout_ms, 115 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode."); 116 117 /* 118 * Don't let a message sit in a queue forever, always time it with at lest 119 * the max message timer. This is in milliseconds. 120 */ 121 #define MAX_MSG_TIMEOUT 60000 122 123 /* 124 * Timeout times below are in milliseconds, and are done off a 1 125 * second timer. So setting the value to 1000 would mean anything 126 * between 0 and 1000ms. So really the only reasonable minimum 127 * setting it 2000ms, which is between 1 and 2 seconds. 128 */ 129 130 /* The default timeout for message retries. */ 131 static unsigned long default_retry_ms = 2000; 132 module_param(default_retry_ms, ulong, 0644); 133 MODULE_PARM_DESC(default_retry_ms, 134 "The time (milliseconds) between retry sends"); 135 136 /* The default timeout for maintenance mode message retries. */ 137 static unsigned long default_maintenance_retry_ms = 3000; 138 module_param(default_maintenance_retry_ms, ulong, 0644); 139 MODULE_PARM_DESC(default_maintenance_retry_ms, 140 "The time (milliseconds) between retry sends in maintenance mode"); 141 142 /* The default maximum number of retries */ 143 static unsigned int default_max_retries = 4; 144 module_param(default_max_retries, uint, 0644); 145 MODULE_PARM_DESC(default_max_retries, 146 "The time (milliseconds) between retry sends in maintenance mode"); 147 148 /* The default maximum number of users that may register. */ 149 static unsigned int max_users = 30; 150 module_param(max_users, uint, 0644); 151 MODULE_PARM_DESC(max_users, 152 "The most users that may use the IPMI stack at one time."); 153 154 /* Call every ~1000 ms. */ 155 #define IPMI_TIMEOUT_TIME 1000 156 157 /* How many jiffies does it take to get to the timeout time. */ 158 #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000) 159 160 /* 161 * Request events from the queue every second (this is the number of 162 * IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the 163 * future, IPMI will add a way to know immediately if an event is in 164 * the queue and this silliness can go away. 165 */ 166 #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME)) 167 168 /* How long should we cache dynamic device IDs? */ 169 #define IPMI_DYN_DEV_ID_EXPIRY (10 * HZ) 170 171 /* 172 * The main "user" data structure. 173 */ 174 struct ipmi_user { 175 struct list_head link; 176 177 /* 178 * Set to NULL when the user is destroyed, a pointer to myself 179 * so srcu_dereference can be used on it. 180 */ 181 struct ipmi_user *self; 182 struct srcu_struct release_barrier; 183 184 struct kref refcount; 185 186 /* The upper layer that handles receive messages. */ 187 const struct ipmi_user_hndl *handler; 188 void *handler_data; 189 190 /* The interface this user is bound to. */ 191 struct ipmi_smi *intf; 192 193 /* Does this interface receive IPMI events? */ 194 bool gets_events; 195 196 /* Free must run in process context for RCU cleanup. */ 197 struct work_struct remove_work; 198 }; 199 200 static struct workqueue_struct *remove_work_wq; 201 202 static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index) 203 __acquires(user->release_barrier) 204 { 205 struct ipmi_user *ruser; 206 207 *index = srcu_read_lock(&user->release_barrier); 208 ruser = srcu_dereference(user->self, &user->release_barrier); 209 if (!ruser) 210 srcu_read_unlock(&user->release_barrier, *index); 211 return ruser; 212 } 213 214 static void release_ipmi_user(struct ipmi_user *user, int index) 215 { 216 srcu_read_unlock(&user->release_barrier, index); 217 } 218 219 struct cmd_rcvr { 220 struct list_head link; 221 222 struct ipmi_user *user; 223 unsigned char netfn; 224 unsigned char cmd; 225 unsigned int chans; 226 227 /* 228 * This is used to form a linked lised during mass deletion. 229 * Since this is in an RCU list, we cannot use the link above 230 * or change any data until the RCU period completes. So we 231 * use this next variable during mass deletion so we can have 232 * a list and don't have to wait and restart the search on 233 * every individual deletion of a command. 234 */ 235 struct cmd_rcvr *next; 236 }; 237 238 struct seq_table { 239 unsigned int inuse : 1; 240 unsigned int broadcast : 1; 241 242 unsigned long timeout; 243 unsigned long orig_timeout; 244 unsigned int retries_left; 245 246 /* 247 * To verify on an incoming send message response that this is 248 * the message that the response is for, we keep a sequence id 249 * and increment it every time we send a message. 250 */ 251 long seqid; 252 253 /* 254 * This is held so we can properly respond to the message on a 255 * timeout, and it is used to hold the temporary data for 256 * retransmission, too. 257 */ 258 struct ipmi_recv_msg *recv_msg; 259 }; 260 261 /* 262 * Store the information in a msgid (long) to allow us to find a 263 * sequence table entry from the msgid. 264 */ 265 #define STORE_SEQ_IN_MSGID(seq, seqid) \ 266 ((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff)) 267 268 #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \ 269 do { \ 270 seq = (((msgid) >> 26) & 0x3f); \ 271 seqid = ((msgid) & 0x3ffffff); \ 272 } while (0) 273 274 #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff) 275 276 #define IPMI_MAX_CHANNELS 16 277 struct ipmi_channel { 278 unsigned char medium; 279 unsigned char protocol; 280 }; 281 282 struct ipmi_channel_set { 283 struct ipmi_channel c[IPMI_MAX_CHANNELS]; 284 }; 285 286 struct ipmi_my_addrinfo { 287 /* 288 * My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR, 289 * but may be changed by the user. 290 */ 291 unsigned char address; 292 293 /* 294 * My LUN. This should generally stay the SMS LUN, but just in 295 * case... 296 */ 297 unsigned char lun; 298 }; 299 300 /* 301 * Note that the product id, manufacturer id, guid, and device id are 302 * immutable in this structure, so dyn_mutex is not required for 303 * accessing those. If those change on a BMC, a new BMC is allocated. 304 */ 305 struct bmc_device { 306 struct platform_device pdev; 307 struct list_head intfs; /* Interfaces on this BMC. */ 308 struct ipmi_device_id id; 309 struct ipmi_device_id fetch_id; 310 int dyn_id_set; 311 unsigned long dyn_id_expiry; 312 struct mutex dyn_mutex; /* Protects id, intfs, & dyn* */ 313 guid_t guid; 314 guid_t fetch_guid; 315 int dyn_guid_set; 316 struct kref usecount; 317 struct work_struct remove_work; 318 unsigned char cc; /* completion code */ 319 }; 320 #define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev) 321 322 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, 323 struct ipmi_device_id *id, 324 bool *guid_set, guid_t *guid); 325 326 /* 327 * Various statistics for IPMI, these index stats[] in the ipmi_smi 328 * structure. 329 */ 330 enum ipmi_stat_indexes { 331 /* Commands we got from the user that were invalid. */ 332 IPMI_STAT_sent_invalid_commands = 0, 333 334 /* Commands we sent to the MC. */ 335 IPMI_STAT_sent_local_commands, 336 337 /* Responses from the MC that were delivered to a user. */ 338 IPMI_STAT_handled_local_responses, 339 340 /* Responses from the MC that were not delivered to a user. */ 341 IPMI_STAT_unhandled_local_responses, 342 343 /* Commands we sent out to the IPMB bus. */ 344 IPMI_STAT_sent_ipmb_commands, 345 346 /* Commands sent on the IPMB that had errors on the SEND CMD */ 347 IPMI_STAT_sent_ipmb_command_errs, 348 349 /* Each retransmit increments this count. */ 350 IPMI_STAT_retransmitted_ipmb_commands, 351 352 /* 353 * When a message times out (runs out of retransmits) this is 354 * incremented. 355 */ 356 IPMI_STAT_timed_out_ipmb_commands, 357 358 /* 359 * This is like above, but for broadcasts. Broadcasts are 360 * *not* included in the above count (they are expected to 361 * time out). 362 */ 363 IPMI_STAT_timed_out_ipmb_broadcasts, 364 365 /* Responses I have sent to the IPMB bus. */ 366 IPMI_STAT_sent_ipmb_responses, 367 368 /* The response was delivered to the user. */ 369 IPMI_STAT_handled_ipmb_responses, 370 371 /* The response had invalid data in it. */ 372 IPMI_STAT_invalid_ipmb_responses, 373 374 /* The response didn't have anyone waiting for it. */ 375 IPMI_STAT_unhandled_ipmb_responses, 376 377 /* Commands we sent out to the IPMB bus. */ 378 IPMI_STAT_sent_lan_commands, 379 380 /* Commands sent on the IPMB that had errors on the SEND CMD */ 381 IPMI_STAT_sent_lan_command_errs, 382 383 /* Each retransmit increments this count. */ 384 IPMI_STAT_retransmitted_lan_commands, 385 386 /* 387 * When a message times out (runs out of retransmits) this is 388 * incremented. 389 */ 390 IPMI_STAT_timed_out_lan_commands, 391 392 /* Responses I have sent to the IPMB bus. */ 393 IPMI_STAT_sent_lan_responses, 394 395 /* The response was delivered to the user. */ 396 IPMI_STAT_handled_lan_responses, 397 398 /* The response had invalid data in it. */ 399 IPMI_STAT_invalid_lan_responses, 400 401 /* The response didn't have anyone waiting for it. */ 402 IPMI_STAT_unhandled_lan_responses, 403 404 /* The command was delivered to the user. */ 405 IPMI_STAT_handled_commands, 406 407 /* The command had invalid data in it. */ 408 IPMI_STAT_invalid_commands, 409 410 /* The command didn't have anyone waiting for it. */ 411 IPMI_STAT_unhandled_commands, 412 413 /* Invalid data in an event. */ 414 IPMI_STAT_invalid_events, 415 416 /* Events that were received with the proper format. */ 417 IPMI_STAT_events, 418 419 /* Retransmissions on IPMB that failed. */ 420 IPMI_STAT_dropped_rexmit_ipmb_commands, 421 422 /* Retransmissions on LAN that failed. */ 423 IPMI_STAT_dropped_rexmit_lan_commands, 424 425 /* This *must* remain last, add new values above this. */ 426 IPMI_NUM_STATS 427 }; 428 429 430 #define IPMI_IPMB_NUM_SEQ 64 431 struct ipmi_smi { 432 struct module *owner; 433 434 /* What interface number are we? */ 435 int intf_num; 436 437 struct kref refcount; 438 439 /* Set when the interface is being unregistered. */ 440 bool in_shutdown; 441 442 /* Used for a list of interfaces. */ 443 struct list_head link; 444 445 /* 446 * The list of upper layers that are using me. seq_lock write 447 * protects this. Read protection is with srcu. 448 */ 449 struct list_head users; 450 struct srcu_struct users_srcu; 451 atomic_t nr_users; 452 453 /* Used for wake ups at startup. */ 454 wait_queue_head_t waitq; 455 456 /* 457 * Prevents the interface from being unregistered when the 458 * interface is used by being looked up through the BMC 459 * structure. 460 */ 461 struct mutex bmc_reg_mutex; 462 463 struct bmc_device tmp_bmc; 464 struct bmc_device *bmc; 465 bool bmc_registered; 466 struct list_head bmc_link; 467 char *my_dev_name; 468 bool in_bmc_register; /* Handle recursive situations. Yuck. */ 469 struct work_struct bmc_reg_work; 470 471 const struct ipmi_smi_handlers *handlers; 472 void *send_info; 473 474 /* Driver-model device for the system interface. */ 475 struct device *si_dev; 476 477 /* 478 * A table of sequence numbers for this interface. We use the 479 * sequence numbers for IPMB messages that go out of the 480 * interface to match them up with their responses. A routine 481 * is called periodically to time the items in this list. 482 */ 483 spinlock_t seq_lock; 484 struct seq_table seq_table[IPMI_IPMB_NUM_SEQ]; 485 int curr_seq; 486 487 /* 488 * Messages queued for delivery. If delivery fails (out of memory 489 * for instance), They will stay in here to be processed later in a 490 * periodic timer interrupt. The tasklet is for handling received 491 * messages directly from the handler. 492 */ 493 spinlock_t waiting_rcv_msgs_lock; 494 struct list_head waiting_rcv_msgs; 495 atomic_t watchdog_pretimeouts_to_deliver; 496 struct tasklet_struct recv_tasklet; 497 498 spinlock_t xmit_msgs_lock; 499 struct list_head xmit_msgs; 500 struct ipmi_smi_msg *curr_msg; 501 struct list_head hp_xmit_msgs; 502 503 /* 504 * The list of command receivers that are registered for commands 505 * on this interface. 506 */ 507 struct mutex cmd_rcvrs_mutex; 508 struct list_head cmd_rcvrs; 509 510 /* 511 * Events that were queues because no one was there to receive 512 * them. 513 */ 514 spinlock_t events_lock; /* For dealing with event stuff. */ 515 struct list_head waiting_events; 516 unsigned int waiting_events_count; /* How many events in queue? */ 517 char delivering_events; 518 char event_msg_printed; 519 520 /* How many users are waiting for events? */ 521 atomic_t event_waiters; 522 unsigned int ticks_to_req_ev; 523 524 spinlock_t watch_lock; /* For dealing with watch stuff below. */ 525 526 /* How many users are waiting for commands? */ 527 unsigned int command_waiters; 528 529 /* How many users are waiting for watchdogs? */ 530 unsigned int watchdog_waiters; 531 532 /* How many users are waiting for message responses? */ 533 unsigned int response_waiters; 534 535 /* 536 * Tells what the lower layer has last been asked to watch for, 537 * messages and/or watchdogs. Protected by watch_lock. 538 */ 539 unsigned int last_watch_mask; 540 541 /* 542 * The event receiver for my BMC, only really used at panic 543 * shutdown as a place to store this. 544 */ 545 unsigned char event_receiver; 546 unsigned char event_receiver_lun; 547 unsigned char local_sel_device; 548 unsigned char local_event_generator; 549 550 /* For handling of maintenance mode. */ 551 int maintenance_mode; 552 bool maintenance_mode_enable; 553 int auto_maintenance_timeout; 554 spinlock_t maintenance_mode_lock; /* Used in a timer... */ 555 556 /* 557 * If we are doing maintenance on something on IPMB, extend 558 * the timeout time to avoid timeouts writing firmware and 559 * such. 560 */ 561 int ipmb_maintenance_mode_timeout; 562 563 /* 564 * A cheap hack, if this is non-null and a message to an 565 * interface comes in with a NULL user, call this routine with 566 * it. Note that the message will still be freed by the 567 * caller. This only works on the system interface. 568 * 569 * Protected by bmc_reg_mutex. 570 */ 571 void (*null_user_handler)(struct ipmi_smi *intf, 572 struct ipmi_recv_msg *msg); 573 574 /* 575 * When we are scanning the channels for an SMI, this will 576 * tell which channel we are scanning. 577 */ 578 int curr_channel; 579 580 /* Channel information */ 581 struct ipmi_channel_set *channel_list; 582 unsigned int curr_working_cset; /* First index into the following. */ 583 struct ipmi_channel_set wchannels[2]; 584 struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS]; 585 bool channels_ready; 586 587 atomic_t stats[IPMI_NUM_STATS]; 588 589 /* 590 * run_to_completion duplicate of smb_info, smi_info 591 * and ipmi_serial_info structures. Used to decrease numbers of 592 * parameters passed by "low" level IPMI code. 593 */ 594 int run_to_completion; 595 }; 596 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev) 597 598 static void __get_guid(struct ipmi_smi *intf); 599 static void __ipmi_bmc_unregister(struct ipmi_smi *intf); 600 static int __ipmi_bmc_register(struct ipmi_smi *intf, 601 struct ipmi_device_id *id, 602 bool guid_set, guid_t *guid, int intf_num); 603 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id); 604 605 606 /** 607 * The driver model view of the IPMI messaging driver. 608 */ 609 static struct platform_driver ipmidriver = { 610 .driver = { 611 .name = "ipmi", 612 .bus = &platform_bus_type 613 } 614 }; 615 /* 616 * This mutex keeps us from adding the same BMC twice. 617 */ 618 static DEFINE_MUTEX(ipmidriver_mutex); 619 620 static LIST_HEAD(ipmi_interfaces); 621 static DEFINE_MUTEX(ipmi_interfaces_mutex); 622 #define ipmi_interfaces_mutex_held() \ 623 lockdep_is_held(&ipmi_interfaces_mutex) 624 static struct srcu_struct ipmi_interfaces_srcu; 625 626 /* 627 * List of watchers that want to know when smi's are added and deleted. 628 */ 629 static LIST_HEAD(smi_watchers); 630 static DEFINE_MUTEX(smi_watchers_mutex); 631 632 #define ipmi_inc_stat(intf, stat) \ 633 atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat]) 634 #define ipmi_get_stat(intf, stat) \ 635 ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat])) 636 637 static const char * const addr_src_to_str[] = { 638 "invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI", 639 "device-tree", "platform" 640 }; 641 642 const char *ipmi_addr_src_to_str(enum ipmi_addr_src src) 643 { 644 if (src >= SI_LAST) 645 src = 0; /* Invalid */ 646 return addr_src_to_str[src]; 647 } 648 EXPORT_SYMBOL(ipmi_addr_src_to_str); 649 650 static int is_lan_addr(struct ipmi_addr *addr) 651 { 652 return addr->addr_type == IPMI_LAN_ADDR_TYPE; 653 } 654 655 static int is_ipmb_addr(struct ipmi_addr *addr) 656 { 657 return addr->addr_type == IPMI_IPMB_ADDR_TYPE; 658 } 659 660 static int is_ipmb_bcast_addr(struct ipmi_addr *addr) 661 { 662 return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE; 663 } 664 665 static int is_ipmb_direct_addr(struct ipmi_addr *addr) 666 { 667 return addr->addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE; 668 } 669 670 static void free_recv_msg_list(struct list_head *q) 671 { 672 struct ipmi_recv_msg *msg, *msg2; 673 674 list_for_each_entry_safe(msg, msg2, q, link) { 675 list_del(&msg->link); 676 ipmi_free_recv_msg(msg); 677 } 678 } 679 680 static void free_smi_msg_list(struct list_head *q) 681 { 682 struct ipmi_smi_msg *msg, *msg2; 683 684 list_for_each_entry_safe(msg, msg2, q, link) { 685 list_del(&msg->link); 686 ipmi_free_smi_msg(msg); 687 } 688 } 689 690 static void clean_up_interface_data(struct ipmi_smi *intf) 691 { 692 int i; 693 struct cmd_rcvr *rcvr, *rcvr2; 694 struct list_head list; 695 696 tasklet_kill(&intf->recv_tasklet); 697 698 free_smi_msg_list(&intf->waiting_rcv_msgs); 699 free_recv_msg_list(&intf->waiting_events); 700 701 /* 702 * Wholesale remove all the entries from the list in the 703 * interface and wait for RCU to know that none are in use. 704 */ 705 mutex_lock(&intf->cmd_rcvrs_mutex); 706 INIT_LIST_HEAD(&list); 707 list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu); 708 mutex_unlock(&intf->cmd_rcvrs_mutex); 709 710 list_for_each_entry_safe(rcvr, rcvr2, &list, link) 711 kfree(rcvr); 712 713 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { 714 if ((intf->seq_table[i].inuse) 715 && (intf->seq_table[i].recv_msg)) 716 ipmi_free_recv_msg(intf->seq_table[i].recv_msg); 717 } 718 } 719 720 static void intf_free(struct kref *ref) 721 { 722 struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount); 723 724 clean_up_interface_data(intf); 725 kfree(intf); 726 } 727 728 struct watcher_entry { 729 int intf_num; 730 struct ipmi_smi *intf; 731 struct list_head link; 732 }; 733 734 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) 735 { 736 struct ipmi_smi *intf; 737 int index, rv; 738 739 /* 740 * Make sure the driver is actually initialized, this handles 741 * problems with initialization order. 742 */ 743 rv = ipmi_init_msghandler(); 744 if (rv) 745 return rv; 746 747 mutex_lock(&smi_watchers_mutex); 748 749 list_add(&watcher->link, &smi_watchers); 750 751 index = srcu_read_lock(&ipmi_interfaces_srcu); 752 list_for_each_entry_rcu(intf, &ipmi_interfaces, link, 753 lockdep_is_held(&smi_watchers_mutex)) { 754 int intf_num = READ_ONCE(intf->intf_num); 755 756 if (intf_num == -1) 757 continue; 758 watcher->new_smi(intf_num, intf->si_dev); 759 } 760 srcu_read_unlock(&ipmi_interfaces_srcu, index); 761 762 mutex_unlock(&smi_watchers_mutex); 763 764 return 0; 765 } 766 EXPORT_SYMBOL(ipmi_smi_watcher_register); 767 768 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) 769 { 770 mutex_lock(&smi_watchers_mutex); 771 list_del(&watcher->link); 772 mutex_unlock(&smi_watchers_mutex); 773 return 0; 774 } 775 EXPORT_SYMBOL(ipmi_smi_watcher_unregister); 776 777 /* 778 * Must be called with smi_watchers_mutex held. 779 */ 780 static void 781 call_smi_watchers(int i, struct device *dev) 782 { 783 struct ipmi_smi_watcher *w; 784 785 mutex_lock(&smi_watchers_mutex); 786 list_for_each_entry(w, &smi_watchers, link) { 787 if (try_module_get(w->owner)) { 788 w->new_smi(i, dev); 789 module_put(w->owner); 790 } 791 } 792 mutex_unlock(&smi_watchers_mutex); 793 } 794 795 static int 796 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2) 797 { 798 if (addr1->addr_type != addr2->addr_type) 799 return 0; 800 801 if (addr1->channel != addr2->channel) 802 return 0; 803 804 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 805 struct ipmi_system_interface_addr *smi_addr1 806 = (struct ipmi_system_interface_addr *) addr1; 807 struct ipmi_system_interface_addr *smi_addr2 808 = (struct ipmi_system_interface_addr *) addr2; 809 return (smi_addr1->lun == smi_addr2->lun); 810 } 811 812 if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) { 813 struct ipmi_ipmb_addr *ipmb_addr1 814 = (struct ipmi_ipmb_addr *) addr1; 815 struct ipmi_ipmb_addr *ipmb_addr2 816 = (struct ipmi_ipmb_addr *) addr2; 817 818 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr) 819 && (ipmb_addr1->lun == ipmb_addr2->lun)); 820 } 821 822 if (is_ipmb_direct_addr(addr1)) { 823 struct ipmi_ipmb_direct_addr *daddr1 824 = (struct ipmi_ipmb_direct_addr *) addr1; 825 struct ipmi_ipmb_direct_addr *daddr2 826 = (struct ipmi_ipmb_direct_addr *) addr2; 827 828 return daddr1->slave_addr == daddr2->slave_addr && 829 daddr1->rq_lun == daddr2->rq_lun && 830 daddr1->rs_lun == daddr2->rs_lun; 831 } 832 833 if (is_lan_addr(addr1)) { 834 struct ipmi_lan_addr *lan_addr1 835 = (struct ipmi_lan_addr *) addr1; 836 struct ipmi_lan_addr *lan_addr2 837 = (struct ipmi_lan_addr *) addr2; 838 839 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID) 840 && (lan_addr1->local_SWID == lan_addr2->local_SWID) 841 && (lan_addr1->session_handle 842 == lan_addr2->session_handle) 843 && (lan_addr1->lun == lan_addr2->lun)); 844 } 845 846 return 1; 847 } 848 849 int ipmi_validate_addr(struct ipmi_addr *addr, int len) 850 { 851 if (len < sizeof(struct ipmi_system_interface_addr)) 852 return -EINVAL; 853 854 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 855 if (addr->channel != IPMI_BMC_CHANNEL) 856 return -EINVAL; 857 return 0; 858 } 859 860 if ((addr->channel == IPMI_BMC_CHANNEL) 861 || (addr->channel >= IPMI_MAX_CHANNELS) 862 || (addr->channel < 0)) 863 return -EINVAL; 864 865 if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) { 866 if (len < sizeof(struct ipmi_ipmb_addr)) 867 return -EINVAL; 868 return 0; 869 } 870 871 if (is_ipmb_direct_addr(addr)) { 872 struct ipmi_ipmb_direct_addr *daddr = (void *) addr; 873 874 if (addr->channel != 0) 875 return -EINVAL; 876 if (len < sizeof(struct ipmi_ipmb_direct_addr)) 877 return -EINVAL; 878 879 if (daddr->slave_addr & 0x01) 880 return -EINVAL; 881 if (daddr->rq_lun >= 4) 882 return -EINVAL; 883 if (daddr->rs_lun >= 4) 884 return -EINVAL; 885 return 0; 886 } 887 888 if (is_lan_addr(addr)) { 889 if (len < sizeof(struct ipmi_lan_addr)) 890 return -EINVAL; 891 return 0; 892 } 893 894 return -EINVAL; 895 } 896 EXPORT_SYMBOL(ipmi_validate_addr); 897 898 unsigned int ipmi_addr_length(int addr_type) 899 { 900 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 901 return sizeof(struct ipmi_system_interface_addr); 902 903 if ((addr_type == IPMI_IPMB_ADDR_TYPE) 904 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) 905 return sizeof(struct ipmi_ipmb_addr); 906 907 if (addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE) 908 return sizeof(struct ipmi_ipmb_direct_addr); 909 910 if (addr_type == IPMI_LAN_ADDR_TYPE) 911 return sizeof(struct ipmi_lan_addr); 912 913 return 0; 914 } 915 EXPORT_SYMBOL(ipmi_addr_length); 916 917 static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 918 { 919 int rv = 0; 920 921 if (!msg->user) { 922 /* Special handling for NULL users. */ 923 if (intf->null_user_handler) { 924 intf->null_user_handler(intf, msg); 925 } else { 926 /* No handler, so give up. */ 927 rv = -EINVAL; 928 } 929 ipmi_free_recv_msg(msg); 930 } else if (oops_in_progress) { 931 /* 932 * If we are running in the panic context, calling the 933 * receive handler doesn't much meaning and has a deadlock 934 * risk. At this moment, simply skip it in that case. 935 */ 936 ipmi_free_recv_msg(msg); 937 } else { 938 int index; 939 struct ipmi_user *user = acquire_ipmi_user(msg->user, &index); 940 941 if (user) { 942 user->handler->ipmi_recv_hndl(msg, user->handler_data); 943 release_ipmi_user(user, index); 944 } else { 945 /* User went away, give up. */ 946 ipmi_free_recv_msg(msg); 947 rv = -EINVAL; 948 } 949 } 950 951 return rv; 952 } 953 954 static void deliver_local_response(struct ipmi_smi *intf, 955 struct ipmi_recv_msg *msg) 956 { 957 if (deliver_response(intf, msg)) 958 ipmi_inc_stat(intf, unhandled_local_responses); 959 else 960 ipmi_inc_stat(intf, handled_local_responses); 961 } 962 963 static void deliver_err_response(struct ipmi_smi *intf, 964 struct ipmi_recv_msg *msg, int err) 965 { 966 msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 967 msg->msg_data[0] = err; 968 msg->msg.netfn |= 1; /* Convert to a response. */ 969 msg->msg.data_len = 1; 970 msg->msg.data = msg->msg_data; 971 deliver_local_response(intf, msg); 972 } 973 974 static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags) 975 { 976 unsigned long iflags; 977 978 if (!intf->handlers->set_need_watch) 979 return; 980 981 spin_lock_irqsave(&intf->watch_lock, iflags); 982 if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES) 983 intf->response_waiters++; 984 985 if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG) 986 intf->watchdog_waiters++; 987 988 if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS) 989 intf->command_waiters++; 990 991 if ((intf->last_watch_mask & flags) != flags) { 992 intf->last_watch_mask |= flags; 993 intf->handlers->set_need_watch(intf->send_info, 994 intf->last_watch_mask); 995 } 996 spin_unlock_irqrestore(&intf->watch_lock, iflags); 997 } 998 999 static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags) 1000 { 1001 unsigned long iflags; 1002 1003 if (!intf->handlers->set_need_watch) 1004 return; 1005 1006 spin_lock_irqsave(&intf->watch_lock, iflags); 1007 if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES) 1008 intf->response_waiters--; 1009 1010 if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG) 1011 intf->watchdog_waiters--; 1012 1013 if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS) 1014 intf->command_waiters--; 1015 1016 flags = 0; 1017 if (intf->response_waiters) 1018 flags |= IPMI_WATCH_MASK_CHECK_MESSAGES; 1019 if (intf->watchdog_waiters) 1020 flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG; 1021 if (intf->command_waiters) 1022 flags |= IPMI_WATCH_MASK_CHECK_COMMANDS; 1023 1024 if (intf->last_watch_mask != flags) { 1025 intf->last_watch_mask = flags; 1026 intf->handlers->set_need_watch(intf->send_info, 1027 intf->last_watch_mask); 1028 } 1029 spin_unlock_irqrestore(&intf->watch_lock, iflags); 1030 } 1031 1032 /* 1033 * Find the next sequence number not being used and add the given 1034 * message with the given timeout to the sequence table. This must be 1035 * called with the interface's seq_lock held. 1036 */ 1037 static int intf_next_seq(struct ipmi_smi *intf, 1038 struct ipmi_recv_msg *recv_msg, 1039 unsigned long timeout, 1040 int retries, 1041 int broadcast, 1042 unsigned char *seq, 1043 long *seqid) 1044 { 1045 int rv = 0; 1046 unsigned int i; 1047 1048 if (timeout == 0) 1049 timeout = default_retry_ms; 1050 if (retries < 0) 1051 retries = default_max_retries; 1052 1053 for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq; 1054 i = (i+1)%IPMI_IPMB_NUM_SEQ) { 1055 if (!intf->seq_table[i].inuse) 1056 break; 1057 } 1058 1059 if (!intf->seq_table[i].inuse) { 1060 intf->seq_table[i].recv_msg = recv_msg; 1061 1062 /* 1063 * Start with the maximum timeout, when the send response 1064 * comes in we will start the real timer. 1065 */ 1066 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT; 1067 intf->seq_table[i].orig_timeout = timeout; 1068 intf->seq_table[i].retries_left = retries; 1069 intf->seq_table[i].broadcast = broadcast; 1070 intf->seq_table[i].inuse = 1; 1071 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid); 1072 *seq = i; 1073 *seqid = intf->seq_table[i].seqid; 1074 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ; 1075 smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); 1076 need_waiter(intf); 1077 } else { 1078 rv = -EAGAIN; 1079 } 1080 1081 return rv; 1082 } 1083 1084 /* 1085 * Return the receive message for the given sequence number and 1086 * release the sequence number so it can be reused. Some other data 1087 * is passed in to be sure the message matches up correctly (to help 1088 * guard against message coming in after their timeout and the 1089 * sequence number being reused). 1090 */ 1091 static int intf_find_seq(struct ipmi_smi *intf, 1092 unsigned char seq, 1093 short channel, 1094 unsigned char cmd, 1095 unsigned char netfn, 1096 struct ipmi_addr *addr, 1097 struct ipmi_recv_msg **recv_msg) 1098 { 1099 int rv = -ENODEV; 1100 unsigned long flags; 1101 1102 if (seq >= IPMI_IPMB_NUM_SEQ) 1103 return -EINVAL; 1104 1105 spin_lock_irqsave(&intf->seq_lock, flags); 1106 if (intf->seq_table[seq].inuse) { 1107 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg; 1108 1109 if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd) 1110 && (msg->msg.netfn == netfn) 1111 && (ipmi_addr_equal(addr, &msg->addr))) { 1112 *recv_msg = msg; 1113 intf->seq_table[seq].inuse = 0; 1114 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); 1115 rv = 0; 1116 } 1117 } 1118 spin_unlock_irqrestore(&intf->seq_lock, flags); 1119 1120 return rv; 1121 } 1122 1123 1124 /* Start the timer for a specific sequence table entry. */ 1125 static int intf_start_seq_timer(struct ipmi_smi *intf, 1126 long msgid) 1127 { 1128 int rv = -ENODEV; 1129 unsigned long flags; 1130 unsigned char seq; 1131 unsigned long seqid; 1132 1133 1134 GET_SEQ_FROM_MSGID(msgid, seq, seqid); 1135 1136 spin_lock_irqsave(&intf->seq_lock, flags); 1137 /* 1138 * We do this verification because the user can be deleted 1139 * while a message is outstanding. 1140 */ 1141 if ((intf->seq_table[seq].inuse) 1142 && (intf->seq_table[seq].seqid == seqid)) { 1143 struct seq_table *ent = &intf->seq_table[seq]; 1144 ent->timeout = ent->orig_timeout; 1145 rv = 0; 1146 } 1147 spin_unlock_irqrestore(&intf->seq_lock, flags); 1148 1149 return rv; 1150 } 1151 1152 /* Got an error for the send message for a specific sequence number. */ 1153 static int intf_err_seq(struct ipmi_smi *intf, 1154 long msgid, 1155 unsigned int err) 1156 { 1157 int rv = -ENODEV; 1158 unsigned long flags; 1159 unsigned char seq; 1160 unsigned long seqid; 1161 struct ipmi_recv_msg *msg = NULL; 1162 1163 1164 GET_SEQ_FROM_MSGID(msgid, seq, seqid); 1165 1166 spin_lock_irqsave(&intf->seq_lock, flags); 1167 /* 1168 * We do this verification because the user can be deleted 1169 * while a message is outstanding. 1170 */ 1171 if ((intf->seq_table[seq].inuse) 1172 && (intf->seq_table[seq].seqid == seqid)) { 1173 struct seq_table *ent = &intf->seq_table[seq]; 1174 1175 ent->inuse = 0; 1176 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); 1177 msg = ent->recv_msg; 1178 rv = 0; 1179 } 1180 spin_unlock_irqrestore(&intf->seq_lock, flags); 1181 1182 if (msg) 1183 deliver_err_response(intf, msg, err); 1184 1185 return rv; 1186 } 1187 1188 static void free_user_work(struct work_struct *work) 1189 { 1190 struct ipmi_user *user = container_of(work, struct ipmi_user, 1191 remove_work); 1192 1193 cleanup_srcu_struct(&user->release_barrier); 1194 vfree(user); 1195 } 1196 1197 int ipmi_create_user(unsigned int if_num, 1198 const struct ipmi_user_hndl *handler, 1199 void *handler_data, 1200 struct ipmi_user **user) 1201 { 1202 unsigned long flags; 1203 struct ipmi_user *new_user; 1204 int rv, index; 1205 struct ipmi_smi *intf; 1206 1207 /* 1208 * There is no module usecount here, because it's not 1209 * required. Since this can only be used by and called from 1210 * other modules, they will implicitly use this module, and 1211 * thus this can't be removed unless the other modules are 1212 * removed. 1213 */ 1214 1215 if (handler == NULL) 1216 return -EINVAL; 1217 1218 /* 1219 * Make sure the driver is actually initialized, this handles 1220 * problems with initialization order. 1221 */ 1222 rv = ipmi_init_msghandler(); 1223 if (rv) 1224 return rv; 1225 1226 new_user = vzalloc(sizeof(*new_user)); 1227 if (!new_user) 1228 return -ENOMEM; 1229 1230 index = srcu_read_lock(&ipmi_interfaces_srcu); 1231 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { 1232 if (intf->intf_num == if_num) 1233 goto found; 1234 } 1235 /* Not found, return an error */ 1236 rv = -EINVAL; 1237 goto out_kfree; 1238 1239 found: 1240 if (atomic_add_return(1, &intf->nr_users) > max_users) { 1241 rv = -EBUSY; 1242 goto out_kfree; 1243 } 1244 1245 INIT_WORK(&new_user->remove_work, free_user_work); 1246 1247 rv = init_srcu_struct(&new_user->release_barrier); 1248 if (rv) 1249 goto out_kfree; 1250 1251 if (!try_module_get(intf->owner)) { 1252 rv = -ENODEV; 1253 goto out_kfree; 1254 } 1255 1256 /* Note that each existing user holds a refcount to the interface. */ 1257 kref_get(&intf->refcount); 1258 1259 kref_init(&new_user->refcount); 1260 new_user->handler = handler; 1261 new_user->handler_data = handler_data; 1262 new_user->intf = intf; 1263 new_user->gets_events = false; 1264 1265 rcu_assign_pointer(new_user->self, new_user); 1266 spin_lock_irqsave(&intf->seq_lock, flags); 1267 list_add_rcu(&new_user->link, &intf->users); 1268 spin_unlock_irqrestore(&intf->seq_lock, flags); 1269 if (handler->ipmi_watchdog_pretimeout) 1270 /* User wants pretimeouts, so make sure to watch for them. */ 1271 smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG); 1272 srcu_read_unlock(&ipmi_interfaces_srcu, index); 1273 *user = new_user; 1274 return 0; 1275 1276 out_kfree: 1277 atomic_dec(&intf->nr_users); 1278 srcu_read_unlock(&ipmi_interfaces_srcu, index); 1279 vfree(new_user); 1280 return rv; 1281 } 1282 EXPORT_SYMBOL(ipmi_create_user); 1283 1284 int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data) 1285 { 1286 int rv, index; 1287 struct ipmi_smi *intf; 1288 1289 index = srcu_read_lock(&ipmi_interfaces_srcu); 1290 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { 1291 if (intf->intf_num == if_num) 1292 goto found; 1293 } 1294 srcu_read_unlock(&ipmi_interfaces_srcu, index); 1295 1296 /* Not found, return an error */ 1297 return -EINVAL; 1298 1299 found: 1300 if (!intf->handlers->get_smi_info) 1301 rv = -ENOTTY; 1302 else 1303 rv = intf->handlers->get_smi_info(intf->send_info, data); 1304 srcu_read_unlock(&ipmi_interfaces_srcu, index); 1305 1306 return rv; 1307 } 1308 EXPORT_SYMBOL(ipmi_get_smi_info); 1309 1310 static void free_user(struct kref *ref) 1311 { 1312 struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); 1313 1314 /* SRCU cleanup must happen in task context. */ 1315 queue_work(remove_work_wq, &user->remove_work); 1316 } 1317 1318 static void _ipmi_destroy_user(struct ipmi_user *user) 1319 { 1320 struct ipmi_smi *intf = user->intf; 1321 int i; 1322 unsigned long flags; 1323 struct cmd_rcvr *rcvr; 1324 struct cmd_rcvr *rcvrs = NULL; 1325 1326 if (!acquire_ipmi_user(user, &i)) { 1327 /* 1328 * The user has already been cleaned up, just make sure 1329 * nothing is using it and return. 1330 */ 1331 synchronize_srcu(&user->release_barrier); 1332 return; 1333 } 1334 1335 rcu_assign_pointer(user->self, NULL); 1336 release_ipmi_user(user, i); 1337 1338 synchronize_srcu(&user->release_barrier); 1339 1340 if (user->handler->shutdown) 1341 user->handler->shutdown(user->handler_data); 1342 1343 if (user->handler->ipmi_watchdog_pretimeout) 1344 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG); 1345 1346 if (user->gets_events) 1347 atomic_dec(&intf->event_waiters); 1348 1349 /* Remove the user from the interface's sequence table. */ 1350 spin_lock_irqsave(&intf->seq_lock, flags); 1351 list_del_rcu(&user->link); 1352 atomic_dec(&intf->nr_users); 1353 1354 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { 1355 if (intf->seq_table[i].inuse 1356 && (intf->seq_table[i].recv_msg->user == user)) { 1357 intf->seq_table[i].inuse = 0; 1358 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); 1359 ipmi_free_recv_msg(intf->seq_table[i].recv_msg); 1360 } 1361 } 1362 spin_unlock_irqrestore(&intf->seq_lock, flags); 1363 1364 /* 1365 * Remove the user from the command receiver's table. First 1366 * we build a list of everything (not using the standard link, 1367 * since other things may be using it till we do 1368 * synchronize_srcu()) then free everything in that list. 1369 */ 1370 mutex_lock(&intf->cmd_rcvrs_mutex); 1371 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link, 1372 lockdep_is_held(&intf->cmd_rcvrs_mutex)) { 1373 if (rcvr->user == user) { 1374 list_del_rcu(&rcvr->link); 1375 rcvr->next = rcvrs; 1376 rcvrs = rcvr; 1377 } 1378 } 1379 mutex_unlock(&intf->cmd_rcvrs_mutex); 1380 synchronize_rcu(); 1381 while (rcvrs) { 1382 rcvr = rcvrs; 1383 rcvrs = rcvr->next; 1384 kfree(rcvr); 1385 } 1386 1387 kref_put(&intf->refcount, intf_free); 1388 module_put(intf->owner); 1389 } 1390 1391 int ipmi_destroy_user(struct ipmi_user *user) 1392 { 1393 _ipmi_destroy_user(user); 1394 1395 kref_put(&user->refcount, free_user); 1396 1397 return 0; 1398 } 1399 EXPORT_SYMBOL(ipmi_destroy_user); 1400 1401 int ipmi_get_version(struct ipmi_user *user, 1402 unsigned char *major, 1403 unsigned char *minor) 1404 { 1405 struct ipmi_device_id id; 1406 int rv, index; 1407 1408 user = acquire_ipmi_user(user, &index); 1409 if (!user) 1410 return -ENODEV; 1411 1412 rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL); 1413 if (!rv) { 1414 *major = ipmi_version_major(&id); 1415 *minor = ipmi_version_minor(&id); 1416 } 1417 release_ipmi_user(user, index); 1418 1419 return rv; 1420 } 1421 EXPORT_SYMBOL(ipmi_get_version); 1422 1423 int ipmi_set_my_address(struct ipmi_user *user, 1424 unsigned int channel, 1425 unsigned char address) 1426 { 1427 int index, rv = 0; 1428 1429 user = acquire_ipmi_user(user, &index); 1430 if (!user) 1431 return -ENODEV; 1432 1433 if (channel >= IPMI_MAX_CHANNELS) { 1434 rv = -EINVAL; 1435 } else { 1436 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); 1437 user->intf->addrinfo[channel].address = address; 1438 } 1439 release_ipmi_user(user, index); 1440 1441 return rv; 1442 } 1443 EXPORT_SYMBOL(ipmi_set_my_address); 1444 1445 int ipmi_get_my_address(struct ipmi_user *user, 1446 unsigned int channel, 1447 unsigned char *address) 1448 { 1449 int index, rv = 0; 1450 1451 user = acquire_ipmi_user(user, &index); 1452 if (!user) 1453 return -ENODEV; 1454 1455 if (channel >= IPMI_MAX_CHANNELS) { 1456 rv = -EINVAL; 1457 } else { 1458 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); 1459 *address = user->intf->addrinfo[channel].address; 1460 } 1461 release_ipmi_user(user, index); 1462 1463 return rv; 1464 } 1465 EXPORT_SYMBOL(ipmi_get_my_address); 1466 1467 int ipmi_set_my_LUN(struct ipmi_user *user, 1468 unsigned int channel, 1469 unsigned char LUN) 1470 { 1471 int index, rv = 0; 1472 1473 user = acquire_ipmi_user(user, &index); 1474 if (!user) 1475 return -ENODEV; 1476 1477 if (channel >= IPMI_MAX_CHANNELS) { 1478 rv = -EINVAL; 1479 } else { 1480 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); 1481 user->intf->addrinfo[channel].lun = LUN & 0x3; 1482 } 1483 release_ipmi_user(user, index); 1484 1485 return rv; 1486 } 1487 EXPORT_SYMBOL(ipmi_set_my_LUN); 1488 1489 int ipmi_get_my_LUN(struct ipmi_user *user, 1490 unsigned int channel, 1491 unsigned char *address) 1492 { 1493 int index, rv = 0; 1494 1495 user = acquire_ipmi_user(user, &index); 1496 if (!user) 1497 return -ENODEV; 1498 1499 if (channel >= IPMI_MAX_CHANNELS) { 1500 rv = -EINVAL; 1501 } else { 1502 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS); 1503 *address = user->intf->addrinfo[channel].lun; 1504 } 1505 release_ipmi_user(user, index); 1506 1507 return rv; 1508 } 1509 EXPORT_SYMBOL(ipmi_get_my_LUN); 1510 1511 int ipmi_get_maintenance_mode(struct ipmi_user *user) 1512 { 1513 int mode, index; 1514 unsigned long flags; 1515 1516 user = acquire_ipmi_user(user, &index); 1517 if (!user) 1518 return -ENODEV; 1519 1520 spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags); 1521 mode = user->intf->maintenance_mode; 1522 spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags); 1523 release_ipmi_user(user, index); 1524 1525 return mode; 1526 } 1527 EXPORT_SYMBOL(ipmi_get_maintenance_mode); 1528 1529 static void maintenance_mode_update(struct ipmi_smi *intf) 1530 { 1531 if (intf->handlers->set_maintenance_mode) 1532 intf->handlers->set_maintenance_mode( 1533 intf->send_info, intf->maintenance_mode_enable); 1534 } 1535 1536 int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode) 1537 { 1538 int rv = 0, index; 1539 unsigned long flags; 1540 struct ipmi_smi *intf = user->intf; 1541 1542 user = acquire_ipmi_user(user, &index); 1543 if (!user) 1544 return -ENODEV; 1545 1546 spin_lock_irqsave(&intf->maintenance_mode_lock, flags); 1547 if (intf->maintenance_mode != mode) { 1548 switch (mode) { 1549 case IPMI_MAINTENANCE_MODE_AUTO: 1550 intf->maintenance_mode_enable 1551 = (intf->auto_maintenance_timeout > 0); 1552 break; 1553 1554 case IPMI_MAINTENANCE_MODE_OFF: 1555 intf->maintenance_mode_enable = false; 1556 break; 1557 1558 case IPMI_MAINTENANCE_MODE_ON: 1559 intf->maintenance_mode_enable = true; 1560 break; 1561 1562 default: 1563 rv = -EINVAL; 1564 goto out_unlock; 1565 } 1566 intf->maintenance_mode = mode; 1567 1568 maintenance_mode_update(intf); 1569 } 1570 out_unlock: 1571 spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags); 1572 release_ipmi_user(user, index); 1573 1574 return rv; 1575 } 1576 EXPORT_SYMBOL(ipmi_set_maintenance_mode); 1577 1578 int ipmi_set_gets_events(struct ipmi_user *user, bool val) 1579 { 1580 unsigned long flags; 1581 struct ipmi_smi *intf = user->intf; 1582 struct ipmi_recv_msg *msg, *msg2; 1583 struct list_head msgs; 1584 int index; 1585 1586 user = acquire_ipmi_user(user, &index); 1587 if (!user) 1588 return -ENODEV; 1589 1590 INIT_LIST_HEAD(&msgs); 1591 1592 spin_lock_irqsave(&intf->events_lock, flags); 1593 if (user->gets_events == val) 1594 goto out; 1595 1596 user->gets_events = val; 1597 1598 if (val) { 1599 if (atomic_inc_return(&intf->event_waiters) == 1) 1600 need_waiter(intf); 1601 } else { 1602 atomic_dec(&intf->event_waiters); 1603 } 1604 1605 if (intf->delivering_events) 1606 /* 1607 * Another thread is delivering events for this, so 1608 * let it handle any new events. 1609 */ 1610 goto out; 1611 1612 /* Deliver any queued events. */ 1613 while (user->gets_events && !list_empty(&intf->waiting_events)) { 1614 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) 1615 list_move_tail(&msg->link, &msgs); 1616 intf->waiting_events_count = 0; 1617 if (intf->event_msg_printed) { 1618 dev_warn(intf->si_dev, "Event queue no longer full\n"); 1619 intf->event_msg_printed = 0; 1620 } 1621 1622 intf->delivering_events = 1; 1623 spin_unlock_irqrestore(&intf->events_lock, flags); 1624 1625 list_for_each_entry_safe(msg, msg2, &msgs, link) { 1626 msg->user = user; 1627 kref_get(&user->refcount); 1628 deliver_local_response(intf, msg); 1629 } 1630 1631 spin_lock_irqsave(&intf->events_lock, flags); 1632 intf->delivering_events = 0; 1633 } 1634 1635 out: 1636 spin_unlock_irqrestore(&intf->events_lock, flags); 1637 release_ipmi_user(user, index); 1638 1639 return 0; 1640 } 1641 EXPORT_SYMBOL(ipmi_set_gets_events); 1642 1643 static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf, 1644 unsigned char netfn, 1645 unsigned char cmd, 1646 unsigned char chan) 1647 { 1648 struct cmd_rcvr *rcvr; 1649 1650 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link, 1651 lockdep_is_held(&intf->cmd_rcvrs_mutex)) { 1652 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd) 1653 && (rcvr->chans & (1 << chan))) 1654 return rcvr; 1655 } 1656 return NULL; 1657 } 1658 1659 static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf, 1660 unsigned char netfn, 1661 unsigned char cmd, 1662 unsigned int chans) 1663 { 1664 struct cmd_rcvr *rcvr; 1665 1666 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link, 1667 lockdep_is_held(&intf->cmd_rcvrs_mutex)) { 1668 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd) 1669 && (rcvr->chans & chans)) 1670 return 0; 1671 } 1672 return 1; 1673 } 1674 1675 int ipmi_register_for_cmd(struct ipmi_user *user, 1676 unsigned char netfn, 1677 unsigned char cmd, 1678 unsigned int chans) 1679 { 1680 struct ipmi_smi *intf = user->intf; 1681 struct cmd_rcvr *rcvr; 1682 int rv = 0, index; 1683 1684 user = acquire_ipmi_user(user, &index); 1685 if (!user) 1686 return -ENODEV; 1687 1688 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL); 1689 if (!rcvr) { 1690 rv = -ENOMEM; 1691 goto out_release; 1692 } 1693 rcvr->cmd = cmd; 1694 rcvr->netfn = netfn; 1695 rcvr->chans = chans; 1696 rcvr->user = user; 1697 1698 mutex_lock(&intf->cmd_rcvrs_mutex); 1699 /* Make sure the command/netfn is not already registered. */ 1700 if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) { 1701 rv = -EBUSY; 1702 goto out_unlock; 1703 } 1704 1705 smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS); 1706 1707 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs); 1708 1709 out_unlock: 1710 mutex_unlock(&intf->cmd_rcvrs_mutex); 1711 if (rv) 1712 kfree(rcvr); 1713 out_release: 1714 release_ipmi_user(user, index); 1715 1716 return rv; 1717 } 1718 EXPORT_SYMBOL(ipmi_register_for_cmd); 1719 1720 int ipmi_unregister_for_cmd(struct ipmi_user *user, 1721 unsigned char netfn, 1722 unsigned char cmd, 1723 unsigned int chans) 1724 { 1725 struct ipmi_smi *intf = user->intf; 1726 struct cmd_rcvr *rcvr; 1727 struct cmd_rcvr *rcvrs = NULL; 1728 int i, rv = -ENOENT, index; 1729 1730 user = acquire_ipmi_user(user, &index); 1731 if (!user) 1732 return -ENODEV; 1733 1734 mutex_lock(&intf->cmd_rcvrs_mutex); 1735 for (i = 0; i < IPMI_NUM_CHANNELS; i++) { 1736 if (((1 << i) & chans) == 0) 1737 continue; 1738 rcvr = find_cmd_rcvr(intf, netfn, cmd, i); 1739 if (rcvr == NULL) 1740 continue; 1741 if (rcvr->user == user) { 1742 rv = 0; 1743 rcvr->chans &= ~chans; 1744 if (rcvr->chans == 0) { 1745 list_del_rcu(&rcvr->link); 1746 rcvr->next = rcvrs; 1747 rcvrs = rcvr; 1748 } 1749 } 1750 } 1751 mutex_unlock(&intf->cmd_rcvrs_mutex); 1752 synchronize_rcu(); 1753 release_ipmi_user(user, index); 1754 while (rcvrs) { 1755 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS); 1756 rcvr = rcvrs; 1757 rcvrs = rcvr->next; 1758 kfree(rcvr); 1759 } 1760 1761 return rv; 1762 } 1763 EXPORT_SYMBOL(ipmi_unregister_for_cmd); 1764 1765 unsigned char 1766 ipmb_checksum(unsigned char *data, int size) 1767 { 1768 unsigned char csum = 0; 1769 1770 for (; size > 0; size--, data++) 1771 csum += *data; 1772 1773 return -csum; 1774 } 1775 EXPORT_SYMBOL(ipmb_checksum); 1776 1777 static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg, 1778 struct kernel_ipmi_msg *msg, 1779 struct ipmi_ipmb_addr *ipmb_addr, 1780 long msgid, 1781 unsigned char ipmb_seq, 1782 int broadcast, 1783 unsigned char source_address, 1784 unsigned char source_lun) 1785 { 1786 int i = broadcast; 1787 1788 /* Format the IPMB header data. */ 1789 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 1790 smi_msg->data[1] = IPMI_SEND_MSG_CMD; 1791 smi_msg->data[2] = ipmb_addr->channel; 1792 if (broadcast) 1793 smi_msg->data[3] = 0; 1794 smi_msg->data[i+3] = ipmb_addr->slave_addr; 1795 smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3); 1796 smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2); 1797 smi_msg->data[i+6] = source_address; 1798 smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun; 1799 smi_msg->data[i+8] = msg->cmd; 1800 1801 /* Now tack on the data to the message. */ 1802 if (msg->data_len > 0) 1803 memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len); 1804 smi_msg->data_size = msg->data_len + 9; 1805 1806 /* Now calculate the checksum and tack it on. */ 1807 smi_msg->data[i+smi_msg->data_size] 1808 = ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6); 1809 1810 /* 1811 * Add on the checksum size and the offset from the 1812 * broadcast. 1813 */ 1814 smi_msg->data_size += 1 + i; 1815 1816 smi_msg->msgid = msgid; 1817 } 1818 1819 static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg, 1820 struct kernel_ipmi_msg *msg, 1821 struct ipmi_lan_addr *lan_addr, 1822 long msgid, 1823 unsigned char ipmb_seq, 1824 unsigned char source_lun) 1825 { 1826 /* Format the IPMB header data. */ 1827 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 1828 smi_msg->data[1] = IPMI_SEND_MSG_CMD; 1829 smi_msg->data[2] = lan_addr->channel; 1830 smi_msg->data[3] = lan_addr->session_handle; 1831 smi_msg->data[4] = lan_addr->remote_SWID; 1832 smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3); 1833 smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2); 1834 smi_msg->data[7] = lan_addr->local_SWID; 1835 smi_msg->data[8] = (ipmb_seq << 2) | source_lun; 1836 smi_msg->data[9] = msg->cmd; 1837 1838 /* Now tack on the data to the message. */ 1839 if (msg->data_len > 0) 1840 memcpy(&smi_msg->data[10], msg->data, msg->data_len); 1841 smi_msg->data_size = msg->data_len + 10; 1842 1843 /* Now calculate the checksum and tack it on. */ 1844 smi_msg->data[smi_msg->data_size] 1845 = ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7); 1846 1847 /* 1848 * Add on the checksum size and the offset from the 1849 * broadcast. 1850 */ 1851 smi_msg->data_size += 1; 1852 1853 smi_msg->msgid = msgid; 1854 } 1855 1856 static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf, 1857 struct ipmi_smi_msg *smi_msg, 1858 int priority) 1859 { 1860 if (intf->curr_msg) { 1861 if (priority > 0) 1862 list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs); 1863 else 1864 list_add_tail(&smi_msg->link, &intf->xmit_msgs); 1865 smi_msg = NULL; 1866 } else { 1867 intf->curr_msg = smi_msg; 1868 } 1869 1870 return smi_msg; 1871 } 1872 1873 static void smi_send(struct ipmi_smi *intf, 1874 const struct ipmi_smi_handlers *handlers, 1875 struct ipmi_smi_msg *smi_msg, int priority) 1876 { 1877 int run_to_completion = intf->run_to_completion; 1878 unsigned long flags = 0; 1879 1880 if (!run_to_completion) 1881 spin_lock_irqsave(&intf->xmit_msgs_lock, flags); 1882 smi_msg = smi_add_send_msg(intf, smi_msg, priority); 1883 1884 if (!run_to_completion) 1885 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); 1886 1887 if (smi_msg) 1888 handlers->sender(intf->send_info, smi_msg); 1889 } 1890 1891 static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg) 1892 { 1893 return (((msg->netfn == IPMI_NETFN_APP_REQUEST) 1894 && ((msg->cmd == IPMI_COLD_RESET_CMD) 1895 || (msg->cmd == IPMI_WARM_RESET_CMD))) 1896 || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)); 1897 } 1898 1899 static int i_ipmi_req_sysintf(struct ipmi_smi *intf, 1900 struct ipmi_addr *addr, 1901 long msgid, 1902 struct kernel_ipmi_msg *msg, 1903 struct ipmi_smi_msg *smi_msg, 1904 struct ipmi_recv_msg *recv_msg, 1905 int retries, 1906 unsigned int retry_time_ms) 1907 { 1908 struct ipmi_system_interface_addr *smi_addr; 1909 1910 if (msg->netfn & 1) 1911 /* Responses are not allowed to the SMI. */ 1912 return -EINVAL; 1913 1914 smi_addr = (struct ipmi_system_interface_addr *) addr; 1915 if (smi_addr->lun > 3) { 1916 ipmi_inc_stat(intf, sent_invalid_commands); 1917 return -EINVAL; 1918 } 1919 1920 memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr)); 1921 1922 if ((msg->netfn == IPMI_NETFN_APP_REQUEST) 1923 && ((msg->cmd == IPMI_SEND_MSG_CMD) 1924 || (msg->cmd == IPMI_GET_MSG_CMD) 1925 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) { 1926 /* 1927 * We don't let the user do these, since we manage 1928 * the sequence numbers. 1929 */ 1930 ipmi_inc_stat(intf, sent_invalid_commands); 1931 return -EINVAL; 1932 } 1933 1934 if (is_maintenance_mode_cmd(msg)) { 1935 unsigned long flags; 1936 1937 spin_lock_irqsave(&intf->maintenance_mode_lock, flags); 1938 intf->auto_maintenance_timeout 1939 = maintenance_mode_timeout_ms; 1940 if (!intf->maintenance_mode 1941 && !intf->maintenance_mode_enable) { 1942 intf->maintenance_mode_enable = true; 1943 maintenance_mode_update(intf); 1944 } 1945 spin_unlock_irqrestore(&intf->maintenance_mode_lock, 1946 flags); 1947 } 1948 1949 if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) { 1950 ipmi_inc_stat(intf, sent_invalid_commands); 1951 return -EMSGSIZE; 1952 } 1953 1954 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3); 1955 smi_msg->data[1] = msg->cmd; 1956 smi_msg->msgid = msgid; 1957 smi_msg->user_data = recv_msg; 1958 if (msg->data_len > 0) 1959 memcpy(&smi_msg->data[2], msg->data, msg->data_len); 1960 smi_msg->data_size = msg->data_len + 2; 1961 ipmi_inc_stat(intf, sent_local_commands); 1962 1963 return 0; 1964 } 1965 1966 static int i_ipmi_req_ipmb(struct ipmi_smi *intf, 1967 struct ipmi_addr *addr, 1968 long msgid, 1969 struct kernel_ipmi_msg *msg, 1970 struct ipmi_smi_msg *smi_msg, 1971 struct ipmi_recv_msg *recv_msg, 1972 unsigned char source_address, 1973 unsigned char source_lun, 1974 int retries, 1975 unsigned int retry_time_ms) 1976 { 1977 struct ipmi_ipmb_addr *ipmb_addr; 1978 unsigned char ipmb_seq; 1979 long seqid; 1980 int broadcast = 0; 1981 struct ipmi_channel *chans; 1982 int rv = 0; 1983 1984 if (addr->channel >= IPMI_MAX_CHANNELS) { 1985 ipmi_inc_stat(intf, sent_invalid_commands); 1986 return -EINVAL; 1987 } 1988 1989 chans = READ_ONCE(intf->channel_list)->c; 1990 1991 if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) { 1992 ipmi_inc_stat(intf, sent_invalid_commands); 1993 return -EINVAL; 1994 } 1995 1996 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) { 1997 /* 1998 * Broadcasts add a zero at the beginning of the 1999 * message, but otherwise is the same as an IPMB 2000 * address. 2001 */ 2002 addr->addr_type = IPMI_IPMB_ADDR_TYPE; 2003 broadcast = 1; 2004 retries = 0; /* Don't retry broadcasts. */ 2005 } 2006 2007 /* 2008 * 9 for the header and 1 for the checksum, plus 2009 * possibly one for the broadcast. 2010 */ 2011 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) { 2012 ipmi_inc_stat(intf, sent_invalid_commands); 2013 return -EMSGSIZE; 2014 } 2015 2016 ipmb_addr = (struct ipmi_ipmb_addr *) addr; 2017 if (ipmb_addr->lun > 3) { 2018 ipmi_inc_stat(intf, sent_invalid_commands); 2019 return -EINVAL; 2020 } 2021 2022 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr)); 2023 2024 if (recv_msg->msg.netfn & 0x1) { 2025 /* 2026 * It's a response, so use the user's sequence 2027 * from msgid. 2028 */ 2029 ipmi_inc_stat(intf, sent_ipmb_responses); 2030 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid, 2031 msgid, broadcast, 2032 source_address, source_lun); 2033 2034 /* 2035 * Save the receive message so we can use it 2036 * to deliver the response. 2037 */ 2038 smi_msg->user_data = recv_msg; 2039 } else { 2040 /* It's a command, so get a sequence for it. */ 2041 unsigned long flags; 2042 2043 spin_lock_irqsave(&intf->seq_lock, flags); 2044 2045 if (is_maintenance_mode_cmd(msg)) 2046 intf->ipmb_maintenance_mode_timeout = 2047 maintenance_mode_timeout_ms; 2048 2049 if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0) 2050 /* Different default in maintenance mode */ 2051 retry_time_ms = default_maintenance_retry_ms; 2052 2053 /* 2054 * Create a sequence number with a 1 second 2055 * timeout and 4 retries. 2056 */ 2057 rv = intf_next_seq(intf, 2058 recv_msg, 2059 retry_time_ms, 2060 retries, 2061 broadcast, 2062 &ipmb_seq, 2063 &seqid); 2064 if (rv) 2065 /* 2066 * We have used up all the sequence numbers, 2067 * probably, so abort. 2068 */ 2069 goto out_err; 2070 2071 ipmi_inc_stat(intf, sent_ipmb_commands); 2072 2073 /* 2074 * Store the sequence number in the message, 2075 * so that when the send message response 2076 * comes back we can start the timer. 2077 */ 2078 format_ipmb_msg(smi_msg, msg, ipmb_addr, 2079 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 2080 ipmb_seq, broadcast, 2081 source_address, source_lun); 2082 2083 /* 2084 * Copy the message into the recv message data, so we 2085 * can retransmit it later if necessary. 2086 */ 2087 memcpy(recv_msg->msg_data, smi_msg->data, 2088 smi_msg->data_size); 2089 recv_msg->msg.data = recv_msg->msg_data; 2090 recv_msg->msg.data_len = smi_msg->data_size; 2091 2092 /* 2093 * We don't unlock until here, because we need 2094 * to copy the completed message into the 2095 * recv_msg before we release the lock. 2096 * Otherwise, race conditions may bite us. I 2097 * know that's pretty paranoid, but I prefer 2098 * to be correct. 2099 */ 2100 out_err: 2101 spin_unlock_irqrestore(&intf->seq_lock, flags); 2102 } 2103 2104 return rv; 2105 } 2106 2107 static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf, 2108 struct ipmi_addr *addr, 2109 long msgid, 2110 struct kernel_ipmi_msg *msg, 2111 struct ipmi_smi_msg *smi_msg, 2112 struct ipmi_recv_msg *recv_msg, 2113 unsigned char source_lun) 2114 { 2115 struct ipmi_ipmb_direct_addr *daddr; 2116 bool is_cmd = !(recv_msg->msg.netfn & 0x1); 2117 2118 if (!(intf->handlers->flags & IPMI_SMI_CAN_HANDLE_IPMB_DIRECT)) 2119 return -EAFNOSUPPORT; 2120 2121 /* Responses must have a completion code. */ 2122 if (!is_cmd && msg->data_len < 1) { 2123 ipmi_inc_stat(intf, sent_invalid_commands); 2124 return -EINVAL; 2125 } 2126 2127 if ((msg->data_len + 4) > IPMI_MAX_MSG_LENGTH) { 2128 ipmi_inc_stat(intf, sent_invalid_commands); 2129 return -EMSGSIZE; 2130 } 2131 2132 daddr = (struct ipmi_ipmb_direct_addr *) addr; 2133 if (daddr->rq_lun > 3 || daddr->rs_lun > 3) { 2134 ipmi_inc_stat(intf, sent_invalid_commands); 2135 return -EINVAL; 2136 } 2137 2138 smi_msg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT; 2139 smi_msg->msgid = msgid; 2140 2141 if (is_cmd) { 2142 smi_msg->data[0] = msg->netfn << 2 | daddr->rs_lun; 2143 smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rq_lun; 2144 } else { 2145 smi_msg->data[0] = msg->netfn << 2 | daddr->rq_lun; 2146 smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rs_lun; 2147 } 2148 smi_msg->data[1] = daddr->slave_addr; 2149 smi_msg->data[3] = msg->cmd; 2150 2151 memcpy(smi_msg->data + 4, msg->data, msg->data_len); 2152 smi_msg->data_size = msg->data_len + 4; 2153 2154 smi_msg->user_data = recv_msg; 2155 2156 return 0; 2157 } 2158 2159 static int i_ipmi_req_lan(struct ipmi_smi *intf, 2160 struct ipmi_addr *addr, 2161 long msgid, 2162 struct kernel_ipmi_msg *msg, 2163 struct ipmi_smi_msg *smi_msg, 2164 struct ipmi_recv_msg *recv_msg, 2165 unsigned char source_lun, 2166 int retries, 2167 unsigned int retry_time_ms) 2168 { 2169 struct ipmi_lan_addr *lan_addr; 2170 unsigned char ipmb_seq; 2171 long seqid; 2172 struct ipmi_channel *chans; 2173 int rv = 0; 2174 2175 if (addr->channel >= IPMI_MAX_CHANNELS) { 2176 ipmi_inc_stat(intf, sent_invalid_commands); 2177 return -EINVAL; 2178 } 2179 2180 chans = READ_ONCE(intf->channel_list)->c; 2181 2182 if ((chans[addr->channel].medium 2183 != IPMI_CHANNEL_MEDIUM_8023LAN) 2184 && (chans[addr->channel].medium 2185 != IPMI_CHANNEL_MEDIUM_ASYNC)) { 2186 ipmi_inc_stat(intf, sent_invalid_commands); 2187 return -EINVAL; 2188 } 2189 2190 /* 11 for the header and 1 for the checksum. */ 2191 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) { 2192 ipmi_inc_stat(intf, sent_invalid_commands); 2193 return -EMSGSIZE; 2194 } 2195 2196 lan_addr = (struct ipmi_lan_addr *) addr; 2197 if (lan_addr->lun > 3) { 2198 ipmi_inc_stat(intf, sent_invalid_commands); 2199 return -EINVAL; 2200 } 2201 2202 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr)); 2203 2204 if (recv_msg->msg.netfn & 0x1) { 2205 /* 2206 * It's a response, so use the user's sequence 2207 * from msgid. 2208 */ 2209 ipmi_inc_stat(intf, sent_lan_responses); 2210 format_lan_msg(smi_msg, msg, lan_addr, msgid, 2211 msgid, source_lun); 2212 2213 /* 2214 * Save the receive message so we can use it 2215 * to deliver the response. 2216 */ 2217 smi_msg->user_data = recv_msg; 2218 } else { 2219 /* It's a command, so get a sequence for it. */ 2220 unsigned long flags; 2221 2222 spin_lock_irqsave(&intf->seq_lock, flags); 2223 2224 /* 2225 * Create a sequence number with a 1 second 2226 * timeout and 4 retries. 2227 */ 2228 rv = intf_next_seq(intf, 2229 recv_msg, 2230 retry_time_ms, 2231 retries, 2232 0, 2233 &ipmb_seq, 2234 &seqid); 2235 if (rv) 2236 /* 2237 * We have used up all the sequence numbers, 2238 * probably, so abort. 2239 */ 2240 goto out_err; 2241 2242 ipmi_inc_stat(intf, sent_lan_commands); 2243 2244 /* 2245 * Store the sequence number in the message, 2246 * so that when the send message response 2247 * comes back we can start the timer. 2248 */ 2249 format_lan_msg(smi_msg, msg, lan_addr, 2250 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 2251 ipmb_seq, source_lun); 2252 2253 /* 2254 * Copy the message into the recv message data, so we 2255 * can retransmit it later if necessary. 2256 */ 2257 memcpy(recv_msg->msg_data, smi_msg->data, 2258 smi_msg->data_size); 2259 recv_msg->msg.data = recv_msg->msg_data; 2260 recv_msg->msg.data_len = smi_msg->data_size; 2261 2262 /* 2263 * We don't unlock until here, because we need 2264 * to copy the completed message into the 2265 * recv_msg before we release the lock. 2266 * Otherwise, race conditions may bite us. I 2267 * know that's pretty paranoid, but I prefer 2268 * to be correct. 2269 */ 2270 out_err: 2271 spin_unlock_irqrestore(&intf->seq_lock, flags); 2272 } 2273 2274 return rv; 2275 } 2276 2277 /* 2278 * Separate from ipmi_request so that the user does not have to be 2279 * supplied in certain circumstances (mainly at panic time). If 2280 * messages are supplied, they will be freed, even if an error 2281 * occurs. 2282 */ 2283 static int i_ipmi_request(struct ipmi_user *user, 2284 struct ipmi_smi *intf, 2285 struct ipmi_addr *addr, 2286 long msgid, 2287 struct kernel_ipmi_msg *msg, 2288 void *user_msg_data, 2289 void *supplied_smi, 2290 struct ipmi_recv_msg *supplied_recv, 2291 int priority, 2292 unsigned char source_address, 2293 unsigned char source_lun, 2294 int retries, 2295 unsigned int retry_time_ms) 2296 { 2297 struct ipmi_smi_msg *smi_msg; 2298 struct ipmi_recv_msg *recv_msg; 2299 int rv = 0; 2300 2301 if (supplied_recv) 2302 recv_msg = supplied_recv; 2303 else { 2304 recv_msg = ipmi_alloc_recv_msg(); 2305 if (recv_msg == NULL) { 2306 rv = -ENOMEM; 2307 goto out; 2308 } 2309 } 2310 recv_msg->user_msg_data = user_msg_data; 2311 2312 if (supplied_smi) 2313 smi_msg = (struct ipmi_smi_msg *) supplied_smi; 2314 else { 2315 smi_msg = ipmi_alloc_smi_msg(); 2316 if (smi_msg == NULL) { 2317 if (!supplied_recv) 2318 ipmi_free_recv_msg(recv_msg); 2319 rv = -ENOMEM; 2320 goto out; 2321 } 2322 } 2323 2324 rcu_read_lock(); 2325 if (intf->in_shutdown) { 2326 rv = -ENODEV; 2327 goto out_err; 2328 } 2329 2330 recv_msg->user = user; 2331 if (user) 2332 /* The put happens when the message is freed. */ 2333 kref_get(&user->refcount); 2334 recv_msg->msgid = msgid; 2335 /* 2336 * Store the message to send in the receive message so timeout 2337 * responses can get the proper response data. 2338 */ 2339 recv_msg->msg = *msg; 2340 2341 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 2342 rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg, 2343 recv_msg, retries, retry_time_ms); 2344 } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) { 2345 rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg, 2346 source_address, source_lun, 2347 retries, retry_time_ms); 2348 } else if (is_ipmb_direct_addr(addr)) { 2349 rv = i_ipmi_req_ipmb_direct(intf, addr, msgid, msg, smi_msg, 2350 recv_msg, source_lun); 2351 } else if (is_lan_addr(addr)) { 2352 rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg, 2353 source_lun, retries, retry_time_ms); 2354 } else { 2355 /* Unknown address type. */ 2356 ipmi_inc_stat(intf, sent_invalid_commands); 2357 rv = -EINVAL; 2358 } 2359 2360 if (rv) { 2361 out_err: 2362 ipmi_free_smi_msg(smi_msg); 2363 ipmi_free_recv_msg(recv_msg); 2364 } else { 2365 pr_debug("Send: %*ph\n", smi_msg->data_size, smi_msg->data); 2366 2367 smi_send(intf, intf->handlers, smi_msg, priority); 2368 } 2369 rcu_read_unlock(); 2370 2371 out: 2372 return rv; 2373 } 2374 2375 static int check_addr(struct ipmi_smi *intf, 2376 struct ipmi_addr *addr, 2377 unsigned char *saddr, 2378 unsigned char *lun) 2379 { 2380 if (addr->channel >= IPMI_MAX_CHANNELS) 2381 return -EINVAL; 2382 addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS); 2383 *lun = intf->addrinfo[addr->channel].lun; 2384 *saddr = intf->addrinfo[addr->channel].address; 2385 return 0; 2386 } 2387 2388 int ipmi_request_settime(struct ipmi_user *user, 2389 struct ipmi_addr *addr, 2390 long msgid, 2391 struct kernel_ipmi_msg *msg, 2392 void *user_msg_data, 2393 int priority, 2394 int retries, 2395 unsigned int retry_time_ms) 2396 { 2397 unsigned char saddr = 0, lun = 0; 2398 int rv, index; 2399 2400 if (!user) 2401 return -EINVAL; 2402 2403 user = acquire_ipmi_user(user, &index); 2404 if (!user) 2405 return -ENODEV; 2406 2407 rv = check_addr(user->intf, addr, &saddr, &lun); 2408 if (!rv) 2409 rv = i_ipmi_request(user, 2410 user->intf, 2411 addr, 2412 msgid, 2413 msg, 2414 user_msg_data, 2415 NULL, NULL, 2416 priority, 2417 saddr, 2418 lun, 2419 retries, 2420 retry_time_ms); 2421 2422 release_ipmi_user(user, index); 2423 return rv; 2424 } 2425 EXPORT_SYMBOL(ipmi_request_settime); 2426 2427 int ipmi_request_supply_msgs(struct ipmi_user *user, 2428 struct ipmi_addr *addr, 2429 long msgid, 2430 struct kernel_ipmi_msg *msg, 2431 void *user_msg_data, 2432 void *supplied_smi, 2433 struct ipmi_recv_msg *supplied_recv, 2434 int priority) 2435 { 2436 unsigned char saddr = 0, lun = 0; 2437 int rv, index; 2438 2439 if (!user) 2440 return -EINVAL; 2441 2442 user = acquire_ipmi_user(user, &index); 2443 if (!user) 2444 return -ENODEV; 2445 2446 rv = check_addr(user->intf, addr, &saddr, &lun); 2447 if (!rv) 2448 rv = i_ipmi_request(user, 2449 user->intf, 2450 addr, 2451 msgid, 2452 msg, 2453 user_msg_data, 2454 supplied_smi, 2455 supplied_recv, 2456 priority, 2457 saddr, 2458 lun, 2459 -1, 0); 2460 2461 release_ipmi_user(user, index); 2462 return rv; 2463 } 2464 EXPORT_SYMBOL(ipmi_request_supply_msgs); 2465 2466 static void bmc_device_id_handler(struct ipmi_smi *intf, 2467 struct ipmi_recv_msg *msg) 2468 { 2469 int rv; 2470 2471 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 2472 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 2473 || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) { 2474 dev_warn(intf->si_dev, 2475 "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n", 2476 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd); 2477 return; 2478 } 2479 2480 if (msg->msg.data[0]) { 2481 dev_warn(intf->si_dev, "device id fetch failed: 0x%2.2x\n", 2482 msg->msg.data[0]); 2483 intf->bmc->dyn_id_set = 0; 2484 goto out; 2485 } 2486 2487 rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd, 2488 msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id); 2489 if (rv) { 2490 dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv); 2491 /* record completion code when error */ 2492 intf->bmc->cc = msg->msg.data[0]; 2493 intf->bmc->dyn_id_set = 0; 2494 } else { 2495 /* 2496 * Make sure the id data is available before setting 2497 * dyn_id_set. 2498 */ 2499 smp_wmb(); 2500 intf->bmc->dyn_id_set = 1; 2501 } 2502 out: 2503 wake_up(&intf->waitq); 2504 } 2505 2506 static int 2507 send_get_device_id_cmd(struct ipmi_smi *intf) 2508 { 2509 struct ipmi_system_interface_addr si; 2510 struct kernel_ipmi_msg msg; 2511 2512 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 2513 si.channel = IPMI_BMC_CHANNEL; 2514 si.lun = 0; 2515 2516 msg.netfn = IPMI_NETFN_APP_REQUEST; 2517 msg.cmd = IPMI_GET_DEVICE_ID_CMD; 2518 msg.data = NULL; 2519 msg.data_len = 0; 2520 2521 return i_ipmi_request(NULL, 2522 intf, 2523 (struct ipmi_addr *) &si, 2524 0, 2525 &msg, 2526 intf, 2527 NULL, 2528 NULL, 2529 0, 2530 intf->addrinfo[0].address, 2531 intf->addrinfo[0].lun, 2532 -1, 0); 2533 } 2534 2535 static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc) 2536 { 2537 int rv; 2538 unsigned int retry_count = 0; 2539 2540 intf->null_user_handler = bmc_device_id_handler; 2541 2542 retry: 2543 bmc->cc = 0; 2544 bmc->dyn_id_set = 2; 2545 2546 rv = send_get_device_id_cmd(intf); 2547 if (rv) 2548 goto out_reset_handler; 2549 2550 wait_event(intf->waitq, bmc->dyn_id_set != 2); 2551 2552 if (!bmc->dyn_id_set) { 2553 if (bmc->cc != IPMI_CC_NO_ERROR && 2554 ++retry_count <= GET_DEVICE_ID_MAX_RETRY) { 2555 msleep(500); 2556 dev_warn(intf->si_dev, 2557 "BMC returned 0x%2.2x, retry get bmc device id\n", 2558 bmc->cc); 2559 goto retry; 2560 } 2561 2562 rv = -EIO; /* Something went wrong in the fetch. */ 2563 } 2564 2565 /* dyn_id_set makes the id data available. */ 2566 smp_rmb(); 2567 2568 out_reset_handler: 2569 intf->null_user_handler = NULL; 2570 2571 return rv; 2572 } 2573 2574 /* 2575 * Fetch the device id for the bmc/interface. You must pass in either 2576 * bmc or intf, this code will get the other one. If the data has 2577 * been recently fetched, this will just use the cached data. Otherwise 2578 * it will run a new fetch. 2579 * 2580 * Except for the first time this is called (in ipmi_add_smi()), 2581 * this will always return good data; 2582 */ 2583 static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, 2584 struct ipmi_device_id *id, 2585 bool *guid_set, guid_t *guid, int intf_num) 2586 { 2587 int rv = 0; 2588 int prev_dyn_id_set, prev_guid_set; 2589 bool intf_set = intf != NULL; 2590 2591 if (!intf) { 2592 mutex_lock(&bmc->dyn_mutex); 2593 retry_bmc_lock: 2594 if (list_empty(&bmc->intfs)) { 2595 mutex_unlock(&bmc->dyn_mutex); 2596 return -ENOENT; 2597 } 2598 intf = list_first_entry(&bmc->intfs, struct ipmi_smi, 2599 bmc_link); 2600 kref_get(&intf->refcount); 2601 mutex_unlock(&bmc->dyn_mutex); 2602 mutex_lock(&intf->bmc_reg_mutex); 2603 mutex_lock(&bmc->dyn_mutex); 2604 if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi, 2605 bmc_link)) { 2606 mutex_unlock(&intf->bmc_reg_mutex); 2607 kref_put(&intf->refcount, intf_free); 2608 goto retry_bmc_lock; 2609 } 2610 } else { 2611 mutex_lock(&intf->bmc_reg_mutex); 2612 bmc = intf->bmc; 2613 mutex_lock(&bmc->dyn_mutex); 2614 kref_get(&intf->refcount); 2615 } 2616 2617 /* If we have a valid and current ID, just return that. */ 2618 if (intf->in_bmc_register || 2619 (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry))) 2620 goto out_noprocessing; 2621 2622 prev_guid_set = bmc->dyn_guid_set; 2623 __get_guid(intf); 2624 2625 prev_dyn_id_set = bmc->dyn_id_set; 2626 rv = __get_device_id(intf, bmc); 2627 if (rv) 2628 goto out; 2629 2630 /* 2631 * The guid, device id, manufacturer id, and product id should 2632 * not change on a BMC. If it does we have to do some dancing. 2633 */ 2634 if (!intf->bmc_registered 2635 || (!prev_guid_set && bmc->dyn_guid_set) 2636 || (!prev_dyn_id_set && bmc->dyn_id_set) 2637 || (prev_guid_set && bmc->dyn_guid_set 2638 && !guid_equal(&bmc->guid, &bmc->fetch_guid)) 2639 || bmc->id.device_id != bmc->fetch_id.device_id 2640 || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id 2641 || bmc->id.product_id != bmc->fetch_id.product_id) { 2642 struct ipmi_device_id id = bmc->fetch_id; 2643 int guid_set = bmc->dyn_guid_set; 2644 guid_t guid; 2645 2646 guid = bmc->fetch_guid; 2647 mutex_unlock(&bmc->dyn_mutex); 2648 2649 __ipmi_bmc_unregister(intf); 2650 /* Fill in the temporary BMC for good measure. */ 2651 intf->bmc->id = id; 2652 intf->bmc->dyn_guid_set = guid_set; 2653 intf->bmc->guid = guid; 2654 if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num)) 2655 need_waiter(intf); /* Retry later on an error. */ 2656 else 2657 __scan_channels(intf, &id); 2658 2659 2660 if (!intf_set) { 2661 /* 2662 * We weren't given the interface on the 2663 * command line, so restart the operation on 2664 * the next interface for the BMC. 2665 */ 2666 mutex_unlock(&intf->bmc_reg_mutex); 2667 mutex_lock(&bmc->dyn_mutex); 2668 goto retry_bmc_lock; 2669 } 2670 2671 /* We have a new BMC, set it up. */ 2672 bmc = intf->bmc; 2673 mutex_lock(&bmc->dyn_mutex); 2674 goto out_noprocessing; 2675 } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id))) 2676 /* Version info changes, scan the channels again. */ 2677 __scan_channels(intf, &bmc->fetch_id); 2678 2679 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY; 2680 2681 out: 2682 if (rv && prev_dyn_id_set) { 2683 rv = 0; /* Ignore failures if we have previous data. */ 2684 bmc->dyn_id_set = prev_dyn_id_set; 2685 } 2686 if (!rv) { 2687 bmc->id = bmc->fetch_id; 2688 if (bmc->dyn_guid_set) 2689 bmc->guid = bmc->fetch_guid; 2690 else if (prev_guid_set) 2691 /* 2692 * The guid used to be valid and it failed to fetch, 2693 * just use the cached value. 2694 */ 2695 bmc->dyn_guid_set = prev_guid_set; 2696 } 2697 out_noprocessing: 2698 if (!rv) { 2699 if (id) 2700 *id = bmc->id; 2701 2702 if (guid_set) 2703 *guid_set = bmc->dyn_guid_set; 2704 2705 if (guid && bmc->dyn_guid_set) 2706 *guid = bmc->guid; 2707 } 2708 2709 mutex_unlock(&bmc->dyn_mutex); 2710 mutex_unlock(&intf->bmc_reg_mutex); 2711 2712 kref_put(&intf->refcount, intf_free); 2713 return rv; 2714 } 2715 2716 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, 2717 struct ipmi_device_id *id, 2718 bool *guid_set, guid_t *guid) 2719 { 2720 return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1); 2721 } 2722 2723 static ssize_t device_id_show(struct device *dev, 2724 struct device_attribute *attr, 2725 char *buf) 2726 { 2727 struct bmc_device *bmc = to_bmc_device(dev); 2728 struct ipmi_device_id id; 2729 int rv; 2730 2731 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2732 if (rv) 2733 return rv; 2734 2735 return sysfs_emit(buf, "%u\n", id.device_id); 2736 } 2737 static DEVICE_ATTR_RO(device_id); 2738 2739 static ssize_t provides_device_sdrs_show(struct device *dev, 2740 struct device_attribute *attr, 2741 char *buf) 2742 { 2743 struct bmc_device *bmc = to_bmc_device(dev); 2744 struct ipmi_device_id id; 2745 int rv; 2746 2747 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2748 if (rv) 2749 return rv; 2750 2751 return sysfs_emit(buf, "%u\n", (id.device_revision & 0x80) >> 7); 2752 } 2753 static DEVICE_ATTR_RO(provides_device_sdrs); 2754 2755 static ssize_t revision_show(struct device *dev, struct device_attribute *attr, 2756 char *buf) 2757 { 2758 struct bmc_device *bmc = to_bmc_device(dev); 2759 struct ipmi_device_id id; 2760 int rv; 2761 2762 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2763 if (rv) 2764 return rv; 2765 2766 return sysfs_emit(buf, "%u\n", id.device_revision & 0x0F); 2767 } 2768 static DEVICE_ATTR_RO(revision); 2769 2770 static ssize_t firmware_revision_show(struct device *dev, 2771 struct device_attribute *attr, 2772 char *buf) 2773 { 2774 struct bmc_device *bmc = to_bmc_device(dev); 2775 struct ipmi_device_id id; 2776 int rv; 2777 2778 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2779 if (rv) 2780 return rv; 2781 2782 return sysfs_emit(buf, "%u.%x\n", id.firmware_revision_1, 2783 id.firmware_revision_2); 2784 } 2785 static DEVICE_ATTR_RO(firmware_revision); 2786 2787 static ssize_t ipmi_version_show(struct device *dev, 2788 struct device_attribute *attr, 2789 char *buf) 2790 { 2791 struct bmc_device *bmc = to_bmc_device(dev); 2792 struct ipmi_device_id id; 2793 int rv; 2794 2795 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2796 if (rv) 2797 return rv; 2798 2799 return sysfs_emit(buf, "%u.%u\n", 2800 ipmi_version_major(&id), 2801 ipmi_version_minor(&id)); 2802 } 2803 static DEVICE_ATTR_RO(ipmi_version); 2804 2805 static ssize_t add_dev_support_show(struct device *dev, 2806 struct device_attribute *attr, 2807 char *buf) 2808 { 2809 struct bmc_device *bmc = to_bmc_device(dev); 2810 struct ipmi_device_id id; 2811 int rv; 2812 2813 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2814 if (rv) 2815 return rv; 2816 2817 return sysfs_emit(buf, "0x%02x\n", id.additional_device_support); 2818 } 2819 static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, 2820 NULL); 2821 2822 static ssize_t manufacturer_id_show(struct device *dev, 2823 struct device_attribute *attr, 2824 char *buf) 2825 { 2826 struct bmc_device *bmc = to_bmc_device(dev); 2827 struct ipmi_device_id id; 2828 int rv; 2829 2830 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2831 if (rv) 2832 return rv; 2833 2834 return sysfs_emit(buf, "0x%6.6x\n", id.manufacturer_id); 2835 } 2836 static DEVICE_ATTR_RO(manufacturer_id); 2837 2838 static ssize_t product_id_show(struct device *dev, 2839 struct device_attribute *attr, 2840 char *buf) 2841 { 2842 struct bmc_device *bmc = to_bmc_device(dev); 2843 struct ipmi_device_id id; 2844 int rv; 2845 2846 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2847 if (rv) 2848 return rv; 2849 2850 return sysfs_emit(buf, "0x%4.4x\n", id.product_id); 2851 } 2852 static DEVICE_ATTR_RO(product_id); 2853 2854 static ssize_t aux_firmware_rev_show(struct device *dev, 2855 struct device_attribute *attr, 2856 char *buf) 2857 { 2858 struct bmc_device *bmc = to_bmc_device(dev); 2859 struct ipmi_device_id id; 2860 int rv; 2861 2862 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2863 if (rv) 2864 return rv; 2865 2866 return sysfs_emit(buf, "0x%02x 0x%02x 0x%02x 0x%02x\n", 2867 id.aux_firmware_revision[3], 2868 id.aux_firmware_revision[2], 2869 id.aux_firmware_revision[1], 2870 id.aux_firmware_revision[0]); 2871 } 2872 static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL); 2873 2874 static ssize_t guid_show(struct device *dev, struct device_attribute *attr, 2875 char *buf) 2876 { 2877 struct bmc_device *bmc = to_bmc_device(dev); 2878 bool guid_set; 2879 guid_t guid; 2880 int rv; 2881 2882 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid); 2883 if (rv) 2884 return rv; 2885 if (!guid_set) 2886 return -ENOENT; 2887 2888 return sysfs_emit(buf, "%pUl\n", &guid); 2889 } 2890 static DEVICE_ATTR_RO(guid); 2891 2892 static struct attribute *bmc_dev_attrs[] = { 2893 &dev_attr_device_id.attr, 2894 &dev_attr_provides_device_sdrs.attr, 2895 &dev_attr_revision.attr, 2896 &dev_attr_firmware_revision.attr, 2897 &dev_attr_ipmi_version.attr, 2898 &dev_attr_additional_device_support.attr, 2899 &dev_attr_manufacturer_id.attr, 2900 &dev_attr_product_id.attr, 2901 &dev_attr_aux_firmware_revision.attr, 2902 &dev_attr_guid.attr, 2903 NULL 2904 }; 2905 2906 static umode_t bmc_dev_attr_is_visible(struct kobject *kobj, 2907 struct attribute *attr, int idx) 2908 { 2909 struct device *dev = kobj_to_dev(kobj); 2910 struct bmc_device *bmc = to_bmc_device(dev); 2911 umode_t mode = attr->mode; 2912 int rv; 2913 2914 if (attr == &dev_attr_aux_firmware_revision.attr) { 2915 struct ipmi_device_id id; 2916 2917 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2918 return (!rv && id.aux_firmware_revision_set) ? mode : 0; 2919 } 2920 if (attr == &dev_attr_guid.attr) { 2921 bool guid_set; 2922 2923 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL); 2924 return (!rv && guid_set) ? mode : 0; 2925 } 2926 return mode; 2927 } 2928 2929 static const struct attribute_group bmc_dev_attr_group = { 2930 .attrs = bmc_dev_attrs, 2931 .is_visible = bmc_dev_attr_is_visible, 2932 }; 2933 2934 static const struct attribute_group *bmc_dev_attr_groups[] = { 2935 &bmc_dev_attr_group, 2936 NULL 2937 }; 2938 2939 static const struct device_type bmc_device_type = { 2940 .groups = bmc_dev_attr_groups, 2941 }; 2942 2943 static int __find_bmc_guid(struct device *dev, const void *data) 2944 { 2945 const guid_t *guid = data; 2946 struct bmc_device *bmc; 2947 int rv; 2948 2949 if (dev->type != &bmc_device_type) 2950 return 0; 2951 2952 bmc = to_bmc_device(dev); 2953 rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid); 2954 if (rv) 2955 rv = kref_get_unless_zero(&bmc->usecount); 2956 return rv; 2957 } 2958 2959 /* 2960 * Returns with the bmc's usecount incremented, if it is non-NULL. 2961 */ 2962 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv, 2963 guid_t *guid) 2964 { 2965 struct device *dev; 2966 struct bmc_device *bmc = NULL; 2967 2968 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid); 2969 if (dev) { 2970 bmc = to_bmc_device(dev); 2971 put_device(dev); 2972 } 2973 return bmc; 2974 } 2975 2976 struct prod_dev_id { 2977 unsigned int product_id; 2978 unsigned char device_id; 2979 }; 2980 2981 static int __find_bmc_prod_dev_id(struct device *dev, const void *data) 2982 { 2983 const struct prod_dev_id *cid = data; 2984 struct bmc_device *bmc; 2985 int rv; 2986 2987 if (dev->type != &bmc_device_type) 2988 return 0; 2989 2990 bmc = to_bmc_device(dev); 2991 rv = (bmc->id.product_id == cid->product_id 2992 && bmc->id.device_id == cid->device_id); 2993 if (rv) 2994 rv = kref_get_unless_zero(&bmc->usecount); 2995 return rv; 2996 } 2997 2998 /* 2999 * Returns with the bmc's usecount incremented, if it is non-NULL. 3000 */ 3001 static struct bmc_device *ipmi_find_bmc_prod_dev_id( 3002 struct device_driver *drv, 3003 unsigned int product_id, unsigned char device_id) 3004 { 3005 struct prod_dev_id id = { 3006 .product_id = product_id, 3007 .device_id = device_id, 3008 }; 3009 struct device *dev; 3010 struct bmc_device *bmc = NULL; 3011 3012 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id); 3013 if (dev) { 3014 bmc = to_bmc_device(dev); 3015 put_device(dev); 3016 } 3017 return bmc; 3018 } 3019 3020 static DEFINE_IDA(ipmi_bmc_ida); 3021 3022 static void 3023 release_bmc_device(struct device *dev) 3024 { 3025 kfree(to_bmc_device(dev)); 3026 } 3027 3028 static void cleanup_bmc_work(struct work_struct *work) 3029 { 3030 struct bmc_device *bmc = container_of(work, struct bmc_device, 3031 remove_work); 3032 int id = bmc->pdev.id; /* Unregister overwrites id */ 3033 3034 platform_device_unregister(&bmc->pdev); 3035 ida_simple_remove(&ipmi_bmc_ida, id); 3036 } 3037 3038 static void 3039 cleanup_bmc_device(struct kref *ref) 3040 { 3041 struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount); 3042 3043 /* 3044 * Remove the platform device in a work queue to avoid issues 3045 * with removing the device attributes while reading a device 3046 * attribute. 3047 */ 3048 queue_work(remove_work_wq, &bmc->remove_work); 3049 } 3050 3051 /* 3052 * Must be called with intf->bmc_reg_mutex held. 3053 */ 3054 static void __ipmi_bmc_unregister(struct ipmi_smi *intf) 3055 { 3056 struct bmc_device *bmc = intf->bmc; 3057 3058 if (!intf->bmc_registered) 3059 return; 3060 3061 sysfs_remove_link(&intf->si_dev->kobj, "bmc"); 3062 sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name); 3063 kfree(intf->my_dev_name); 3064 intf->my_dev_name = NULL; 3065 3066 mutex_lock(&bmc->dyn_mutex); 3067 list_del(&intf->bmc_link); 3068 mutex_unlock(&bmc->dyn_mutex); 3069 intf->bmc = &intf->tmp_bmc; 3070 kref_put(&bmc->usecount, cleanup_bmc_device); 3071 intf->bmc_registered = false; 3072 } 3073 3074 static void ipmi_bmc_unregister(struct ipmi_smi *intf) 3075 { 3076 mutex_lock(&intf->bmc_reg_mutex); 3077 __ipmi_bmc_unregister(intf); 3078 mutex_unlock(&intf->bmc_reg_mutex); 3079 } 3080 3081 /* 3082 * Must be called with intf->bmc_reg_mutex held. 3083 */ 3084 static int __ipmi_bmc_register(struct ipmi_smi *intf, 3085 struct ipmi_device_id *id, 3086 bool guid_set, guid_t *guid, int intf_num) 3087 { 3088 int rv; 3089 struct bmc_device *bmc; 3090 struct bmc_device *old_bmc; 3091 3092 /* 3093 * platform_device_register() can cause bmc_reg_mutex to 3094 * be claimed because of the is_visible functions of 3095 * the attributes. Eliminate possible recursion and 3096 * release the lock. 3097 */ 3098 intf->in_bmc_register = true; 3099 mutex_unlock(&intf->bmc_reg_mutex); 3100 3101 /* 3102 * Try to find if there is an bmc_device struct 3103 * representing the interfaced BMC already 3104 */ 3105 mutex_lock(&ipmidriver_mutex); 3106 if (guid_set) 3107 old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid); 3108 else 3109 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver, 3110 id->product_id, 3111 id->device_id); 3112 3113 /* 3114 * If there is already an bmc_device, free the new one, 3115 * otherwise register the new BMC device 3116 */ 3117 if (old_bmc) { 3118 bmc = old_bmc; 3119 /* 3120 * Note: old_bmc already has usecount incremented by 3121 * the BMC find functions. 3122 */ 3123 intf->bmc = old_bmc; 3124 mutex_lock(&bmc->dyn_mutex); 3125 list_add_tail(&intf->bmc_link, &bmc->intfs); 3126 mutex_unlock(&bmc->dyn_mutex); 3127 3128 dev_info(intf->si_dev, 3129 "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 3130 bmc->id.manufacturer_id, 3131 bmc->id.product_id, 3132 bmc->id.device_id); 3133 } else { 3134 bmc = kzalloc(sizeof(*bmc), GFP_KERNEL); 3135 if (!bmc) { 3136 rv = -ENOMEM; 3137 goto out; 3138 } 3139 INIT_LIST_HEAD(&bmc->intfs); 3140 mutex_init(&bmc->dyn_mutex); 3141 INIT_WORK(&bmc->remove_work, cleanup_bmc_work); 3142 3143 bmc->id = *id; 3144 bmc->dyn_id_set = 1; 3145 bmc->dyn_guid_set = guid_set; 3146 bmc->guid = *guid; 3147 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY; 3148 3149 bmc->pdev.name = "ipmi_bmc"; 3150 3151 rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL); 3152 if (rv < 0) { 3153 kfree(bmc); 3154 goto out; 3155 } 3156 3157 bmc->pdev.dev.driver = &ipmidriver.driver; 3158 bmc->pdev.id = rv; 3159 bmc->pdev.dev.release = release_bmc_device; 3160 bmc->pdev.dev.type = &bmc_device_type; 3161 kref_init(&bmc->usecount); 3162 3163 intf->bmc = bmc; 3164 mutex_lock(&bmc->dyn_mutex); 3165 list_add_tail(&intf->bmc_link, &bmc->intfs); 3166 mutex_unlock(&bmc->dyn_mutex); 3167 3168 rv = platform_device_register(&bmc->pdev); 3169 if (rv) { 3170 dev_err(intf->si_dev, 3171 "Unable to register bmc device: %d\n", 3172 rv); 3173 goto out_list_del; 3174 } 3175 3176 dev_info(intf->si_dev, 3177 "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 3178 bmc->id.manufacturer_id, 3179 bmc->id.product_id, 3180 bmc->id.device_id); 3181 } 3182 3183 /* 3184 * create symlink from system interface device to bmc device 3185 * and back. 3186 */ 3187 rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc"); 3188 if (rv) { 3189 dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv); 3190 goto out_put_bmc; 3191 } 3192 3193 if (intf_num == -1) 3194 intf_num = intf->intf_num; 3195 intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num); 3196 if (!intf->my_dev_name) { 3197 rv = -ENOMEM; 3198 dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n", 3199 rv); 3200 goto out_unlink1; 3201 } 3202 3203 rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj, 3204 intf->my_dev_name); 3205 if (rv) { 3206 dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n", 3207 rv); 3208 goto out_free_my_dev_name; 3209 } 3210 3211 intf->bmc_registered = true; 3212 3213 out: 3214 mutex_unlock(&ipmidriver_mutex); 3215 mutex_lock(&intf->bmc_reg_mutex); 3216 intf->in_bmc_register = false; 3217 return rv; 3218 3219 3220 out_free_my_dev_name: 3221 kfree(intf->my_dev_name); 3222 intf->my_dev_name = NULL; 3223 3224 out_unlink1: 3225 sysfs_remove_link(&intf->si_dev->kobj, "bmc"); 3226 3227 out_put_bmc: 3228 mutex_lock(&bmc->dyn_mutex); 3229 list_del(&intf->bmc_link); 3230 mutex_unlock(&bmc->dyn_mutex); 3231 intf->bmc = &intf->tmp_bmc; 3232 kref_put(&bmc->usecount, cleanup_bmc_device); 3233 goto out; 3234 3235 out_list_del: 3236 mutex_lock(&bmc->dyn_mutex); 3237 list_del(&intf->bmc_link); 3238 mutex_unlock(&bmc->dyn_mutex); 3239 intf->bmc = &intf->tmp_bmc; 3240 put_device(&bmc->pdev.dev); 3241 goto out; 3242 } 3243 3244 static int 3245 send_guid_cmd(struct ipmi_smi *intf, int chan) 3246 { 3247 struct kernel_ipmi_msg msg; 3248 struct ipmi_system_interface_addr si; 3249 3250 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3251 si.channel = IPMI_BMC_CHANNEL; 3252 si.lun = 0; 3253 3254 msg.netfn = IPMI_NETFN_APP_REQUEST; 3255 msg.cmd = IPMI_GET_DEVICE_GUID_CMD; 3256 msg.data = NULL; 3257 msg.data_len = 0; 3258 return i_ipmi_request(NULL, 3259 intf, 3260 (struct ipmi_addr *) &si, 3261 0, 3262 &msg, 3263 intf, 3264 NULL, 3265 NULL, 3266 0, 3267 intf->addrinfo[0].address, 3268 intf->addrinfo[0].lun, 3269 -1, 0); 3270 } 3271 3272 static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 3273 { 3274 struct bmc_device *bmc = intf->bmc; 3275 3276 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3277 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 3278 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD)) 3279 /* Not for me */ 3280 return; 3281 3282 if (msg->msg.data[0] != 0) { 3283 /* Error from getting the GUID, the BMC doesn't have one. */ 3284 bmc->dyn_guid_set = 0; 3285 goto out; 3286 } 3287 3288 if (msg->msg.data_len < UUID_SIZE + 1) { 3289 bmc->dyn_guid_set = 0; 3290 dev_warn(intf->si_dev, 3291 "The GUID response from the BMC was too short, it was %d but should have been %d. Assuming GUID is not available.\n", 3292 msg->msg.data_len, UUID_SIZE + 1); 3293 goto out; 3294 } 3295 3296 import_guid(&bmc->fetch_guid, msg->msg.data + 1); 3297 /* 3298 * Make sure the guid data is available before setting 3299 * dyn_guid_set. 3300 */ 3301 smp_wmb(); 3302 bmc->dyn_guid_set = 1; 3303 out: 3304 wake_up(&intf->waitq); 3305 } 3306 3307 static void __get_guid(struct ipmi_smi *intf) 3308 { 3309 int rv; 3310 struct bmc_device *bmc = intf->bmc; 3311 3312 bmc->dyn_guid_set = 2; 3313 intf->null_user_handler = guid_handler; 3314 rv = send_guid_cmd(intf, 0); 3315 if (rv) 3316 /* Send failed, no GUID available. */ 3317 bmc->dyn_guid_set = 0; 3318 else 3319 wait_event(intf->waitq, bmc->dyn_guid_set != 2); 3320 3321 /* dyn_guid_set makes the guid data available. */ 3322 smp_rmb(); 3323 3324 intf->null_user_handler = NULL; 3325 } 3326 3327 static int 3328 send_channel_info_cmd(struct ipmi_smi *intf, int chan) 3329 { 3330 struct kernel_ipmi_msg msg; 3331 unsigned char data[1]; 3332 struct ipmi_system_interface_addr si; 3333 3334 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3335 si.channel = IPMI_BMC_CHANNEL; 3336 si.lun = 0; 3337 3338 msg.netfn = IPMI_NETFN_APP_REQUEST; 3339 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD; 3340 msg.data = data; 3341 msg.data_len = 1; 3342 data[0] = chan; 3343 return i_ipmi_request(NULL, 3344 intf, 3345 (struct ipmi_addr *) &si, 3346 0, 3347 &msg, 3348 intf, 3349 NULL, 3350 NULL, 3351 0, 3352 intf->addrinfo[0].address, 3353 intf->addrinfo[0].lun, 3354 -1, 0); 3355 } 3356 3357 static void 3358 channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 3359 { 3360 int rv = 0; 3361 int ch; 3362 unsigned int set = intf->curr_working_cset; 3363 struct ipmi_channel *chans; 3364 3365 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3366 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) 3367 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) { 3368 /* It's the one we want */ 3369 if (msg->msg.data[0] != 0) { 3370 /* Got an error from the channel, just go on. */ 3371 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) { 3372 /* 3373 * If the MC does not support this 3374 * command, that is legal. We just 3375 * assume it has one IPMB at channel 3376 * zero. 3377 */ 3378 intf->wchannels[set].c[0].medium 3379 = IPMI_CHANNEL_MEDIUM_IPMB; 3380 intf->wchannels[set].c[0].protocol 3381 = IPMI_CHANNEL_PROTOCOL_IPMB; 3382 3383 intf->channel_list = intf->wchannels + set; 3384 intf->channels_ready = true; 3385 wake_up(&intf->waitq); 3386 goto out; 3387 } 3388 goto next_channel; 3389 } 3390 if (msg->msg.data_len < 4) { 3391 /* Message not big enough, just go on. */ 3392 goto next_channel; 3393 } 3394 ch = intf->curr_channel; 3395 chans = intf->wchannels[set].c; 3396 chans[ch].medium = msg->msg.data[2] & 0x7f; 3397 chans[ch].protocol = msg->msg.data[3] & 0x1f; 3398 3399 next_channel: 3400 intf->curr_channel++; 3401 if (intf->curr_channel >= IPMI_MAX_CHANNELS) { 3402 intf->channel_list = intf->wchannels + set; 3403 intf->channels_ready = true; 3404 wake_up(&intf->waitq); 3405 } else { 3406 intf->channel_list = intf->wchannels + set; 3407 intf->channels_ready = true; 3408 rv = send_channel_info_cmd(intf, intf->curr_channel); 3409 } 3410 3411 if (rv) { 3412 /* Got an error somehow, just give up. */ 3413 dev_warn(intf->si_dev, 3414 "Error sending channel information for channel %d: %d\n", 3415 intf->curr_channel, rv); 3416 3417 intf->channel_list = intf->wchannels + set; 3418 intf->channels_ready = true; 3419 wake_up(&intf->waitq); 3420 } 3421 } 3422 out: 3423 return; 3424 } 3425 3426 /* 3427 * Must be holding intf->bmc_reg_mutex to call this. 3428 */ 3429 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id) 3430 { 3431 int rv; 3432 3433 if (ipmi_version_major(id) > 1 3434 || (ipmi_version_major(id) == 1 3435 && ipmi_version_minor(id) >= 5)) { 3436 unsigned int set; 3437 3438 /* 3439 * Start scanning the channels to see what is 3440 * available. 3441 */ 3442 set = !intf->curr_working_cset; 3443 intf->curr_working_cset = set; 3444 memset(&intf->wchannels[set], 0, 3445 sizeof(struct ipmi_channel_set)); 3446 3447 intf->null_user_handler = channel_handler; 3448 intf->curr_channel = 0; 3449 rv = send_channel_info_cmd(intf, 0); 3450 if (rv) { 3451 dev_warn(intf->si_dev, 3452 "Error sending channel information for channel 0, %d\n", 3453 rv); 3454 intf->null_user_handler = NULL; 3455 return -EIO; 3456 } 3457 3458 /* Wait for the channel info to be read. */ 3459 wait_event(intf->waitq, intf->channels_ready); 3460 intf->null_user_handler = NULL; 3461 } else { 3462 unsigned int set = intf->curr_working_cset; 3463 3464 /* Assume a single IPMB channel at zero. */ 3465 intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; 3466 intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB; 3467 intf->channel_list = intf->wchannels + set; 3468 intf->channels_ready = true; 3469 } 3470 3471 return 0; 3472 } 3473 3474 static void ipmi_poll(struct ipmi_smi *intf) 3475 { 3476 if (intf->handlers->poll) 3477 intf->handlers->poll(intf->send_info); 3478 /* In case something came in */ 3479 handle_new_recv_msgs(intf); 3480 } 3481 3482 void ipmi_poll_interface(struct ipmi_user *user) 3483 { 3484 ipmi_poll(user->intf); 3485 } 3486 EXPORT_SYMBOL(ipmi_poll_interface); 3487 3488 static void redo_bmc_reg(struct work_struct *work) 3489 { 3490 struct ipmi_smi *intf = container_of(work, struct ipmi_smi, 3491 bmc_reg_work); 3492 3493 if (!intf->in_shutdown) 3494 bmc_get_device_id(intf, NULL, NULL, NULL, NULL); 3495 3496 kref_put(&intf->refcount, intf_free); 3497 } 3498 3499 int ipmi_add_smi(struct module *owner, 3500 const struct ipmi_smi_handlers *handlers, 3501 void *send_info, 3502 struct device *si_dev, 3503 unsigned char slave_addr) 3504 { 3505 int i, j; 3506 int rv; 3507 struct ipmi_smi *intf, *tintf; 3508 struct list_head *link; 3509 struct ipmi_device_id id; 3510 3511 /* 3512 * Make sure the driver is actually initialized, this handles 3513 * problems with initialization order. 3514 */ 3515 rv = ipmi_init_msghandler(); 3516 if (rv) 3517 return rv; 3518 3519 intf = kzalloc(sizeof(*intf), GFP_KERNEL); 3520 if (!intf) 3521 return -ENOMEM; 3522 3523 rv = init_srcu_struct(&intf->users_srcu); 3524 if (rv) { 3525 kfree(intf); 3526 return rv; 3527 } 3528 3529 intf->owner = owner; 3530 intf->bmc = &intf->tmp_bmc; 3531 INIT_LIST_HEAD(&intf->bmc->intfs); 3532 mutex_init(&intf->bmc->dyn_mutex); 3533 INIT_LIST_HEAD(&intf->bmc_link); 3534 mutex_init(&intf->bmc_reg_mutex); 3535 intf->intf_num = -1; /* Mark it invalid for now. */ 3536 kref_init(&intf->refcount); 3537 INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg); 3538 intf->si_dev = si_dev; 3539 for (j = 0; j < IPMI_MAX_CHANNELS; j++) { 3540 intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR; 3541 intf->addrinfo[j].lun = 2; 3542 } 3543 if (slave_addr != 0) 3544 intf->addrinfo[0].address = slave_addr; 3545 INIT_LIST_HEAD(&intf->users); 3546 atomic_set(&intf->nr_users, 0); 3547 intf->handlers = handlers; 3548 intf->send_info = send_info; 3549 spin_lock_init(&intf->seq_lock); 3550 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { 3551 intf->seq_table[j].inuse = 0; 3552 intf->seq_table[j].seqid = 0; 3553 } 3554 intf->curr_seq = 0; 3555 spin_lock_init(&intf->waiting_rcv_msgs_lock); 3556 INIT_LIST_HEAD(&intf->waiting_rcv_msgs); 3557 tasklet_setup(&intf->recv_tasklet, 3558 smi_recv_tasklet); 3559 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0); 3560 spin_lock_init(&intf->xmit_msgs_lock); 3561 INIT_LIST_HEAD(&intf->xmit_msgs); 3562 INIT_LIST_HEAD(&intf->hp_xmit_msgs); 3563 spin_lock_init(&intf->events_lock); 3564 spin_lock_init(&intf->watch_lock); 3565 atomic_set(&intf->event_waiters, 0); 3566 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 3567 INIT_LIST_HEAD(&intf->waiting_events); 3568 intf->waiting_events_count = 0; 3569 mutex_init(&intf->cmd_rcvrs_mutex); 3570 spin_lock_init(&intf->maintenance_mode_lock); 3571 INIT_LIST_HEAD(&intf->cmd_rcvrs); 3572 init_waitqueue_head(&intf->waitq); 3573 for (i = 0; i < IPMI_NUM_STATS; i++) 3574 atomic_set(&intf->stats[i], 0); 3575 3576 mutex_lock(&ipmi_interfaces_mutex); 3577 /* Look for a hole in the numbers. */ 3578 i = 0; 3579 link = &ipmi_interfaces; 3580 list_for_each_entry_rcu(tintf, &ipmi_interfaces, link, 3581 ipmi_interfaces_mutex_held()) { 3582 if (tintf->intf_num != i) { 3583 link = &tintf->link; 3584 break; 3585 } 3586 i++; 3587 } 3588 /* Add the new interface in numeric order. */ 3589 if (i == 0) 3590 list_add_rcu(&intf->link, &ipmi_interfaces); 3591 else 3592 list_add_tail_rcu(&intf->link, link); 3593 3594 rv = handlers->start_processing(send_info, intf); 3595 if (rv) 3596 goto out_err; 3597 3598 rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i); 3599 if (rv) { 3600 dev_err(si_dev, "Unable to get the device id: %d\n", rv); 3601 goto out_err_started; 3602 } 3603 3604 mutex_lock(&intf->bmc_reg_mutex); 3605 rv = __scan_channels(intf, &id); 3606 mutex_unlock(&intf->bmc_reg_mutex); 3607 if (rv) 3608 goto out_err_bmc_reg; 3609 3610 /* 3611 * Keep memory order straight for RCU readers. Make 3612 * sure everything else is committed to memory before 3613 * setting intf_num to mark the interface valid. 3614 */ 3615 smp_wmb(); 3616 intf->intf_num = i; 3617 mutex_unlock(&ipmi_interfaces_mutex); 3618 3619 /* After this point the interface is legal to use. */ 3620 call_smi_watchers(i, intf->si_dev); 3621 3622 return 0; 3623 3624 out_err_bmc_reg: 3625 ipmi_bmc_unregister(intf); 3626 out_err_started: 3627 if (intf->handlers->shutdown) 3628 intf->handlers->shutdown(intf->send_info); 3629 out_err: 3630 list_del_rcu(&intf->link); 3631 mutex_unlock(&ipmi_interfaces_mutex); 3632 synchronize_srcu(&ipmi_interfaces_srcu); 3633 cleanup_srcu_struct(&intf->users_srcu); 3634 kref_put(&intf->refcount, intf_free); 3635 3636 return rv; 3637 } 3638 EXPORT_SYMBOL(ipmi_add_smi); 3639 3640 static void deliver_smi_err_response(struct ipmi_smi *intf, 3641 struct ipmi_smi_msg *msg, 3642 unsigned char err) 3643 { 3644 msg->rsp[0] = msg->data[0] | 4; 3645 msg->rsp[1] = msg->data[1]; 3646 msg->rsp[2] = err; 3647 msg->rsp_size = 3; 3648 /* It's an error, so it will never requeue, no need to check return. */ 3649 handle_one_recv_msg(intf, msg); 3650 } 3651 3652 static void cleanup_smi_msgs(struct ipmi_smi *intf) 3653 { 3654 int i; 3655 struct seq_table *ent; 3656 struct ipmi_smi_msg *msg; 3657 struct list_head *entry; 3658 struct list_head tmplist; 3659 3660 /* Clear out our transmit queues and hold the messages. */ 3661 INIT_LIST_HEAD(&tmplist); 3662 list_splice_tail(&intf->hp_xmit_msgs, &tmplist); 3663 list_splice_tail(&intf->xmit_msgs, &tmplist); 3664 3665 /* Current message first, to preserve order */ 3666 while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) { 3667 /* Wait for the message to clear out. */ 3668 schedule_timeout(1); 3669 } 3670 3671 /* No need for locks, the interface is down. */ 3672 3673 /* 3674 * Return errors for all pending messages in queue and in the 3675 * tables waiting for remote responses. 3676 */ 3677 while (!list_empty(&tmplist)) { 3678 entry = tmplist.next; 3679 list_del(entry); 3680 msg = list_entry(entry, struct ipmi_smi_msg, link); 3681 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED); 3682 } 3683 3684 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { 3685 ent = &intf->seq_table[i]; 3686 if (!ent->inuse) 3687 continue; 3688 deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED); 3689 } 3690 } 3691 3692 void ipmi_unregister_smi(struct ipmi_smi *intf) 3693 { 3694 struct ipmi_smi_watcher *w; 3695 int intf_num, index; 3696 3697 if (!intf) 3698 return; 3699 intf_num = intf->intf_num; 3700 mutex_lock(&ipmi_interfaces_mutex); 3701 intf->intf_num = -1; 3702 intf->in_shutdown = true; 3703 list_del_rcu(&intf->link); 3704 mutex_unlock(&ipmi_interfaces_mutex); 3705 synchronize_srcu(&ipmi_interfaces_srcu); 3706 3707 /* At this point no users can be added to the interface. */ 3708 3709 /* 3710 * Call all the watcher interfaces to tell them that 3711 * an interface is going away. 3712 */ 3713 mutex_lock(&smi_watchers_mutex); 3714 list_for_each_entry(w, &smi_watchers, link) 3715 w->smi_gone(intf_num); 3716 mutex_unlock(&smi_watchers_mutex); 3717 3718 index = srcu_read_lock(&intf->users_srcu); 3719 while (!list_empty(&intf->users)) { 3720 struct ipmi_user *user = 3721 container_of(list_next_rcu(&intf->users), 3722 struct ipmi_user, link); 3723 3724 _ipmi_destroy_user(user); 3725 } 3726 srcu_read_unlock(&intf->users_srcu, index); 3727 3728 if (intf->handlers->shutdown) 3729 intf->handlers->shutdown(intf->send_info); 3730 3731 cleanup_smi_msgs(intf); 3732 3733 ipmi_bmc_unregister(intf); 3734 3735 cleanup_srcu_struct(&intf->users_srcu); 3736 kref_put(&intf->refcount, intf_free); 3737 } 3738 EXPORT_SYMBOL(ipmi_unregister_smi); 3739 3740 static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf, 3741 struct ipmi_smi_msg *msg) 3742 { 3743 struct ipmi_ipmb_addr ipmb_addr; 3744 struct ipmi_recv_msg *recv_msg; 3745 3746 /* 3747 * This is 11, not 10, because the response must contain a 3748 * completion code. 3749 */ 3750 if (msg->rsp_size < 11) { 3751 /* Message not big enough, just ignore it. */ 3752 ipmi_inc_stat(intf, invalid_ipmb_responses); 3753 return 0; 3754 } 3755 3756 if (msg->rsp[2] != 0) { 3757 /* An error getting the response, just ignore it. */ 3758 return 0; 3759 } 3760 3761 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE; 3762 ipmb_addr.slave_addr = msg->rsp[6]; 3763 ipmb_addr.channel = msg->rsp[3] & 0x0f; 3764 ipmb_addr.lun = msg->rsp[7] & 3; 3765 3766 /* 3767 * It's a response from a remote entity. Look up the sequence 3768 * number and handle the response. 3769 */ 3770 if (intf_find_seq(intf, 3771 msg->rsp[7] >> 2, 3772 msg->rsp[3] & 0x0f, 3773 msg->rsp[8], 3774 (msg->rsp[4] >> 2) & (~1), 3775 (struct ipmi_addr *) &ipmb_addr, 3776 &recv_msg)) { 3777 /* 3778 * We were unable to find the sequence number, 3779 * so just nuke the message. 3780 */ 3781 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3782 return 0; 3783 } 3784 3785 memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9); 3786 /* 3787 * The other fields matched, so no need to set them, except 3788 * for netfn, which needs to be the response that was 3789 * returned, not the request value. 3790 */ 3791 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3792 recv_msg->msg.data = recv_msg->msg_data; 3793 recv_msg->msg.data_len = msg->rsp_size - 10; 3794 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 3795 if (deliver_response(intf, recv_msg)) 3796 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3797 else 3798 ipmi_inc_stat(intf, handled_ipmb_responses); 3799 3800 return 0; 3801 } 3802 3803 static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, 3804 struct ipmi_smi_msg *msg) 3805 { 3806 struct cmd_rcvr *rcvr; 3807 int rv = 0; 3808 unsigned char netfn; 3809 unsigned char cmd; 3810 unsigned char chan; 3811 struct ipmi_user *user = NULL; 3812 struct ipmi_ipmb_addr *ipmb_addr; 3813 struct ipmi_recv_msg *recv_msg; 3814 3815 if (msg->rsp_size < 10) { 3816 /* Message not big enough, just ignore it. */ 3817 ipmi_inc_stat(intf, invalid_commands); 3818 return 0; 3819 } 3820 3821 if (msg->rsp[2] != 0) { 3822 /* An error getting the response, just ignore it. */ 3823 return 0; 3824 } 3825 3826 netfn = msg->rsp[4] >> 2; 3827 cmd = msg->rsp[8]; 3828 chan = msg->rsp[3] & 0xf; 3829 3830 rcu_read_lock(); 3831 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 3832 if (rcvr) { 3833 user = rcvr->user; 3834 kref_get(&user->refcount); 3835 } else 3836 user = NULL; 3837 rcu_read_unlock(); 3838 3839 if (user == NULL) { 3840 /* We didn't find a user, deliver an error response. */ 3841 ipmi_inc_stat(intf, unhandled_commands); 3842 3843 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 3844 msg->data[1] = IPMI_SEND_MSG_CMD; 3845 msg->data[2] = msg->rsp[3]; 3846 msg->data[3] = msg->rsp[6]; 3847 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); 3848 msg->data[5] = ipmb_checksum(&msg->data[3], 2); 3849 msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address; 3850 /* rqseq/lun */ 3851 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); 3852 msg->data[8] = msg->rsp[8]; /* cmd */ 3853 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE; 3854 msg->data[10] = ipmb_checksum(&msg->data[6], 4); 3855 msg->data_size = 11; 3856 3857 pr_debug("Invalid command: %*ph\n", msg->data_size, msg->data); 3858 3859 rcu_read_lock(); 3860 if (!intf->in_shutdown) { 3861 smi_send(intf, intf->handlers, msg, 0); 3862 /* 3863 * We used the message, so return the value 3864 * that causes it to not be freed or 3865 * queued. 3866 */ 3867 rv = -1; 3868 } 3869 rcu_read_unlock(); 3870 } else { 3871 recv_msg = ipmi_alloc_recv_msg(); 3872 if (!recv_msg) { 3873 /* 3874 * We couldn't allocate memory for the 3875 * message, so requeue it for handling 3876 * later. 3877 */ 3878 rv = 1; 3879 kref_put(&user->refcount, free_user); 3880 } else { 3881 /* Extract the source address from the data. */ 3882 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; 3883 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; 3884 ipmb_addr->slave_addr = msg->rsp[6]; 3885 ipmb_addr->lun = msg->rsp[7] & 3; 3886 ipmb_addr->channel = msg->rsp[3] & 0xf; 3887 3888 /* 3889 * Extract the rest of the message information 3890 * from the IPMB header. 3891 */ 3892 recv_msg->user = user; 3893 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 3894 recv_msg->msgid = msg->rsp[7] >> 2; 3895 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3896 recv_msg->msg.cmd = msg->rsp[8]; 3897 recv_msg->msg.data = recv_msg->msg_data; 3898 3899 /* 3900 * We chop off 10, not 9 bytes because the checksum 3901 * at the end also needs to be removed. 3902 */ 3903 recv_msg->msg.data_len = msg->rsp_size - 10; 3904 memcpy(recv_msg->msg_data, &msg->rsp[9], 3905 msg->rsp_size - 10); 3906 if (deliver_response(intf, recv_msg)) 3907 ipmi_inc_stat(intf, unhandled_commands); 3908 else 3909 ipmi_inc_stat(intf, handled_commands); 3910 } 3911 } 3912 3913 return rv; 3914 } 3915 3916 static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, 3917 struct ipmi_smi_msg *msg) 3918 { 3919 struct cmd_rcvr *rcvr; 3920 int rv = 0; 3921 struct ipmi_user *user = NULL; 3922 struct ipmi_ipmb_direct_addr *daddr; 3923 struct ipmi_recv_msg *recv_msg; 3924 unsigned char netfn = msg->rsp[0] >> 2; 3925 unsigned char cmd = msg->rsp[3]; 3926 3927 rcu_read_lock(); 3928 /* We always use channel 0 for direct messages. */ 3929 rcvr = find_cmd_rcvr(intf, netfn, cmd, 0); 3930 if (rcvr) { 3931 user = rcvr->user; 3932 kref_get(&user->refcount); 3933 } else 3934 user = NULL; 3935 rcu_read_unlock(); 3936 3937 if (user == NULL) { 3938 /* We didn't find a user, deliver an error response. */ 3939 ipmi_inc_stat(intf, unhandled_commands); 3940 3941 msg->data[0] = (netfn + 1) << 2; 3942 msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */ 3943 msg->data[1] = msg->rsp[1]; /* Addr */ 3944 msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */ 3945 msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */ 3946 msg->data[3] = cmd; 3947 msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE; 3948 msg->data_size = 5; 3949 3950 rcu_read_lock(); 3951 if (!intf->in_shutdown) { 3952 smi_send(intf, intf->handlers, msg, 0); 3953 /* 3954 * We used the message, so return the value 3955 * that causes it to not be freed or 3956 * queued. 3957 */ 3958 rv = -1; 3959 } 3960 rcu_read_unlock(); 3961 } else { 3962 recv_msg = ipmi_alloc_recv_msg(); 3963 if (!recv_msg) { 3964 /* 3965 * We couldn't allocate memory for the 3966 * message, so requeue it for handling 3967 * later. 3968 */ 3969 rv = 1; 3970 kref_put(&user->refcount, free_user); 3971 } else { 3972 /* Extract the source address from the data. */ 3973 daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr; 3974 daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; 3975 daddr->channel = 0; 3976 daddr->slave_addr = msg->rsp[1]; 3977 daddr->rs_lun = msg->rsp[0] & 3; 3978 daddr->rq_lun = msg->rsp[2] & 3; 3979 3980 /* 3981 * Extract the rest of the message information 3982 * from the IPMB header. 3983 */ 3984 recv_msg->user = user; 3985 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 3986 recv_msg->msgid = (msg->rsp[2] >> 2); 3987 recv_msg->msg.netfn = msg->rsp[0] >> 2; 3988 recv_msg->msg.cmd = msg->rsp[3]; 3989 recv_msg->msg.data = recv_msg->msg_data; 3990 3991 recv_msg->msg.data_len = msg->rsp_size - 4; 3992 memcpy(recv_msg->msg_data, msg->rsp + 4, 3993 msg->rsp_size - 4); 3994 if (deliver_response(intf, recv_msg)) 3995 ipmi_inc_stat(intf, unhandled_commands); 3996 else 3997 ipmi_inc_stat(intf, handled_commands); 3998 } 3999 } 4000 4001 return rv; 4002 } 4003 4004 static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf, 4005 struct ipmi_smi_msg *msg) 4006 { 4007 struct ipmi_recv_msg *recv_msg; 4008 struct ipmi_ipmb_direct_addr *daddr; 4009 4010 recv_msg = (struct ipmi_recv_msg *) msg->user_data; 4011 if (recv_msg == NULL) { 4012 dev_warn(intf->si_dev, 4013 "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n"); 4014 return 0; 4015 } 4016 4017 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 4018 recv_msg->msgid = msg->msgid; 4019 daddr = (struct ipmi_ipmb_direct_addr *) &recv_msg->addr; 4020 daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; 4021 daddr->channel = 0; 4022 daddr->slave_addr = msg->rsp[1]; 4023 daddr->rq_lun = msg->rsp[0] & 3; 4024 daddr->rs_lun = msg->rsp[2] & 3; 4025 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4026 recv_msg->msg.cmd = msg->rsp[3]; 4027 memcpy(recv_msg->msg_data, &msg->rsp[4], msg->rsp_size - 4); 4028 recv_msg->msg.data = recv_msg->msg_data; 4029 recv_msg->msg.data_len = msg->rsp_size - 4; 4030 deliver_local_response(intf, recv_msg); 4031 4032 return 0; 4033 } 4034 4035 static int handle_lan_get_msg_rsp(struct ipmi_smi *intf, 4036 struct ipmi_smi_msg *msg) 4037 { 4038 struct ipmi_lan_addr lan_addr; 4039 struct ipmi_recv_msg *recv_msg; 4040 4041 4042 /* 4043 * This is 13, not 12, because the response must contain a 4044 * completion code. 4045 */ 4046 if (msg->rsp_size < 13) { 4047 /* Message not big enough, just ignore it. */ 4048 ipmi_inc_stat(intf, invalid_lan_responses); 4049 return 0; 4050 } 4051 4052 if (msg->rsp[2] != 0) { 4053 /* An error getting the response, just ignore it. */ 4054 return 0; 4055 } 4056 4057 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE; 4058 lan_addr.session_handle = msg->rsp[4]; 4059 lan_addr.remote_SWID = msg->rsp[8]; 4060 lan_addr.local_SWID = msg->rsp[5]; 4061 lan_addr.channel = msg->rsp[3] & 0x0f; 4062 lan_addr.privilege = msg->rsp[3] >> 4; 4063 lan_addr.lun = msg->rsp[9] & 3; 4064 4065 /* 4066 * It's a response from a remote entity. Look up the sequence 4067 * number and handle the response. 4068 */ 4069 if (intf_find_seq(intf, 4070 msg->rsp[9] >> 2, 4071 msg->rsp[3] & 0x0f, 4072 msg->rsp[10], 4073 (msg->rsp[6] >> 2) & (~1), 4074 (struct ipmi_addr *) &lan_addr, 4075 &recv_msg)) { 4076 /* 4077 * We were unable to find the sequence number, 4078 * so just nuke the message. 4079 */ 4080 ipmi_inc_stat(intf, unhandled_lan_responses); 4081 return 0; 4082 } 4083 4084 memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11); 4085 /* 4086 * The other fields matched, so no need to set them, except 4087 * for netfn, which needs to be the response that was 4088 * returned, not the request value. 4089 */ 4090 recv_msg->msg.netfn = msg->rsp[6] >> 2; 4091 recv_msg->msg.data = recv_msg->msg_data; 4092 recv_msg->msg.data_len = msg->rsp_size - 12; 4093 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 4094 if (deliver_response(intf, recv_msg)) 4095 ipmi_inc_stat(intf, unhandled_lan_responses); 4096 else 4097 ipmi_inc_stat(intf, handled_lan_responses); 4098 4099 return 0; 4100 } 4101 4102 static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, 4103 struct ipmi_smi_msg *msg) 4104 { 4105 struct cmd_rcvr *rcvr; 4106 int rv = 0; 4107 unsigned char netfn; 4108 unsigned char cmd; 4109 unsigned char chan; 4110 struct ipmi_user *user = NULL; 4111 struct ipmi_lan_addr *lan_addr; 4112 struct ipmi_recv_msg *recv_msg; 4113 4114 if (msg->rsp_size < 12) { 4115 /* Message not big enough, just ignore it. */ 4116 ipmi_inc_stat(intf, invalid_commands); 4117 return 0; 4118 } 4119 4120 if (msg->rsp[2] != 0) { 4121 /* An error getting the response, just ignore it. */ 4122 return 0; 4123 } 4124 4125 netfn = msg->rsp[6] >> 2; 4126 cmd = msg->rsp[10]; 4127 chan = msg->rsp[3] & 0xf; 4128 4129 rcu_read_lock(); 4130 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 4131 if (rcvr) { 4132 user = rcvr->user; 4133 kref_get(&user->refcount); 4134 } else 4135 user = NULL; 4136 rcu_read_unlock(); 4137 4138 if (user == NULL) { 4139 /* We didn't find a user, just give up. */ 4140 ipmi_inc_stat(intf, unhandled_commands); 4141 4142 /* 4143 * Don't do anything with these messages, just allow 4144 * them to be freed. 4145 */ 4146 rv = 0; 4147 } else { 4148 recv_msg = ipmi_alloc_recv_msg(); 4149 if (!recv_msg) { 4150 /* 4151 * We couldn't allocate memory for the 4152 * message, so requeue it for handling later. 4153 */ 4154 rv = 1; 4155 kref_put(&user->refcount, free_user); 4156 } else { 4157 /* Extract the source address from the data. */ 4158 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; 4159 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE; 4160 lan_addr->session_handle = msg->rsp[4]; 4161 lan_addr->remote_SWID = msg->rsp[8]; 4162 lan_addr->local_SWID = msg->rsp[5]; 4163 lan_addr->lun = msg->rsp[9] & 3; 4164 lan_addr->channel = msg->rsp[3] & 0xf; 4165 lan_addr->privilege = msg->rsp[3] >> 4; 4166 4167 /* 4168 * Extract the rest of the message information 4169 * from the IPMB header. 4170 */ 4171 recv_msg->user = user; 4172 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 4173 recv_msg->msgid = msg->rsp[9] >> 2; 4174 recv_msg->msg.netfn = msg->rsp[6] >> 2; 4175 recv_msg->msg.cmd = msg->rsp[10]; 4176 recv_msg->msg.data = recv_msg->msg_data; 4177 4178 /* 4179 * We chop off 12, not 11 bytes because the checksum 4180 * at the end also needs to be removed. 4181 */ 4182 recv_msg->msg.data_len = msg->rsp_size - 12; 4183 memcpy(recv_msg->msg_data, &msg->rsp[11], 4184 msg->rsp_size - 12); 4185 if (deliver_response(intf, recv_msg)) 4186 ipmi_inc_stat(intf, unhandled_commands); 4187 else 4188 ipmi_inc_stat(intf, handled_commands); 4189 } 4190 } 4191 4192 return rv; 4193 } 4194 4195 /* 4196 * This routine will handle "Get Message" command responses with 4197 * channels that use an OEM Medium. The message format belongs to 4198 * the OEM. See IPMI 2.0 specification, Chapter 6 and 4199 * Chapter 22, sections 22.6 and 22.24 for more details. 4200 */ 4201 static int handle_oem_get_msg_cmd(struct ipmi_smi *intf, 4202 struct ipmi_smi_msg *msg) 4203 { 4204 struct cmd_rcvr *rcvr; 4205 int rv = 0; 4206 unsigned char netfn; 4207 unsigned char cmd; 4208 unsigned char chan; 4209 struct ipmi_user *user = NULL; 4210 struct ipmi_system_interface_addr *smi_addr; 4211 struct ipmi_recv_msg *recv_msg; 4212 4213 /* 4214 * We expect the OEM SW to perform error checking 4215 * so we just do some basic sanity checks 4216 */ 4217 if (msg->rsp_size < 4) { 4218 /* Message not big enough, just ignore it. */ 4219 ipmi_inc_stat(intf, invalid_commands); 4220 return 0; 4221 } 4222 4223 if (msg->rsp[2] != 0) { 4224 /* An error getting the response, just ignore it. */ 4225 return 0; 4226 } 4227 4228 /* 4229 * This is an OEM Message so the OEM needs to know how 4230 * handle the message. We do no interpretation. 4231 */ 4232 netfn = msg->rsp[0] >> 2; 4233 cmd = msg->rsp[1]; 4234 chan = msg->rsp[3] & 0xf; 4235 4236 rcu_read_lock(); 4237 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 4238 if (rcvr) { 4239 user = rcvr->user; 4240 kref_get(&user->refcount); 4241 } else 4242 user = NULL; 4243 rcu_read_unlock(); 4244 4245 if (user == NULL) { 4246 /* We didn't find a user, just give up. */ 4247 ipmi_inc_stat(intf, unhandled_commands); 4248 4249 /* 4250 * Don't do anything with these messages, just allow 4251 * them to be freed. 4252 */ 4253 4254 rv = 0; 4255 } else { 4256 recv_msg = ipmi_alloc_recv_msg(); 4257 if (!recv_msg) { 4258 /* 4259 * We couldn't allocate memory for the 4260 * message, so requeue it for handling 4261 * later. 4262 */ 4263 rv = 1; 4264 kref_put(&user->refcount, free_user); 4265 } else { 4266 /* 4267 * OEM Messages are expected to be delivered via 4268 * the system interface to SMS software. We might 4269 * need to visit this again depending on OEM 4270 * requirements 4271 */ 4272 smi_addr = ((struct ipmi_system_interface_addr *) 4273 &recv_msg->addr); 4274 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 4275 smi_addr->channel = IPMI_BMC_CHANNEL; 4276 smi_addr->lun = msg->rsp[0] & 3; 4277 4278 recv_msg->user = user; 4279 recv_msg->user_msg_data = NULL; 4280 recv_msg->recv_type = IPMI_OEM_RECV_TYPE; 4281 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4282 recv_msg->msg.cmd = msg->rsp[1]; 4283 recv_msg->msg.data = recv_msg->msg_data; 4284 4285 /* 4286 * The message starts at byte 4 which follows the 4287 * the Channel Byte in the "GET MESSAGE" command 4288 */ 4289 recv_msg->msg.data_len = msg->rsp_size - 4; 4290 memcpy(recv_msg->msg_data, &msg->rsp[4], 4291 msg->rsp_size - 4); 4292 if (deliver_response(intf, recv_msg)) 4293 ipmi_inc_stat(intf, unhandled_commands); 4294 else 4295 ipmi_inc_stat(intf, handled_commands); 4296 } 4297 } 4298 4299 return rv; 4300 } 4301 4302 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg, 4303 struct ipmi_smi_msg *msg) 4304 { 4305 struct ipmi_system_interface_addr *smi_addr; 4306 4307 recv_msg->msgid = 0; 4308 smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr; 4309 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 4310 smi_addr->channel = IPMI_BMC_CHANNEL; 4311 smi_addr->lun = msg->rsp[0] & 3; 4312 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE; 4313 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4314 recv_msg->msg.cmd = msg->rsp[1]; 4315 memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3); 4316 recv_msg->msg.data = recv_msg->msg_data; 4317 recv_msg->msg.data_len = msg->rsp_size - 3; 4318 } 4319 4320 static int handle_read_event_rsp(struct ipmi_smi *intf, 4321 struct ipmi_smi_msg *msg) 4322 { 4323 struct ipmi_recv_msg *recv_msg, *recv_msg2; 4324 struct list_head msgs; 4325 struct ipmi_user *user; 4326 int rv = 0, deliver_count = 0, index; 4327 unsigned long flags; 4328 4329 if (msg->rsp_size < 19) { 4330 /* Message is too small to be an IPMB event. */ 4331 ipmi_inc_stat(intf, invalid_events); 4332 return 0; 4333 } 4334 4335 if (msg->rsp[2] != 0) { 4336 /* An error getting the event, just ignore it. */ 4337 return 0; 4338 } 4339 4340 INIT_LIST_HEAD(&msgs); 4341 4342 spin_lock_irqsave(&intf->events_lock, flags); 4343 4344 ipmi_inc_stat(intf, events); 4345 4346 /* 4347 * Allocate and fill in one message for every user that is 4348 * getting events. 4349 */ 4350 index = srcu_read_lock(&intf->users_srcu); 4351 list_for_each_entry_rcu(user, &intf->users, link) { 4352 if (!user->gets_events) 4353 continue; 4354 4355 recv_msg = ipmi_alloc_recv_msg(); 4356 if (!recv_msg) { 4357 rcu_read_unlock(); 4358 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, 4359 link) { 4360 list_del(&recv_msg->link); 4361 ipmi_free_recv_msg(recv_msg); 4362 } 4363 /* 4364 * We couldn't allocate memory for the 4365 * message, so requeue it for handling 4366 * later. 4367 */ 4368 rv = 1; 4369 goto out; 4370 } 4371 4372 deliver_count++; 4373 4374 copy_event_into_recv_msg(recv_msg, msg); 4375 recv_msg->user = user; 4376 kref_get(&user->refcount); 4377 list_add_tail(&recv_msg->link, &msgs); 4378 } 4379 srcu_read_unlock(&intf->users_srcu, index); 4380 4381 if (deliver_count) { 4382 /* Now deliver all the messages. */ 4383 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) { 4384 list_del(&recv_msg->link); 4385 deliver_local_response(intf, recv_msg); 4386 } 4387 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) { 4388 /* 4389 * No one to receive the message, put it in queue if there's 4390 * not already too many things in the queue. 4391 */ 4392 recv_msg = ipmi_alloc_recv_msg(); 4393 if (!recv_msg) { 4394 /* 4395 * We couldn't allocate memory for the 4396 * message, so requeue it for handling 4397 * later. 4398 */ 4399 rv = 1; 4400 goto out; 4401 } 4402 4403 copy_event_into_recv_msg(recv_msg, msg); 4404 list_add_tail(&recv_msg->link, &intf->waiting_events); 4405 intf->waiting_events_count++; 4406 } else if (!intf->event_msg_printed) { 4407 /* 4408 * There's too many things in the queue, discard this 4409 * message. 4410 */ 4411 dev_warn(intf->si_dev, 4412 "Event queue full, discarding incoming events\n"); 4413 intf->event_msg_printed = 1; 4414 } 4415 4416 out: 4417 spin_unlock_irqrestore(&intf->events_lock, flags); 4418 4419 return rv; 4420 } 4421 4422 static int handle_bmc_rsp(struct ipmi_smi *intf, 4423 struct ipmi_smi_msg *msg) 4424 { 4425 struct ipmi_recv_msg *recv_msg; 4426 struct ipmi_system_interface_addr *smi_addr; 4427 4428 recv_msg = (struct ipmi_recv_msg *) msg->user_data; 4429 if (recv_msg == NULL) { 4430 dev_warn(intf->si_dev, 4431 "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n"); 4432 return 0; 4433 } 4434 4435 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 4436 recv_msg->msgid = msg->msgid; 4437 smi_addr = ((struct ipmi_system_interface_addr *) 4438 &recv_msg->addr); 4439 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 4440 smi_addr->channel = IPMI_BMC_CHANNEL; 4441 smi_addr->lun = msg->rsp[0] & 3; 4442 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4443 recv_msg->msg.cmd = msg->rsp[1]; 4444 memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2); 4445 recv_msg->msg.data = recv_msg->msg_data; 4446 recv_msg->msg.data_len = msg->rsp_size - 2; 4447 deliver_local_response(intf, recv_msg); 4448 4449 return 0; 4450 } 4451 4452 /* 4453 * Handle a received message. Return 1 if the message should be requeued, 4454 * 0 if the message should be freed, or -1 if the message should not 4455 * be freed or requeued. 4456 */ 4457 static int handle_one_recv_msg(struct ipmi_smi *intf, 4458 struct ipmi_smi_msg *msg) 4459 { 4460 int requeue = 0; 4461 int chan; 4462 unsigned char cc; 4463 bool is_cmd = !((msg->rsp[0] >> 2) & 1); 4464 4465 pr_debug("Recv: %*ph\n", msg->rsp_size, msg->rsp); 4466 4467 if (msg->rsp_size < 2) { 4468 /* Message is too small to be correct. */ 4469 dev_warn(intf->si_dev, 4470 "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n", 4471 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size); 4472 4473 return_unspecified: 4474 /* Generate an error response for the message. */ 4475 msg->rsp[0] = msg->data[0] | (1 << 2); 4476 msg->rsp[1] = msg->data[1]; 4477 msg->rsp[2] = IPMI_ERR_UNSPECIFIED; 4478 msg->rsp_size = 3; 4479 } else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) { 4480 /* commands must have at least 4 bytes, responses 5. */ 4481 if (is_cmd && (msg->rsp_size < 4)) { 4482 ipmi_inc_stat(intf, invalid_commands); 4483 goto out; 4484 } 4485 if (!is_cmd && (msg->rsp_size < 5)) { 4486 ipmi_inc_stat(intf, invalid_ipmb_responses); 4487 /* Construct a valid error response. */ 4488 msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */ 4489 msg->rsp[0] |= (1 << 2); /* Make it a response */ 4490 msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */ 4491 msg->rsp[1] = msg->data[1]; /* Addr */ 4492 msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */ 4493 msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */ 4494 msg->rsp[3] = msg->data[3]; /* Cmd */ 4495 msg->rsp[4] = IPMI_ERR_UNSPECIFIED; 4496 msg->rsp_size = 5; 4497 } 4498 } else if ((msg->data_size >= 2) 4499 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) 4500 && (msg->data[1] == IPMI_SEND_MSG_CMD) 4501 && (msg->user_data == NULL)) { 4502 4503 if (intf->in_shutdown) 4504 goto out; 4505 4506 /* 4507 * This is the local response to a command send, start 4508 * the timer for these. The user_data will not be 4509 * NULL if this is a response send, and we will let 4510 * response sends just go through. 4511 */ 4512 4513 /* 4514 * Check for errors, if we get certain errors (ones 4515 * that mean basically we can try again later), we 4516 * ignore them and start the timer. Otherwise we 4517 * report the error immediately. 4518 */ 4519 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0) 4520 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR) 4521 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR) 4522 && (msg->rsp[2] != IPMI_BUS_ERR) 4523 && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) { 4524 int ch = msg->rsp[3] & 0xf; 4525 struct ipmi_channel *chans; 4526 4527 /* Got an error sending the message, handle it. */ 4528 4529 chans = READ_ONCE(intf->channel_list)->c; 4530 if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN) 4531 || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC)) 4532 ipmi_inc_stat(intf, sent_lan_command_errs); 4533 else 4534 ipmi_inc_stat(intf, sent_ipmb_command_errs); 4535 intf_err_seq(intf, msg->msgid, msg->rsp[2]); 4536 } else 4537 /* The message was sent, start the timer. */ 4538 intf_start_seq_timer(intf, msg->msgid); 4539 requeue = 0; 4540 goto out; 4541 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1)) 4542 || (msg->rsp[1] != msg->data[1])) { 4543 /* 4544 * The NetFN and Command in the response is not even 4545 * marginally correct. 4546 */ 4547 dev_warn(intf->si_dev, 4548 "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n", 4549 (msg->data[0] >> 2) | 1, msg->data[1], 4550 msg->rsp[0] >> 2, msg->rsp[1]); 4551 4552 goto return_unspecified; 4553 } 4554 4555 if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) { 4556 if ((msg->data[0] >> 2) & 1) { 4557 /* It's a response to a sent response. */ 4558 chan = 0; 4559 cc = msg->rsp[4]; 4560 goto process_response_response; 4561 } 4562 if (is_cmd) 4563 requeue = handle_ipmb_direct_rcv_cmd(intf, msg); 4564 else 4565 requeue = handle_ipmb_direct_rcv_rsp(intf, msg); 4566 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 4567 && (msg->rsp[1] == IPMI_SEND_MSG_CMD) 4568 && (msg->user_data != NULL)) { 4569 /* 4570 * It's a response to a response we sent. For this we 4571 * deliver a send message response to the user. 4572 */ 4573 struct ipmi_recv_msg *recv_msg; 4574 4575 chan = msg->data[2] & 0x0f; 4576 if (chan >= IPMI_MAX_CHANNELS) 4577 /* Invalid channel number */ 4578 goto out; 4579 cc = msg->rsp[2]; 4580 4581 process_response_response: 4582 recv_msg = msg->user_data; 4583 4584 requeue = 0; 4585 if (!recv_msg) 4586 goto out; 4587 4588 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE; 4589 recv_msg->msg.data = recv_msg->msg_data; 4590 recv_msg->msg_data[0] = cc; 4591 recv_msg->msg.data_len = 1; 4592 deliver_local_response(intf, recv_msg); 4593 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 4594 && (msg->rsp[1] == IPMI_GET_MSG_CMD)) { 4595 struct ipmi_channel *chans; 4596 4597 /* It's from the receive queue. */ 4598 chan = msg->rsp[3] & 0xf; 4599 if (chan >= IPMI_MAX_CHANNELS) { 4600 /* Invalid channel number */ 4601 requeue = 0; 4602 goto out; 4603 } 4604 4605 /* 4606 * We need to make sure the channels have been initialized. 4607 * The channel_handler routine will set the "curr_channel" 4608 * equal to or greater than IPMI_MAX_CHANNELS when all the 4609 * channels for this interface have been initialized. 4610 */ 4611 if (!intf->channels_ready) { 4612 requeue = 0; /* Throw the message away */ 4613 goto out; 4614 } 4615 4616 chans = READ_ONCE(intf->channel_list)->c; 4617 4618 switch (chans[chan].medium) { 4619 case IPMI_CHANNEL_MEDIUM_IPMB: 4620 if (msg->rsp[4] & 0x04) { 4621 /* 4622 * It's a response, so find the 4623 * requesting message and send it up. 4624 */ 4625 requeue = handle_ipmb_get_msg_rsp(intf, msg); 4626 } else { 4627 /* 4628 * It's a command to the SMS from some other 4629 * entity. Handle that. 4630 */ 4631 requeue = handle_ipmb_get_msg_cmd(intf, msg); 4632 } 4633 break; 4634 4635 case IPMI_CHANNEL_MEDIUM_8023LAN: 4636 case IPMI_CHANNEL_MEDIUM_ASYNC: 4637 if (msg->rsp[6] & 0x04) { 4638 /* 4639 * It's a response, so find the 4640 * requesting message and send it up. 4641 */ 4642 requeue = handle_lan_get_msg_rsp(intf, msg); 4643 } else { 4644 /* 4645 * It's a command to the SMS from some other 4646 * entity. Handle that. 4647 */ 4648 requeue = handle_lan_get_msg_cmd(intf, msg); 4649 } 4650 break; 4651 4652 default: 4653 /* Check for OEM Channels. Clients had better 4654 register for these commands. */ 4655 if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN) 4656 && (chans[chan].medium 4657 <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) { 4658 requeue = handle_oem_get_msg_cmd(intf, msg); 4659 } else { 4660 /* 4661 * We don't handle the channel type, so just 4662 * free the message. 4663 */ 4664 requeue = 0; 4665 } 4666 } 4667 4668 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 4669 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) { 4670 /* It's an asynchronous event. */ 4671 requeue = handle_read_event_rsp(intf, msg); 4672 } else { 4673 /* It's a response from the local BMC. */ 4674 requeue = handle_bmc_rsp(intf, msg); 4675 } 4676 4677 out: 4678 return requeue; 4679 } 4680 4681 /* 4682 * If there are messages in the queue or pretimeouts, handle them. 4683 */ 4684 static void handle_new_recv_msgs(struct ipmi_smi *intf) 4685 { 4686 struct ipmi_smi_msg *smi_msg; 4687 unsigned long flags = 0; 4688 int rv; 4689 int run_to_completion = intf->run_to_completion; 4690 4691 /* See if any waiting messages need to be processed. */ 4692 if (!run_to_completion) 4693 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); 4694 while (!list_empty(&intf->waiting_rcv_msgs)) { 4695 smi_msg = list_entry(intf->waiting_rcv_msgs.next, 4696 struct ipmi_smi_msg, link); 4697 list_del(&smi_msg->link); 4698 if (!run_to_completion) 4699 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, 4700 flags); 4701 rv = handle_one_recv_msg(intf, smi_msg); 4702 if (!run_to_completion) 4703 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); 4704 if (rv > 0) { 4705 /* 4706 * To preserve message order, quit if we 4707 * can't handle a message. Add the message 4708 * back at the head, this is safe because this 4709 * tasklet is the only thing that pulls the 4710 * messages. 4711 */ 4712 list_add(&smi_msg->link, &intf->waiting_rcv_msgs); 4713 break; 4714 } else { 4715 if (rv == 0) 4716 /* Message handled */ 4717 ipmi_free_smi_msg(smi_msg); 4718 /* If rv < 0, fatal error, del but don't free. */ 4719 } 4720 } 4721 if (!run_to_completion) 4722 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags); 4723 4724 /* 4725 * If the pretimout count is non-zero, decrement one from it and 4726 * deliver pretimeouts to all the users. 4727 */ 4728 if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) { 4729 struct ipmi_user *user; 4730 int index; 4731 4732 index = srcu_read_lock(&intf->users_srcu); 4733 list_for_each_entry_rcu(user, &intf->users, link) { 4734 if (user->handler->ipmi_watchdog_pretimeout) 4735 user->handler->ipmi_watchdog_pretimeout( 4736 user->handler_data); 4737 } 4738 srcu_read_unlock(&intf->users_srcu, index); 4739 } 4740 } 4741 4742 static void smi_recv_tasklet(struct tasklet_struct *t) 4743 { 4744 unsigned long flags = 0; /* keep us warning-free. */ 4745 struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet); 4746 int run_to_completion = intf->run_to_completion; 4747 struct ipmi_smi_msg *newmsg = NULL; 4748 4749 /* 4750 * Start the next message if available. 4751 * 4752 * Do this here, not in the actual receiver, because we may deadlock 4753 * because the lower layer is allowed to hold locks while calling 4754 * message delivery. 4755 */ 4756 4757 rcu_read_lock(); 4758 4759 if (!run_to_completion) 4760 spin_lock_irqsave(&intf->xmit_msgs_lock, flags); 4761 if (intf->curr_msg == NULL && !intf->in_shutdown) { 4762 struct list_head *entry = NULL; 4763 4764 /* Pick the high priority queue first. */ 4765 if (!list_empty(&intf->hp_xmit_msgs)) 4766 entry = intf->hp_xmit_msgs.next; 4767 else if (!list_empty(&intf->xmit_msgs)) 4768 entry = intf->xmit_msgs.next; 4769 4770 if (entry) { 4771 list_del(entry); 4772 newmsg = list_entry(entry, struct ipmi_smi_msg, link); 4773 intf->curr_msg = newmsg; 4774 } 4775 } 4776 4777 if (!run_to_completion) 4778 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); 4779 if (newmsg) 4780 intf->handlers->sender(intf->send_info, newmsg); 4781 4782 rcu_read_unlock(); 4783 4784 handle_new_recv_msgs(intf); 4785 } 4786 4787 /* Handle a new message from the lower layer. */ 4788 void ipmi_smi_msg_received(struct ipmi_smi *intf, 4789 struct ipmi_smi_msg *msg) 4790 { 4791 unsigned long flags = 0; /* keep us warning-free. */ 4792 int run_to_completion = intf->run_to_completion; 4793 4794 /* 4795 * To preserve message order, we keep a queue and deliver from 4796 * a tasklet. 4797 */ 4798 if (!run_to_completion) 4799 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); 4800 list_add_tail(&msg->link, &intf->waiting_rcv_msgs); 4801 if (!run_to_completion) 4802 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, 4803 flags); 4804 4805 if (!run_to_completion) 4806 spin_lock_irqsave(&intf->xmit_msgs_lock, flags); 4807 /* 4808 * We can get an asynchronous event or receive message in addition 4809 * to commands we send. 4810 */ 4811 if (msg == intf->curr_msg) 4812 intf->curr_msg = NULL; 4813 if (!run_to_completion) 4814 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); 4815 4816 if (run_to_completion) 4817 smi_recv_tasklet(&intf->recv_tasklet); 4818 else 4819 tasklet_schedule(&intf->recv_tasklet); 4820 } 4821 EXPORT_SYMBOL(ipmi_smi_msg_received); 4822 4823 void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf) 4824 { 4825 if (intf->in_shutdown) 4826 return; 4827 4828 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1); 4829 tasklet_schedule(&intf->recv_tasklet); 4830 } 4831 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); 4832 4833 static struct ipmi_smi_msg * 4834 smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg, 4835 unsigned char seq, long seqid) 4836 { 4837 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg(); 4838 if (!smi_msg) 4839 /* 4840 * If we can't allocate the message, then just return, we 4841 * get 4 retries, so this should be ok. 4842 */ 4843 return NULL; 4844 4845 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len); 4846 smi_msg->data_size = recv_msg->msg.data_len; 4847 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid); 4848 4849 pr_debug("Resend: %*ph\n", smi_msg->data_size, smi_msg->data); 4850 4851 return smi_msg; 4852 } 4853 4854 static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent, 4855 struct list_head *timeouts, 4856 unsigned long timeout_period, 4857 int slot, unsigned long *flags, 4858 bool *need_timer) 4859 { 4860 struct ipmi_recv_msg *msg; 4861 4862 if (intf->in_shutdown) 4863 return; 4864 4865 if (!ent->inuse) 4866 return; 4867 4868 if (timeout_period < ent->timeout) { 4869 ent->timeout -= timeout_period; 4870 *need_timer = true; 4871 return; 4872 } 4873 4874 if (ent->retries_left == 0) { 4875 /* The message has used all its retries. */ 4876 ent->inuse = 0; 4877 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); 4878 msg = ent->recv_msg; 4879 list_add_tail(&msg->link, timeouts); 4880 if (ent->broadcast) 4881 ipmi_inc_stat(intf, timed_out_ipmb_broadcasts); 4882 else if (is_lan_addr(&ent->recv_msg->addr)) 4883 ipmi_inc_stat(intf, timed_out_lan_commands); 4884 else 4885 ipmi_inc_stat(intf, timed_out_ipmb_commands); 4886 } else { 4887 struct ipmi_smi_msg *smi_msg; 4888 /* More retries, send again. */ 4889 4890 *need_timer = true; 4891 4892 /* 4893 * Start with the max timer, set to normal timer after 4894 * the message is sent. 4895 */ 4896 ent->timeout = MAX_MSG_TIMEOUT; 4897 ent->retries_left--; 4898 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot, 4899 ent->seqid); 4900 if (!smi_msg) { 4901 if (is_lan_addr(&ent->recv_msg->addr)) 4902 ipmi_inc_stat(intf, 4903 dropped_rexmit_lan_commands); 4904 else 4905 ipmi_inc_stat(intf, 4906 dropped_rexmit_ipmb_commands); 4907 return; 4908 } 4909 4910 spin_unlock_irqrestore(&intf->seq_lock, *flags); 4911 4912 /* 4913 * Send the new message. We send with a zero 4914 * priority. It timed out, I doubt time is that 4915 * critical now, and high priority messages are really 4916 * only for messages to the local MC, which don't get 4917 * resent. 4918 */ 4919 if (intf->handlers) { 4920 if (is_lan_addr(&ent->recv_msg->addr)) 4921 ipmi_inc_stat(intf, 4922 retransmitted_lan_commands); 4923 else 4924 ipmi_inc_stat(intf, 4925 retransmitted_ipmb_commands); 4926 4927 smi_send(intf, intf->handlers, smi_msg, 0); 4928 } else 4929 ipmi_free_smi_msg(smi_msg); 4930 4931 spin_lock_irqsave(&intf->seq_lock, *flags); 4932 } 4933 } 4934 4935 static bool ipmi_timeout_handler(struct ipmi_smi *intf, 4936 unsigned long timeout_period) 4937 { 4938 struct list_head timeouts; 4939 struct ipmi_recv_msg *msg, *msg2; 4940 unsigned long flags; 4941 int i; 4942 bool need_timer = false; 4943 4944 if (!intf->bmc_registered) { 4945 kref_get(&intf->refcount); 4946 if (!schedule_work(&intf->bmc_reg_work)) { 4947 kref_put(&intf->refcount, intf_free); 4948 need_timer = true; 4949 } 4950 } 4951 4952 /* 4953 * Go through the seq table and find any messages that 4954 * have timed out, putting them in the timeouts 4955 * list. 4956 */ 4957 INIT_LIST_HEAD(&timeouts); 4958 spin_lock_irqsave(&intf->seq_lock, flags); 4959 if (intf->ipmb_maintenance_mode_timeout) { 4960 if (intf->ipmb_maintenance_mode_timeout <= timeout_period) 4961 intf->ipmb_maintenance_mode_timeout = 0; 4962 else 4963 intf->ipmb_maintenance_mode_timeout -= timeout_period; 4964 } 4965 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) 4966 check_msg_timeout(intf, &intf->seq_table[i], 4967 &timeouts, timeout_period, i, 4968 &flags, &need_timer); 4969 spin_unlock_irqrestore(&intf->seq_lock, flags); 4970 4971 list_for_each_entry_safe(msg, msg2, &timeouts, link) 4972 deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE); 4973 4974 /* 4975 * Maintenance mode handling. Check the timeout 4976 * optimistically before we claim the lock. It may 4977 * mean a timeout gets missed occasionally, but that 4978 * only means the timeout gets extended by one period 4979 * in that case. No big deal, and it avoids the lock 4980 * most of the time. 4981 */ 4982 if (intf->auto_maintenance_timeout > 0) { 4983 spin_lock_irqsave(&intf->maintenance_mode_lock, flags); 4984 if (intf->auto_maintenance_timeout > 0) { 4985 intf->auto_maintenance_timeout 4986 -= timeout_period; 4987 if (!intf->maintenance_mode 4988 && (intf->auto_maintenance_timeout <= 0)) { 4989 intf->maintenance_mode_enable = false; 4990 maintenance_mode_update(intf); 4991 } 4992 } 4993 spin_unlock_irqrestore(&intf->maintenance_mode_lock, 4994 flags); 4995 } 4996 4997 tasklet_schedule(&intf->recv_tasklet); 4998 4999 return need_timer; 5000 } 5001 5002 static void ipmi_request_event(struct ipmi_smi *intf) 5003 { 5004 /* No event requests when in maintenance mode. */ 5005 if (intf->maintenance_mode_enable) 5006 return; 5007 5008 if (!intf->in_shutdown) 5009 intf->handlers->request_events(intf->send_info); 5010 } 5011 5012 static struct timer_list ipmi_timer; 5013 5014 static atomic_t stop_operation; 5015 5016 static void ipmi_timeout(struct timer_list *unused) 5017 { 5018 struct ipmi_smi *intf; 5019 bool need_timer = false; 5020 int index; 5021 5022 if (atomic_read(&stop_operation)) 5023 return; 5024 5025 index = srcu_read_lock(&ipmi_interfaces_srcu); 5026 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { 5027 if (atomic_read(&intf->event_waiters)) { 5028 intf->ticks_to_req_ev--; 5029 if (intf->ticks_to_req_ev == 0) { 5030 ipmi_request_event(intf); 5031 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 5032 } 5033 need_timer = true; 5034 } 5035 5036 need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME); 5037 } 5038 srcu_read_unlock(&ipmi_interfaces_srcu, index); 5039 5040 if (need_timer) 5041 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); 5042 } 5043 5044 static void need_waiter(struct ipmi_smi *intf) 5045 { 5046 /* Racy, but worst case we start the timer twice. */ 5047 if (!timer_pending(&ipmi_timer)) 5048 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); 5049 } 5050 5051 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0); 5052 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0); 5053 5054 static void free_smi_msg(struct ipmi_smi_msg *msg) 5055 { 5056 atomic_dec(&smi_msg_inuse_count); 5057 /* Try to keep as much stuff out of the panic path as possible. */ 5058 if (!oops_in_progress) 5059 kfree(msg); 5060 } 5061 5062 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void) 5063 { 5064 struct ipmi_smi_msg *rv; 5065 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC); 5066 if (rv) { 5067 rv->done = free_smi_msg; 5068 rv->user_data = NULL; 5069 rv->type = IPMI_SMI_MSG_TYPE_NORMAL; 5070 atomic_inc(&smi_msg_inuse_count); 5071 } 5072 return rv; 5073 } 5074 EXPORT_SYMBOL(ipmi_alloc_smi_msg); 5075 5076 static void free_recv_msg(struct ipmi_recv_msg *msg) 5077 { 5078 atomic_dec(&recv_msg_inuse_count); 5079 /* Try to keep as much stuff out of the panic path as possible. */ 5080 if (!oops_in_progress) 5081 kfree(msg); 5082 } 5083 5084 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void) 5085 { 5086 struct ipmi_recv_msg *rv; 5087 5088 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC); 5089 if (rv) { 5090 rv->user = NULL; 5091 rv->done = free_recv_msg; 5092 atomic_inc(&recv_msg_inuse_count); 5093 } 5094 return rv; 5095 } 5096 5097 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg) 5098 { 5099 if (msg->user && !oops_in_progress) 5100 kref_put(&msg->user->refcount, free_user); 5101 msg->done(msg); 5102 } 5103 EXPORT_SYMBOL(ipmi_free_recv_msg); 5104 5105 static atomic_t panic_done_count = ATOMIC_INIT(0); 5106 5107 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg) 5108 { 5109 atomic_dec(&panic_done_count); 5110 } 5111 5112 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg) 5113 { 5114 atomic_dec(&panic_done_count); 5115 } 5116 5117 /* 5118 * Inside a panic, send a message and wait for a response. 5119 */ 5120 static void ipmi_panic_request_and_wait(struct ipmi_smi *intf, 5121 struct ipmi_addr *addr, 5122 struct kernel_ipmi_msg *msg) 5123 { 5124 struct ipmi_smi_msg smi_msg; 5125 struct ipmi_recv_msg recv_msg; 5126 int rv; 5127 5128 smi_msg.done = dummy_smi_done_handler; 5129 recv_msg.done = dummy_recv_done_handler; 5130 atomic_add(2, &panic_done_count); 5131 rv = i_ipmi_request(NULL, 5132 intf, 5133 addr, 5134 0, 5135 msg, 5136 intf, 5137 &smi_msg, 5138 &recv_msg, 5139 0, 5140 intf->addrinfo[0].address, 5141 intf->addrinfo[0].lun, 5142 0, 1); /* Don't retry, and don't wait. */ 5143 if (rv) 5144 atomic_sub(2, &panic_done_count); 5145 else if (intf->handlers->flush_messages) 5146 intf->handlers->flush_messages(intf->send_info); 5147 5148 while (atomic_read(&panic_done_count) != 0) 5149 ipmi_poll(intf); 5150 } 5151 5152 static void event_receiver_fetcher(struct ipmi_smi *intf, 5153 struct ipmi_recv_msg *msg) 5154 { 5155 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 5156 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE) 5157 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD) 5158 && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) { 5159 /* A get event receiver command, save it. */ 5160 intf->event_receiver = msg->msg.data[1]; 5161 intf->event_receiver_lun = msg->msg.data[2] & 0x3; 5162 } 5163 } 5164 5165 static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 5166 { 5167 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 5168 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) 5169 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD) 5170 && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) { 5171 /* 5172 * A get device id command, save if we are an event 5173 * receiver or generator. 5174 */ 5175 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1; 5176 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1; 5177 } 5178 } 5179 5180 static void send_panic_events(struct ipmi_smi *intf, char *str) 5181 { 5182 struct kernel_ipmi_msg msg; 5183 unsigned char data[16]; 5184 struct ipmi_system_interface_addr *si; 5185 struct ipmi_addr addr; 5186 char *p = str; 5187 struct ipmi_ipmb_addr *ipmb; 5188 int j; 5189 5190 if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE) 5191 return; 5192 5193 si = (struct ipmi_system_interface_addr *) &addr; 5194 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 5195 si->channel = IPMI_BMC_CHANNEL; 5196 si->lun = 0; 5197 5198 /* Fill in an event telling that we have failed. */ 5199 msg.netfn = 0x04; /* Sensor or Event. */ 5200 msg.cmd = 2; /* Platform event command. */ 5201 msg.data = data; 5202 msg.data_len = 8; 5203 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */ 5204 data[1] = 0x03; /* This is for IPMI 1.0. */ 5205 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */ 5206 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */ 5207 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */ 5208 5209 /* 5210 * Put a few breadcrumbs in. Hopefully later we can add more things 5211 * to make the panic events more useful. 5212 */ 5213 if (str) { 5214 data[3] = str[0]; 5215 data[6] = str[1]; 5216 data[7] = str[2]; 5217 } 5218 5219 /* Send the event announcing the panic. */ 5220 ipmi_panic_request_and_wait(intf, &addr, &msg); 5221 5222 /* 5223 * On every interface, dump a bunch of OEM event holding the 5224 * string. 5225 */ 5226 if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str) 5227 return; 5228 5229 /* 5230 * intf_num is used as an marker to tell if the 5231 * interface is valid. Thus we need a read barrier to 5232 * make sure data fetched before checking intf_num 5233 * won't be used. 5234 */ 5235 smp_rmb(); 5236 5237 /* 5238 * First job here is to figure out where to send the 5239 * OEM events. There's no way in IPMI to send OEM 5240 * events using an event send command, so we have to 5241 * find the SEL to put them in and stick them in 5242 * there. 5243 */ 5244 5245 /* Get capabilities from the get device id. */ 5246 intf->local_sel_device = 0; 5247 intf->local_event_generator = 0; 5248 intf->event_receiver = 0; 5249 5250 /* Request the device info from the local MC. */ 5251 msg.netfn = IPMI_NETFN_APP_REQUEST; 5252 msg.cmd = IPMI_GET_DEVICE_ID_CMD; 5253 msg.data = NULL; 5254 msg.data_len = 0; 5255 intf->null_user_handler = device_id_fetcher; 5256 ipmi_panic_request_and_wait(intf, &addr, &msg); 5257 5258 if (intf->local_event_generator) { 5259 /* Request the event receiver from the local MC. */ 5260 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST; 5261 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD; 5262 msg.data = NULL; 5263 msg.data_len = 0; 5264 intf->null_user_handler = event_receiver_fetcher; 5265 ipmi_panic_request_and_wait(intf, &addr, &msg); 5266 } 5267 intf->null_user_handler = NULL; 5268 5269 /* 5270 * Validate the event receiver. The low bit must not 5271 * be 1 (it must be a valid IPMB address), it cannot 5272 * be zero, and it must not be my address. 5273 */ 5274 if (((intf->event_receiver & 1) == 0) 5275 && (intf->event_receiver != 0) 5276 && (intf->event_receiver != intf->addrinfo[0].address)) { 5277 /* 5278 * The event receiver is valid, send an IPMB 5279 * message. 5280 */ 5281 ipmb = (struct ipmi_ipmb_addr *) &addr; 5282 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE; 5283 ipmb->channel = 0; /* FIXME - is this right? */ 5284 ipmb->lun = intf->event_receiver_lun; 5285 ipmb->slave_addr = intf->event_receiver; 5286 } else if (intf->local_sel_device) { 5287 /* 5288 * The event receiver was not valid (or was 5289 * me), but I am an SEL device, just dump it 5290 * in my SEL. 5291 */ 5292 si = (struct ipmi_system_interface_addr *) &addr; 5293 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 5294 si->channel = IPMI_BMC_CHANNEL; 5295 si->lun = 0; 5296 } else 5297 return; /* No where to send the event. */ 5298 5299 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */ 5300 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD; 5301 msg.data = data; 5302 msg.data_len = 16; 5303 5304 j = 0; 5305 while (*p) { 5306 int size = strlen(p); 5307 5308 if (size > 11) 5309 size = 11; 5310 data[0] = 0; 5311 data[1] = 0; 5312 data[2] = 0xf0; /* OEM event without timestamp. */ 5313 data[3] = intf->addrinfo[0].address; 5314 data[4] = j++; /* sequence # */ 5315 /* 5316 * Always give 11 bytes, so strncpy will fill 5317 * it with zeroes for me. 5318 */ 5319 strncpy(data+5, p, 11); 5320 p += size; 5321 5322 ipmi_panic_request_and_wait(intf, &addr, &msg); 5323 } 5324 } 5325 5326 static int has_panicked; 5327 5328 static int panic_event(struct notifier_block *this, 5329 unsigned long event, 5330 void *ptr) 5331 { 5332 struct ipmi_smi *intf; 5333 struct ipmi_user *user; 5334 5335 if (has_panicked) 5336 return NOTIFY_DONE; 5337 has_panicked = 1; 5338 5339 /* For every registered interface, set it to run to completion. */ 5340 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { 5341 if (!intf->handlers || intf->intf_num == -1) 5342 /* Interface is not ready. */ 5343 continue; 5344 5345 if (!intf->handlers->poll) 5346 continue; 5347 5348 /* 5349 * If we were interrupted while locking xmit_msgs_lock or 5350 * waiting_rcv_msgs_lock, the corresponding list may be 5351 * corrupted. In this case, drop items on the list for 5352 * the safety. 5353 */ 5354 if (!spin_trylock(&intf->xmit_msgs_lock)) { 5355 INIT_LIST_HEAD(&intf->xmit_msgs); 5356 INIT_LIST_HEAD(&intf->hp_xmit_msgs); 5357 } else 5358 spin_unlock(&intf->xmit_msgs_lock); 5359 5360 if (!spin_trylock(&intf->waiting_rcv_msgs_lock)) 5361 INIT_LIST_HEAD(&intf->waiting_rcv_msgs); 5362 else 5363 spin_unlock(&intf->waiting_rcv_msgs_lock); 5364 5365 intf->run_to_completion = 1; 5366 if (intf->handlers->set_run_to_completion) 5367 intf->handlers->set_run_to_completion(intf->send_info, 5368 1); 5369 5370 list_for_each_entry_rcu(user, &intf->users, link) { 5371 if (user->handler->ipmi_panic_handler) 5372 user->handler->ipmi_panic_handler( 5373 user->handler_data); 5374 } 5375 5376 send_panic_events(intf, ptr); 5377 } 5378 5379 return NOTIFY_DONE; 5380 } 5381 5382 /* Must be called with ipmi_interfaces_mutex held. */ 5383 static int ipmi_register_driver(void) 5384 { 5385 int rv; 5386 5387 if (drvregistered) 5388 return 0; 5389 5390 rv = driver_register(&ipmidriver.driver); 5391 if (rv) 5392 pr_err("Could not register IPMI driver\n"); 5393 else 5394 drvregistered = true; 5395 return rv; 5396 } 5397 5398 static struct notifier_block panic_block = { 5399 .notifier_call = panic_event, 5400 .next = NULL, 5401 .priority = 200 /* priority: INT_MAX >= x >= 0 */ 5402 }; 5403 5404 static int ipmi_init_msghandler(void) 5405 { 5406 int rv; 5407 5408 mutex_lock(&ipmi_interfaces_mutex); 5409 rv = ipmi_register_driver(); 5410 if (rv) 5411 goto out; 5412 if (initialized) 5413 goto out; 5414 5415 rv = init_srcu_struct(&ipmi_interfaces_srcu); 5416 if (rv) 5417 goto out; 5418 5419 remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq"); 5420 if (!remove_work_wq) { 5421 pr_err("unable to create ipmi-msghandler-remove-wq workqueue"); 5422 rv = -ENOMEM; 5423 goto out_wq; 5424 } 5425 5426 timer_setup(&ipmi_timer, ipmi_timeout, 0); 5427 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); 5428 5429 atomic_notifier_chain_register(&panic_notifier_list, &panic_block); 5430 5431 initialized = true; 5432 5433 out_wq: 5434 if (rv) 5435 cleanup_srcu_struct(&ipmi_interfaces_srcu); 5436 out: 5437 mutex_unlock(&ipmi_interfaces_mutex); 5438 return rv; 5439 } 5440 5441 static int __init ipmi_init_msghandler_mod(void) 5442 { 5443 int rv; 5444 5445 pr_info("version " IPMI_DRIVER_VERSION "\n"); 5446 5447 mutex_lock(&ipmi_interfaces_mutex); 5448 rv = ipmi_register_driver(); 5449 mutex_unlock(&ipmi_interfaces_mutex); 5450 5451 return rv; 5452 } 5453 5454 static void __exit cleanup_ipmi(void) 5455 { 5456 int count; 5457 5458 if (initialized) { 5459 destroy_workqueue(remove_work_wq); 5460 5461 atomic_notifier_chain_unregister(&panic_notifier_list, 5462 &panic_block); 5463 5464 /* 5465 * This can't be called if any interfaces exist, so no worry 5466 * about shutting down the interfaces. 5467 */ 5468 5469 /* 5470 * Tell the timer to stop, then wait for it to stop. This 5471 * avoids problems with race conditions removing the timer 5472 * here. 5473 */ 5474 atomic_set(&stop_operation, 1); 5475 del_timer_sync(&ipmi_timer); 5476 5477 initialized = false; 5478 5479 /* Check for buffer leaks. */ 5480 count = atomic_read(&smi_msg_inuse_count); 5481 if (count != 0) 5482 pr_warn("SMI message count %d at exit\n", count); 5483 count = atomic_read(&recv_msg_inuse_count); 5484 if (count != 0) 5485 pr_warn("recv message count %d at exit\n", count); 5486 5487 cleanup_srcu_struct(&ipmi_interfaces_srcu); 5488 } 5489 if (drvregistered) 5490 driver_unregister(&ipmidriver.driver); 5491 } 5492 module_exit(cleanup_ipmi); 5493 5494 module_init(ipmi_init_msghandler_mod); 5495 MODULE_LICENSE("GPL"); 5496 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); 5497 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface."); 5498 MODULE_VERSION(IPMI_DRIVER_VERSION); 5499 MODULE_SOFTDEP("post: ipmi_devintf"); 5500