// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_msghandler.c
 *
 * Incoming and outgoing message routing for an IPMI interface.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 */

#define pr_fmt(fmt) "IPMI message handler: " fmt
#define dev_fmt(fmt) pr_fmt(fmt)

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include <linux/nospec.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>

#define IPMI_DRIVER_VERSION "39.2"

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
static void smi_work(struct work_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg);

static bool initialized;
static bool drvregistered;

/* Numbers in this enumerator should be mapped to ipmi_panic_event_str */
enum ipmi_panic_event_op {
	IPMI_SEND_PANIC_EVENT_NONE,
	IPMI_SEND_PANIC_EVENT,
	IPMI_SEND_PANIC_EVENT_STRING,
	IPMI_SEND_PANIC_EVENT_MAX
};

/* Indices in this array should be mapped to enum ipmi_panic_event_op */
static const char *const ipmi_panic_event_str[] = { "none", "event", "string", NULL };

#ifdef CONFIG_IPMI_PANIC_STRING
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
#elif defined(CONFIG_IPMI_PANIC_EVENT)
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
#else
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
#endif

static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;

static int panic_op_write_handler(const char *val,
				  const struct kernel_param *kp)
{
	char valcp[16];
	int e;

	strscpy(valcp, val, sizeof(valcp));
	e = match_string(ipmi_panic_event_str, -1, strstrip(valcp));
	if (e < 0)
		return e;

	ipmi_send_panic_event = e;
	return 0;
}

static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
{
	const char *event_str;

	if (ipmi_send_panic_event >= IPMI_SEND_PANIC_EVENT_MAX)
		event_str = "???";
	else
		event_str = ipmi_panic_event_str[ipmi_send_panic_event];

	return sprintf(buffer, "%s\n", event_str);
}

static const struct kernel_param_ops panic_op_ops = {
	.set = panic_op_write_handler,
	.get = panic_op_read_handler
};
module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic.  Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
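
/*
 * Usage example (illustrative, not part of the driver): panic_op can
 * be set at module load time or, since the parameter is registered
 * with mode 0600, changed by root at runtime through sysfs:
 *
 *	modprobe ipmi_msghandler panic_op=string
 *	echo event > /sys/module/ipmi_msghandler/parameters/panic_op
 *	cat /sys/module/ipmi_msghandler/parameters/panic_op
 *
 * Valid values are the strings in ipmi_panic_event_str above.
 */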

#define MAX_EVENTS_IN_QUEUE	25

/* Remain in auto-maintenance mode for this amount of time (in ms). */
static unsigned long maintenance_mode_timeout_ms = 30000;
module_param(maintenance_mode_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(maintenance_mode_timeout_ms,
		 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");

/*
 * Don't let a message sit in a queue forever, always time it with at
 * least the max message timer.  This is in milliseconds.
 */
#define MAX_MSG_TIMEOUT		60000

/*
 * Timeout times below are in milliseconds, and are done off a 1
 * second timer.  So setting the value to 1000 would mean anything
 * between 0 and 1000ms.  So really the only reasonable minimum
 * setting is 2000ms, which is between 1 and 2 seconds.
 */

/* The default timeout for message retries. */
static unsigned long default_retry_ms = 2000;
module_param(default_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_retry_ms,
		 "The time (milliseconds) between retry sends");

/* The default timeout for maintenance mode message retries. */
static unsigned long default_maintenance_retry_ms = 3000;
module_param(default_maintenance_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_maintenance_retry_ms,
		 "The time (milliseconds) between retry sends in maintenance mode");

/* The default maximum number of retries */
static unsigned int default_max_retries = 4;
module_param(default_max_retries, uint, 0644);
MODULE_PARM_DESC(default_max_retries,
		 "The maximum number of times a message send is retried");

/* The default maximum number of users that may register. */
static unsigned int max_users = 30;
module_param(max_users, uint, 0644);
MODULE_PARM_DESC(max_users,
		 "The maximum number of users that may use the IPMI stack at one time.");

/* The default maximum number of messages a user may have outstanding. */
static unsigned int max_msgs_per_user = 100;
module_param(max_msgs_per_user, uint, 0644);
MODULE_PARM_DESC(max_msgs_per_user,
		 "The maximum number of messages a user may have outstanding.");

/* Call every ~1000 ms. */
#define IPMI_TIMEOUT_TIME	1000

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)

/*
 * Request events from the queue every second (this is the number of
 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
 * future, IPMI will add a way to know immediately if an event is in
 * the queue and this silliness can go away.
 */
#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))

/* How long should we cache dynamic device IDs? */
#define IPMI_DYN_DEV_ID_EXPIRY	(10 * HZ)
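
/*
 * Worked example of the arithmetic above: with the default
 * IPMI_TIMEOUT_TIME of 1000 ms, IPMI_TIMEOUT_JIFFIES evaluates to HZ
 * (one second worth of jiffies) and IPMI_REQUEST_EV_TIME to 1, i.e.
 * the periodic timer fires roughly once a second and queued events
 * are requested on every tick.
 */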

/*
 * The main "user" data structure.
 */
struct ipmi_user {
	struct list_head link;

	struct kref refcount;
	refcount_t destroyed;

	/* The upper layer that handles receive messages. */
	const struct ipmi_user_hndl *handler;
	void *handler_data;

	/* The interface this user is bound to. */
	struct ipmi_smi *intf;

	/* Does this user receive IPMI events? */
	bool gets_events;

	atomic_t nr_msgs;
};

static void free_ipmi_user(struct kref *ref)
{
	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);

	vfree(user);
}

static void release_ipmi_user(struct ipmi_user *user)
{
	kref_put(&user->refcount, free_ipmi_user);
}

static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user)
{
	if (!kref_get_unless_zero(&user->refcount))
		return NULL;
	return user;
}

struct cmd_rcvr {
	struct list_head link;

	struct ipmi_user *user;
	unsigned char netfn;
	unsigned char cmd;
	unsigned int chans;

	/*
	 * This is used to form a linked list during mass deletion.
	 * Since this is in an RCU list, we cannot use the link above
	 * or change any data until the RCU period completes.  So we
	 * use this next variable during mass deletion so we can have
	 * a list and don't have to wait and restart the search on
	 * every individual deletion of a command.
	 */
	struct cmd_rcvr *next;
};

struct seq_table {
	unsigned int inuse : 1;
	unsigned int broadcast : 1;

	unsigned long timeout;
	unsigned long orig_timeout;
	unsigned int retries_left;

	/*
	 * To verify on an incoming send message response that this is
	 * the message that the response is for, we keep a sequence id
	 * and increment it every time we send a message.
	 */
	long seqid;

	/*
	 * This is held so we can properly respond to the message on a
	 * timeout, and it is used to hold the temporary data for
	 * retransmission, too.
	 */
	struct ipmi_recv_msg *recv_msg;
};

/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid)			\
	do {							\
		seq = (((msgid) >> 26) & 0x3f);			\
		seqid = ((msgid) & 0x3ffffff);			\
	} while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
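
/*
 * Example (illustrative): the macros above round-trip a sequence table
 * index and a sequence id through a single long msgid.  The index
 * survives only in its low 6 bits and the seqid in its low 26 bits:
 *
 *	long msgid = STORE_SEQ_IN_MSGID(5, 0x1234);	// 0x14001234
 *	unsigned char seq;
 *	unsigned long seqid;
 *
 *	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
 *	// Now seq == 5 and seqid == 0x1234.
 */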

#define IPMI_MAX_CHANNELS	16
struct ipmi_channel {
	unsigned char medium;
	unsigned char protocol;
};

struct ipmi_channel_set {
	struct ipmi_channel c[IPMI_MAX_CHANNELS];
};

struct ipmi_my_addrinfo {
	/*
	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
	 * but may be changed by the user.
	 */
	unsigned char address;

	/*
	 * My LUN.  This should generally stay the SMS LUN, but just in
	 * case...
	 */
	unsigned char lun;
};

/*
 * Note that the product id, manufacturer id, guid, and device id are
 * immutable in this structure, so dyn_mutex is not required for
 * accessing those.  If those change on a BMC, a new BMC is allocated.
 */
struct bmc_device {
	struct platform_device pdev;
	struct list_head intfs;		/* Interfaces on this BMC. */
	struct ipmi_device_id id;
	struct ipmi_device_id fetch_id;
	int dyn_id_set;
	unsigned long dyn_id_expiry;
	struct mutex dyn_mutex;		/* Protects id, intfs, & dyn* */
	guid_t guid;
	guid_t fetch_guid;
	int dyn_guid_set;
	struct kref usecount;
	struct work_struct remove_work;
	unsigned char cc;		/* completion code */
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)

static struct workqueue_struct *bmc_remove_work_wq;

static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			     struct ipmi_device_id *id,
			     bool *guid_set, guid_t *guid);

/*
 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 * structure.
 */
enum ipmi_stat_indexes {
	/* Commands we got from the user that were invalid. */
	IPMI_STAT_sent_invalid_commands = 0,

	/* Commands we sent to the MC. */
	IPMI_STAT_sent_local_commands,

	/* Responses from the MC that were delivered to a user. */
	IPMI_STAT_handled_local_responses,

	/* Responses from the MC that were not delivered to a user. */
	IPMI_STAT_unhandled_local_responses,

	/* Commands we sent out to the IPMB bus. */
	IPMI_STAT_sent_ipmb_commands,

	/* Commands sent on the IPMB that had errors on the SEND CMD */
	IPMI_STAT_sent_ipmb_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_ipmb_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_ipmb_commands,

	/*
	 * This is like above, but for broadcasts.  Broadcasts are
	 * *not* included in the above count (they are expected to
	 * time out).
	 */
	IPMI_STAT_timed_out_ipmb_broadcasts,

	/* Responses I have sent to the IPMB bus. */
	IPMI_STAT_sent_ipmb_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_ipmb_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_ipmb_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_ipmb_responses,

	/* Commands we sent out to the LAN. */
	IPMI_STAT_sent_lan_commands,

	/* Commands sent on the LAN that had errors on the SEND CMD */
	IPMI_STAT_sent_lan_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_lan_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_lan_commands,

	/* Responses I have sent to the LAN. */
	IPMI_STAT_sent_lan_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_lan_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_lan_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_lan_responses,

	/* The command was delivered to the user. */
	IPMI_STAT_handled_commands,

	/* The command had invalid data in it. */
	IPMI_STAT_invalid_commands,

	/* The command didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_commands,

	/* Invalid data in an event. */
	IPMI_STAT_invalid_events,

	/* Events that were received with the proper format. */
	IPMI_STAT_events,

	/* Retransmissions on IPMB that failed. */
	IPMI_STAT_dropped_rexmit_ipmb_commands,

	/* Retransmissions on LAN that failed. */
	IPMI_STAT_dropped_rexmit_lan_commands,

	/* This *must* remain last, add new values above this. */
	IPMI_NUM_STATS
};


#define IPMI_IPMB_NUM_SEQ	64
struct ipmi_smi {
	struct module *owner;

	/* What interface number are we? */
	int intf_num;

	struct kref refcount;

	/* Set when the interface is being unregistered. */
	bool in_shutdown;

	/* Used for a list of interfaces. */
	struct list_head link;

	/*
	 * The list of upper layers that are using me.
	 */
	struct list_head users;
	struct mutex users_mutex;
	atomic_t nr_users;
	struct device_attribute nr_users_devattr;
	struct device_attribute nr_msgs_devattr;


	/* Used for wake ups at startup. */
	wait_queue_head_t waitq;

	/*
	 * Prevents the interface from being unregistered when the
	 * interface is used by being looked up through the BMC
	 * structure.
	 */
	struct mutex bmc_reg_mutex;

	struct bmc_device tmp_bmc;
	struct bmc_device *bmc;
	bool bmc_registered;
	struct list_head bmc_link;
	char *my_dev_name;
	bool in_bmc_register;	/* Handle recursive situations.  Yuck. */
	struct work_struct bmc_reg_work;

	const struct ipmi_smi_handlers *handlers;
	void *send_info;

	/* Driver-model device for the system interface. */
	struct device *si_dev;

	/*
	 * A table of sequence numbers for this interface.  We use the
	 * sequence numbers for IPMB messages that go out of the
	 * interface to match them up with their responses.  A routine
	 * is called periodically to time the items in this list.
	 */
	spinlock_t seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;

	/*
	 * Messages queued for delivery to the user.
	 */
	struct mutex user_msgs_mutex;
	struct list_head user_msgs;

	/*
	 * Messages queued for processing.  If processing fails (out
	 * of memory for instance), they will stay in here to be
	 * processed later in a periodic timer interrupt.  The
	 * workqueue is for handling received messages directly from
	 * the handler.
	 */
	spinlock_t waiting_rcv_msgs_lock;
	struct list_head waiting_rcv_msgs;
	atomic_t watchdog_pretimeouts_to_deliver;
	struct work_struct smi_work;

	spinlock_t xmit_msgs_lock;
	struct list_head xmit_msgs;
	struct ipmi_smi_msg *curr_msg;
	struct list_head hp_xmit_msgs;

	/*
	 * The list of command receivers that are registered for commands
	 * on this interface.
	 */
	struct mutex cmd_rcvrs_mutex;
	struct list_head cmd_rcvrs;

	/*
	 * Events that were queued because no one was there to receive
	 * them.
	 */
	struct mutex events_mutex;	/* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int waiting_events_count; /* How many events in queue? */
	char event_msg_printed;

	/* How many users are waiting for events? */
	atomic_t event_waiters;
	unsigned int ticks_to_req_ev;

	spinlock_t watch_lock;	/* For dealing with watch stuff below. */

	/* How many users are waiting for commands? */
	unsigned int command_waiters;

	/* How many users are waiting for watchdogs? */
	unsigned int watchdog_waiters;

	/* How many users are waiting for message responses? */
	unsigned int response_waiters;

	/*
	 * Tells what the lower layer has last been asked to watch for,
	 * messages and/or watchdogs.  Protected by watch_lock.
	 */
	unsigned int last_watch_mask;

	/*
	 * The event receiver for my BMC, only really used at panic
	 * shutdown as a place to store this.
	 */
	unsigned char event_receiver;
	unsigned char event_receiver_lun;
	unsigned char local_sel_device;
	unsigned char local_event_generator;

	/* For handling of maintenance mode. */
	int maintenance_mode;
	bool maintenance_mode_enable;
	int auto_maintenance_timeout;
	spinlock_t maintenance_mode_lock; /* Used in a timer... */

	/*
	 * If we are doing maintenance on something on IPMB, extend
	 * the timeout time to avoid timeouts writing firmware and
	 * such.
	 */
	int ipmb_maintenance_mode_timeout;

	/*
	 * A cheap hack, if this is non-null and a message to an
	 * interface comes in with a NULL user, call this routine with
	 * it.  Note that the message will still be freed by the
	 * caller.  This only works on the system interface.
	 *
	 * Protected by bmc_reg_mutex.
	 */
	void (*null_user_handler)(struct ipmi_smi *intf,
				  struct ipmi_recv_msg *msg);

	/*
	 * When we are scanning the channels for an SMI, this will
	 * tell which channel we are scanning.
	 */
	int curr_channel;

	/* Channel information */
	struct ipmi_channel_set *channel_list;
	unsigned int curr_working_cset; /* First index into the following. */
	struct ipmi_channel_set wchannels[2];
	struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
	bool channels_ready;

	atomic_t stats[IPMI_NUM_STATS];

	/*
	 * A duplicate of the run_to_completion flag in the smi_info
	 * and similar low-level structures, kept here to decrease the
	 * number of parameters passed around by the low-level IPMI
	 * code.
	 */
	int run_to_completion;
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)

static void __get_guid(struct ipmi_smi *intf);
static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
static int __ipmi_bmc_register(struct ipmi_smi *intf,
			       struct ipmi_device_id *id,
			       bool guid_set, guid_t *guid, int intf_num);
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);


/*
 * The driver model view of the IPMI messaging driver.
 */
static struct platform_driver ipmidriver = {
	.driver = {
		.name = "ipmi",
		.bus = &platform_bus_type
	}
};
/*
 * This mutex keeps us from adding the same BMC twice.
 */
static DEFINE_MUTEX(ipmidriver_mutex);

static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);

/*
 * List of watchers that want to know when SMIs are added and deleted.
 */
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);

#define ipmi_inc_stat(intf, stat) \
	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))

static const char * const addr_src_to_str[] = {
	"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
	"device-tree", "platform"
};

const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
{
	if (src >= SI_LAST)
		src = 0; /* Invalid */
	return addr_src_to_str[src];
}
EXPORT_SYMBOL(ipmi_addr_src_to_str);

static int is_lan_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
}

static int is_ipmb_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
}

static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}

static int is_ipmb_direct_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE;
}

static void free_recv_msg_list(struct list_head *q)
{
	struct ipmi_recv_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_recv_msg(msg);
	}
}

static void free_smi_msg_list(struct list_head *q)
{
	struct ipmi_smi_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_smi_msg(msg);
	}
}

static void intf_free(struct kref *ref)
{
	struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
	int i;
	struct cmd_rcvr *rcvr, *rcvr2;

	free_smi_msg_list(&intf->waiting_rcv_msgs);
	free_recv_msg_list(&intf->waiting_events);

	/*
	 * Wholesale remove all the entries from the list in the
	 * interface.  No need for locks, this is single-threaded.
	 */
	list_for_each_entry_safe(rcvr, rcvr2, &intf->cmd_rcvrs, link)
		kfree(rcvr);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if ((intf->seq_table[i].inuse)
		    && (intf->seq_table[i].recv_msg))
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
	}

	kfree(intf);
}

int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
	struct ipmi_smi *intf;
	unsigned int count = 0, i;
	int *interfaces = NULL;
	struct device **devices = NULL;
	int rv = 0;

	/*
	 * Make sure the driver is actually initialized; this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	mutex_lock(&smi_watchers_mutex);

	list_add(&watcher->link, &smi_watchers);

	/*
	 * Build an array of ipmi interfaces and fill it in, and
	 * another array of the devices.  We can't call the callback
	 * with ipmi_interfaces_mutex held.  smi_watchers_mutex will
	 * keep things in order for the user.
	 */
	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry(intf, &ipmi_interfaces, link)
		count++;
	if (count > 0) {
		interfaces = kmalloc_array(count, sizeof(*interfaces),
					   GFP_KERNEL);
		if (!interfaces) {
			rv = -ENOMEM;
		} else {
			devices = kmalloc_array(count, sizeof(*devices),
						GFP_KERNEL);
			if (!devices) {
				kfree(interfaces);
				interfaces = NULL;
				rv = -ENOMEM;
			}
		}
		count = 0;
	}
	if (interfaces) {
		list_for_each_entry(intf, &ipmi_interfaces, link) {
			int intf_num = READ_ONCE(intf->intf_num);

			if (intf_num == -1)
				continue;
			devices[count] = intf->si_dev;
			interfaces[count++] = intf_num;
		}
	}
	mutex_unlock(&ipmi_interfaces_mutex);

	if (interfaces) {
		for (i = 0; i < count; i++)
			watcher->new_smi(interfaces[i], devices[i]);
		kfree(interfaces);
		kfree(devices);
	}

	mutex_unlock(&smi_watchers_mutex);

	return rv;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);

int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
{
	mutex_lock(&smi_watchers_mutex);
	list_del(&watcher->link);
	mutex_unlock(&smi_watchers_mutex);
	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
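
/*
 * Example (illustrative sketch, not part of the driver): a minimal
 * watcher.  Note that new_smi() is also called, at registration time,
 * once for every interface that already exists:
 *
 *	static void example_new_smi(int if_num, struct device *dev)
 *	{
 *		dev_info(dev, "IPMI interface %d added\n", if_num);
 *	}
 *
 *	static void example_smi_gone(int if_num)
 *	{
 *		pr_info("IPMI interface %d removed\n", if_num);
 *	}
 *
 *	static struct ipmi_smi_watcher example_watcher = {
 *		.owner    = THIS_MODULE,
 *		.new_smi  = example_new_smi,
 *		.smi_gone = example_smi_gone,
 *	};
 *
 *	rv = ipmi_smi_watcher_register(&example_watcher);
 */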

static void
call_smi_watchers(int i, struct device *dev)
{
	struct ipmi_smi_watcher *w;

	list_for_each_entry(w, &smi_watchers, link) {
		if (try_module_get(w->owner)) {
			w->new_smi(i, dev);
			module_put(w->owner);
		}
	}
}

static int
ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
{
	if (addr1->addr_type != addr2->addr_type)
		return 0;

	if (addr1->channel != addr2->channel)
		return 0;

	if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		struct ipmi_system_interface_addr *smi_addr1
			= (struct ipmi_system_interface_addr *) addr1;
		struct ipmi_system_interface_addr *smi_addr2
			= (struct ipmi_system_interface_addr *) addr2;
		return (smi_addr1->lun == smi_addr2->lun);
	}

	if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
		struct ipmi_ipmb_addr *ipmb_addr1
			= (struct ipmi_ipmb_addr *) addr1;
		struct ipmi_ipmb_addr *ipmb_addr2
			= (struct ipmi_ipmb_addr *) addr2;

		return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
			&& (ipmb_addr1->lun == ipmb_addr2->lun));
	}

	if (is_ipmb_direct_addr(addr1)) {
		struct ipmi_ipmb_direct_addr *daddr1
			= (struct ipmi_ipmb_direct_addr *) addr1;
		struct ipmi_ipmb_direct_addr *daddr2
			= (struct ipmi_ipmb_direct_addr *) addr2;

		return daddr1->slave_addr == daddr2->slave_addr &&
			daddr1->rq_lun == daddr2->rq_lun &&
			daddr1->rs_lun == daddr2->rs_lun;
	}

	if (is_lan_addr(addr1)) {
		struct ipmi_lan_addr *lan_addr1
			= (struct ipmi_lan_addr *) addr1;
		struct ipmi_lan_addr *lan_addr2
			= (struct ipmi_lan_addr *) addr2;

		return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
			&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
			&& (lan_addr1->session_handle
			    == lan_addr2->session_handle)
			&& (lan_addr1->lun == lan_addr2->lun));
	}

	return 1;
}
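
/*
 * Example (illustrative): a system interface ("BMC") address in the
 * form that ipmi_validate_addr() below accepts:
 *
 *	struct ipmi_system_interface_addr addr = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel   = IPMI_BMC_CHANNEL,
 *		.lun       = 0,
 *	};
 *
 *	rv = ipmi_validate_addr((struct ipmi_addr *) &addr, sizeof(addr));
 */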

int ipmi_validate_addr(struct ipmi_addr *addr, int len)
{
	if (len < sizeof(struct ipmi_system_interface_addr))
		return -EINVAL;

	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		if (addr->channel != IPMI_BMC_CHANNEL)
			return -EINVAL;
		return 0;
	}

	if ((addr->channel == IPMI_BMC_CHANNEL)
	    || (addr->channel >= IPMI_MAX_CHANNELS)
	    || (addr->channel < 0))
		return -EINVAL;

	if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
		if (len < sizeof(struct ipmi_ipmb_addr))
			return -EINVAL;
		return 0;
	}

	if (is_ipmb_direct_addr(addr)) {
		struct ipmi_ipmb_direct_addr *daddr = (void *) addr;

		if (addr->channel != 0)
			return -EINVAL;
		if (len < sizeof(struct ipmi_ipmb_direct_addr))
			return -EINVAL;

		if (daddr->slave_addr & 0x01)
			return -EINVAL;
		if (daddr->rq_lun >= 4)
			return -EINVAL;
		if (daddr->rs_lun >= 4)
			return -EINVAL;
		return 0;
	}

	if (is_lan_addr(addr)) {
		if (len < sizeof(struct ipmi_lan_addr))
			return -EINVAL;
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(ipmi_validate_addr);

unsigned int ipmi_addr_length(int addr_type)
{
	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
		return sizeof(struct ipmi_system_interface_addr);

	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
	    || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
		return sizeof(struct ipmi_ipmb_addr);

	if (addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE)
		return sizeof(struct ipmi_ipmb_direct_addr);

	if (addr_type == IPMI_LAN_ADDR_TYPE)
		return sizeof(struct ipmi_lan_addr);

	return 0;
}
EXPORT_SYMBOL(ipmi_addr_length);

static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	int rv = 0;

	if (!msg->user) {
		/* Special handling for NULL users. */
		if (intf->null_user_handler) {
			intf->null_user_handler(intf, msg);
		} else {
			/* No handler, so give up. */
			rv = -EINVAL;
		}
		ipmi_free_recv_msg(msg);
	} else if (oops_in_progress) {
		/*
		 * If we are running in the panic context, calling the
		 * receive handler doesn't have much meaning and has a
		 * deadlock risk, so simply skip it in that case.  The
		 * user's message count must be dropped before the
		 * message is freed, since the free may release the
		 * last reference to the message.
		 */
		atomic_dec(&msg->user->nr_msgs);
		ipmi_free_recv_msg(msg);
	} else {
		struct ipmi_user *user = acquire_ipmi_user(msg->user);

		if (user) {
			/* Deliver it in smi_work. */
			mutex_lock(&intf->user_msgs_mutex);
			list_add_tail(&msg->link, &intf->user_msgs);
			mutex_unlock(&intf->user_msgs_mutex);
			queue_work(system_wq, &intf->smi_work);
			/* User release will happen in the work queue. */
		} else {
			/* User went away, give up. */
			ipmi_free_recv_msg(msg);
			rv = -EINVAL;
		}
	}

	return rv;
}

static void deliver_local_response(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if (deliver_response(intf, msg))
		ipmi_inc_stat(intf, unhandled_local_responses);
	else
		ipmi_inc_stat(intf, handled_local_responses);
}

static void deliver_err_response(struct ipmi_smi *intf,
				 struct ipmi_recv_msg *msg, int err)
{
	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	msg->msg_data[0] = err;
	msg->msg.netfn |= 1; /* Convert to a response. */
	msg->msg.data_len = 1;
	msg->msg.data = msg->msg_data;
	deliver_local_response(intf, msg);
}
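
/*
 * Note on the netfn manipulation above: IPMI encodes the
 * request/response direction in the bottom bit of the network
 * function, so "netfn |= 1" turns a request into the matching
 * response.  For example, an App request (netfn 0x06) becomes an App
 * response (netfn 0x07).
 */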

static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters++;

	if ((intf->last_watch_mask & flags) != flags) {
		intf->last_watch_mask |= flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}

static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters--;

	flags = 0;
	if (intf->response_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
	if (intf->watchdog_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
	if (intf->command_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;

	if (intf->last_watch_mask != flags) {
		intf->last_watch_mask = flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}

/*
 * Find the next sequence number not being used and add the given
 * message with the given timeout to the sequence table.  This must be
 * called with the interface's seq_lock held.
 */
static int intf_next_seq(struct ipmi_smi *intf,
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long timeout,
			 int retries,
			 int broadcast,
			 unsigned char *seq,
			 long *seqid)
{
	int rv = 0;
	unsigned int i;

	if (timeout == 0)
		timeout = default_retry_ms;
	if (retries < 0)
		retries = default_max_retries;

	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
	     i = (i+1)%IPMI_IPMB_NUM_SEQ) {
		if (!intf->seq_table[i].inuse)
			break;
	}

	if (!intf->seq_table[i].inuse) {
		intf->seq_table[i].recv_msg = recv_msg;

		/*
		 * Start with the maximum timeout, when the send response
		 * comes in we will start the real timer.
		 */
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].broadcast = broadcast;
		intf->seq_table[i].inuse = 1;
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seq = i;
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		need_waiter(intf);
	} else {
		rv = -EAGAIN;
	}

	return rv;
}

/*
 * Return the receive message for the given sequence number and
 * release the sequence number so it can be reused.  Some other data
 * is passed in to be sure the message matches up correctly (to help
 * guard against messages coming in after their timeout and the
 * sequence number being reused).
 */
static int intf_find_seq(struct ipmi_smi *intf,
			 unsigned char seq,
			 short channel,
			 unsigned char cmd,
			 unsigned char netfn,
			 struct ipmi_addr *addr,
			 struct ipmi_recv_msg **recv_msg)
{
	int rv = -ENODEV;
	unsigned long flags;

	if (seq >= IPMI_IPMB_NUM_SEQ)
		return -EINVAL;

	spin_lock_irqsave(&intf->seq_lock, flags);
	if (intf->seq_table[seq].inuse) {
		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;

		if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
		    && (msg->msg.netfn == netfn)
		    && (ipmi_addr_equal(addr, &msg->addr))) {
			*recv_msg = msg;
			intf->seq_table[seq].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			rv = 0;
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	return rv;
}


/* Start the timer for a specific sequence table entry. */
static int intf_start_seq_timer(struct ipmi_smi *intf,
				long msgid)
{
	int rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&intf->seq_lock, flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];

		ent->timeout = ent->orig_timeout;
		rv = 0;
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	return rv;
}

/* Got an error for the send message for a specific sequence number. */
static int intf_err_seq(struct ipmi_smi *intf,
			long msgid,
			unsigned int err)
{
	int rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;
	struct ipmi_recv_msg *msg = NULL;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&intf->seq_lock, flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];

		ent->inuse = 0;
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		msg = ent->recv_msg;
		rv = 0;
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	if (msg)
		deliver_err_response(intf, msg, err);

	return rv;
}

int ipmi_create_user(unsigned int if_num,
		     const struct ipmi_user_hndl *handler,
		     void *handler_data,
		     struct ipmi_user **user)
{
	unsigned long flags;
	struct ipmi_user *new_user = NULL;
	int rv = 0;
	struct ipmi_smi *intf;

	/*
	 * There is no module usecount here, because it's not
	 * required.  Since this can only be used by and called from
	 * other modules, they will implicitly use this module, and
	 * thus this can't be removed unless the other modules are
	 * removed.
	 */

	if (handler == NULL)
		return -EINVAL;

	/*
	 * Make sure the driver is actually initialized; this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	/* Not found, return an error */
	rv = -EINVAL;
	goto out_unlock;

 found:
	if (intf->in_shutdown) {
		rv = -ENODEV;
		goto out_unlock;
	}

	if (atomic_add_return(1, &intf->nr_users) > max_users) {
		rv = -EBUSY;
		goto out_kfree;
	}

	new_user = vzalloc(sizeof(*new_user));
	if (!new_user) {
		rv = -ENOMEM;
		goto out_kfree;
	}

	if (!try_module_get(intf->owner)) {
		rv = -ENODEV;
		goto out_kfree;
	}

	/* Note that each existing user holds a refcount to the interface. */
	kref_get(&intf->refcount);

	atomic_set(&new_user->nr_msgs, 0);
	kref_init(&new_user->refcount);
	refcount_set(&new_user->destroyed, 1);
	kref_get(&new_user->refcount); /* Destroy owns a refcount. */
	new_user->handler = handler;
	new_user->handler_data = handler_data;
	new_user->intf = intf;
	new_user->gets_events = false;

	mutex_lock(&intf->users_mutex);
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_add(&new_user->link, &intf->users);
	spin_unlock_irqrestore(&intf->seq_lock, flags);
	mutex_unlock(&intf->users_mutex);

	if (handler->ipmi_watchdog_pretimeout)
		/* User wants pretimeouts, so make sure to watch for them. */
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);

 out_kfree:
	if (rv) {
		/* Only paths that took the nr_users slot get here. */
		atomic_dec(&intf->nr_users);
		vfree(new_user);
	} else {
		*user = new_user;
	}
 out_unlock:
	mutex_unlock(&ipmi_interfaces_mutex);
	return rv;
}
EXPORT_SYMBOL(ipmi_create_user);
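
/*
 * Example (illustrative sketch, assuming interface 0 exists): a
 * minimal in-kernel user.  The receive handler is given each
 * ipmi_recv_msg and is expected to free it when done:
 *
 *	static void example_recv(struct ipmi_recv_msg *msg,
 *				 void *user_msg_data)
 *	{
 *		pr_info("IPMI msg, cmd 0x%2.2x\n", msg->msg.cmd);
 *		ipmi_free_recv_msg(msg);
 *	}
 *
 *	static const struct ipmi_user_hndl example_hndl = {
 *		.ipmi_recv_hndl = example_recv,
 *	};
 *
 *	struct ipmi_user *user;
 *
 *	rv = ipmi_create_user(0, &example_hndl, NULL, &user);
 *	...
 *	ipmi_destroy_user(user);
 */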

int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
	int rv = -EINVAL;
	struct ipmi_smi *intf;

	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num) {
			if (!intf->handlers->get_smi_info)
				rv = -ENOTTY;
			else
				rv = intf->handlers->get_smi_info(intf->send_info, data);
			break;
		}
	}
	mutex_unlock(&ipmi_interfaces_mutex);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);

/* Must be called with intf->users_mutex held. */
static void _ipmi_destroy_user(struct ipmi_user *user)
{
	struct ipmi_smi *intf = user->intf;
	int i;
	unsigned long flags;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	struct module *owner;

	if (!refcount_dec_if_one(&user->destroyed))
		return;

	if (user->handler->shutdown)
		user->handler->shutdown(user->handler_data);

	if (user->handler->ipmi_watchdog_pretimeout)
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);

	if (user->gets_events)
		atomic_dec(&intf->event_waiters);

	/* Remove the user from the interface's list and sequence table. */
	list_del(&user->link);
	atomic_dec(&intf->nr_users);

	spin_lock_irqsave(&intf->seq_lock, flags);
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if (intf->seq_table[i].inuse
		    && (intf->seq_table[i].recv_msg->user == user)) {
			intf->seq_table[i].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	/*
	 * Remove the user from the command receiver's table.  First
	 * we build a list of everything (not using the standard link,
	 * since other things may be using it till we do
	 * synchronize_rcu()) then free everything in that list.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if (rcvr->user == user) {
			list_del_rcu(&rcvr->link);
			rcvr->next = rcvrs;
			rcvrs = rcvr;
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	while (rcvrs) {
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	release_ipmi_user(user);

	owner = intf->owner;
	kref_put(&intf->refcount, intf_free);
	module_put(owner);
}

void ipmi_destroy_user(struct ipmi_user *user)
{
	struct ipmi_smi *intf = user->intf;

	mutex_lock(&intf->users_mutex);
	_ipmi_destroy_user(user);
	mutex_unlock(&intf->users_mutex);

	kref_put(&user->refcount, free_ipmi_user);
}
EXPORT_SYMBOL(ipmi_destroy_user);

int ipmi_get_version(struct ipmi_user *user,
		     unsigned char *major,
		     unsigned char *minor)
{
	struct ipmi_device_id id;
	int rv;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
	if (!rv) {
		*major = ipmi_version_major(&id);
		*minor = ipmi_version_minor(&id);
	}
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_version);

int ipmi_set_my_address(struct ipmi_user *user,
			unsigned int channel,
			unsigned char address)
{
	int rv = 0;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].address = address;
	}
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_address);

int ipmi_get_my_address(struct ipmi_user *user,
			unsigned int channel,
			unsigned char *address)
{
	int rv = 0;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].address;
	}
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_address);

int ipmi_set_my_LUN(struct ipmi_user *user,
		    unsigned int channel,
		    unsigned char LUN)
{
	int rv = 0;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].lun = LUN & 0x3;
	}
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_LUN);

int ipmi_get_my_LUN(struct ipmi_user *user,
		    unsigned int channel,
		    unsigned char *address)
{
	int rv = 0;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].lun;
	}
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_LUN);

int ipmi_get_maintenance_mode(struct ipmi_user *user)
{
	int mode;
	unsigned long flags;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
	mode = user->intf->maintenance_mode;
	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
	release_ipmi_user(user);

	return mode;
}
EXPORT_SYMBOL(ipmi_get_maintenance_mode);

static void maintenance_mode_update(struct ipmi_smi *intf)
{
	if (intf->handlers->set_maintenance_mode)
		intf->handlers->set_maintenance_mode(
			intf->send_info, intf->maintenance_mode_enable);
}

int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
{
	int rv = 0;
	unsigned long flags;
	struct ipmi_smi *intf = user->intf;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
	if (intf->maintenance_mode != mode) {
		switch (mode) {
		case IPMI_MAINTENANCE_MODE_AUTO:
			intf->maintenance_mode_enable
				= (intf->auto_maintenance_timeout > 0);
			break;

		case IPMI_MAINTENANCE_MODE_OFF:
			intf->maintenance_mode_enable = false;
			break;

		case IPMI_MAINTENANCE_MODE_ON:
			intf->maintenance_mode_enable = true;
			break;

		default:
			rv = -EINVAL;
			goto out_unlock;
		}
		intf->maintenance_mode = mode;

		maintenance_mode_update(intf);
	}
 out_unlock:
	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_maintenance_mode);
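
/*
 * Usage example (illustrative sketch): a user doing something like a
 * firmware update can pin the interface in maintenance mode for the
 * duration instead of relying on the automatic detection of
 * reset/firmware commands:
 *
 *	rv = ipmi_set_maintenance_mode(user, IPMI_MAINTENANCE_MODE_ON);
 *	... send the maintenance commands ...
 *	rv = ipmi_set_maintenance_mode(user, IPMI_MAINTENANCE_MODE_AUTO);
 */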

int ipmi_set_gets_events(struct ipmi_user *user, bool val)
{
	struct ipmi_smi *intf = user->intf;
	struct ipmi_recv_msg *msg, *msg2;
	struct list_head msgs;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	INIT_LIST_HEAD(&msgs);

	mutex_lock(&intf->events_mutex);
	if (user->gets_events == val)
		goto out;

	user->gets_events = val;

	if (val) {
		if (atomic_inc_return(&intf->event_waiters) == 1)
			need_waiter(intf);
	} else {
		atomic_dec(&intf->event_waiters);
	}

	/* Deliver any queued events. */
	while (user->gets_events && !list_empty(&intf->waiting_events)) {
		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
			list_move_tail(&msg->link, &msgs);
		intf->waiting_events_count = 0;
		if (intf->event_msg_printed) {
			dev_warn(intf->si_dev, "Event queue no longer full\n");
			intf->event_msg_printed = 0;
		}

		list_for_each_entry_safe(msg, msg2, &msgs, link) {
			msg->user = user;
			kref_get(&user->refcount);
			deliver_local_response(intf, msg);
		}
	}

 out:
	mutex_unlock(&intf->events_mutex);
	release_ipmi_user(user);

	return 0;
}
EXPORT_SYMBOL(ipmi_set_gets_events);

static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
				      unsigned char netfn,
				      unsigned char cmd,
				      unsigned char chan)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
		    && (rcvr->chans & (1 << chan)))
			return rcvr;
	}
	return NULL;
}

static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
				 unsigned char netfn,
				 unsigned char cmd,
				 unsigned int chans)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
		    && (rcvr->chans & chans))
			return 0;
	}
	return 1;
}
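
/*
 * Example (illustrative): "chans" is a bitmask with bit N covering
 * channel N.  A receiver registered with chans == 0x3 claims the
 * command on channels 0 and 1 only; is_cmd_rcvr_exclusive() above
 * then rejects a second registration of the same netfn/cmd pair
 * whose mask overlaps (e.g. chans == 0x2) but allows a disjoint one
 * (e.g. chans == 0x4).
 */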

int ipmi_register_for_cmd(struct ipmi_user *user,
			  unsigned char netfn,
			  unsigned char cmd,
			  unsigned int chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	int rv = 0;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
	if (!rcvr) {
		rv = -ENOMEM;
		goto out_release;
	}
	rcvr->cmd = cmd;
	rcvr->netfn = netfn;
	rcvr->chans = chans;
	rcvr->user = user;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	/* Make sure the command/netfn is not already registered. */
	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
		rv = -EBUSY;
		goto out_unlock;
	}

	smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);

	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);

 out_unlock:
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	if (rv)
		kfree(rcvr);
 out_release:
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_register_for_cmd);

int ipmi_unregister_for_cmd(struct ipmi_user *user,
			    unsigned char netfn,
			    unsigned char cmd,
			    unsigned int chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	int i, rv = -ENOENT;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
		if (((1 << i) & chans) == 0)
			continue;
		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
		if (rcvr == NULL)
			continue;
		if (rcvr->user == user) {
			rv = 0;
			rcvr->chans &= ~chans;
			if (rcvr->chans == 0) {
				list_del_rcu(&rcvr->link);
				rcvr->next = rcvrs;
				rcvrs = rcvr;
			}
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	release_ipmi_user(user);
	while (rcvrs) {
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	return rv;
}
EXPORT_SYMBOL(ipmi_unregister_for_cmd);

unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
EXPORT_SYMBOL(ipmb_checksum);

static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
				   struct kernel_ipmi_msg *msg,
				   struct ipmi_ipmb_addr *ipmb_addr,
				   long msgid,
				   unsigned char ipmb_seq,
				   int broadcast,
				   unsigned char source_address,
				   unsigned char source_lun)
{
	int i = broadcast;

	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = ipmb_addr->channel;
	if (broadcast)
		smi_msg->data[3] = 0;
	smi_msg->data[i+3] = ipmb_addr->slave_addr;
	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
	smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
	smi_msg->data[i+6] = source_address;
	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[i+8] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 9;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[i+smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);

	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1 + i;

	smi_msg->msgid = msgid;
}
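
/*
 * Example (illustrative): ipmb_checksum() returns the two's complement
 * of the 8-bit sum, so a receiver can verify a field by summing it
 * together with its checksum and checking for zero:
 *
 *	unsigned char buf[3] = { 0x20, 0x18 };	// rsSA, netfn/LUN
 *
 *	buf[2] = ipmb_checksum(buf, 2);		// 0xc8
 *	// (0x20 + 0x18 + 0xc8) & 0xff == 0
 */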

static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_lan_addr *lan_addr,
				  long msgid,
				  unsigned char ipmb_seq,
				  unsigned char source_lun)
{
	/* Format the LAN header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = lan_addr->channel;
	smi_msg->data[3] = lan_addr->session_handle;
	smi_msg->data[4] = lan_addr->remote_SWID;
	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
	smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
	smi_msg->data[7] = lan_addr->local_SWID;
	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[9] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[10], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 10;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);

	/* Add on the checksum size. */
	smi_msg->data_size += 1;

	smi_msg->msgid = msgid;
}
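
/*
 * Illustrative byte layout of the Send Message framing built by
 * format_lan_msg() above (format_ipmb_msg() is analogous, with the
 * slave address and LUN fields in place of the session handle and
 * SWIDs):
 *
 *	data[0]   IPMI_NETFN_APP_REQUEST << 2
 *	data[1]   IPMI_SEND_MSG_CMD
 *	data[2]   channel
 *	data[3]   session_handle
 *	data[4]   remote_SWID
 *	data[5]   netfn << 2 | LUN
 *	data[6]   checksum over data[4..5]
 *	data[7]   local_SWID
 *	data[8]   ipmb_seq << 2 | source_lun
 *	data[9]   cmd
 *	data[10+] payload, then a trailing checksum over data[7..]
 */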

static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
					     struct ipmi_smi_msg *smi_msg,
					     int priority)
{
	if (intf->curr_msg) {
		if (priority > 0)
			list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
		else
			list_add_tail(&smi_msg->link, &intf->xmit_msgs);
		smi_msg = NULL;
	} else {
		intf->curr_msg = smi_msg;
	}

	return smi_msg;
}

static void smi_send(struct ipmi_smi *intf,
		     const struct ipmi_smi_handlers *handlers,
		     struct ipmi_smi_msg *smi_msg, int priority)
{
	int run_to_completion = READ_ONCE(intf->run_to_completion);
	unsigned long flags = 0;

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	smi_msg = smi_add_send_msg(intf, smi_msg, priority);
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (smi_msg)
		handlers->sender(intf->send_info, smi_msg);
}

static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
{
	return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
		 && ((msg->cmd == IPMI_COLD_RESET_CMD)
		     || (msg->cmd == IPMI_WARM_RESET_CMD)))
		|| (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
}

static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
			      struct ipmi_addr *addr,
			      long msgid,
			      struct kernel_ipmi_msg *msg,
			      struct ipmi_smi_msg *smi_msg,
			      struct ipmi_recv_msg *recv_msg,
			      int retries,
			      unsigned int retry_time_ms)
{
	struct ipmi_system_interface_addr *smi_addr;

	if (msg->netfn & 1)
		/* Responses are not allowed to the SMI. */
		return -EINVAL;

	smi_addr = (struct ipmi_system_interface_addr *) addr;
	if (smi_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));

	if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
	    && ((msg->cmd == IPMI_SEND_MSG_CMD)
		|| (msg->cmd == IPMI_GET_MSG_CMD)
		|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
		/*
		 * We don't let the user do these, since we manage
		 * the sequence numbers.
		 */
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (is_maintenance_mode_cmd(msg)) {
		unsigned long flags;

		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		intf->auto_maintenance_timeout
			= maintenance_mode_timeout_ms;
		if (!intf->maintenance_mode
		    && !intf->maintenance_mode_enable) {
			intf->maintenance_mode_enable = true;
			maintenance_mode_update(intf);
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
	smi_msg->data[1] = msg->cmd;
	smi_msg->msgid = msgid;
	smi_msg->user_data = recv_msg;
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[2], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 2;
	ipmi_inc_stat(intf, sent_local_commands);

	return 0;
}
*/ 2027 unsigned long flags; 2028 2029 spin_lock_irqsave(&intf->seq_lock, flags); 2030 2031 if (is_maintenance_mode_cmd(msg)) 2032 intf->ipmb_maintenance_mode_timeout = 2033 maintenance_mode_timeout_ms; 2034 2035 if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0) 2036 /* Different default in maintenance mode */ 2037 retry_time_ms = default_maintenance_retry_ms; 2038 2039 /* 2040 * Create a sequence number with a 1 second 2041 * timeout and 4 retries. 2042 */ 2043 rv = intf_next_seq(intf, 2044 recv_msg, 2045 retry_time_ms, 2046 retries, 2047 broadcast, 2048 &ipmb_seq, 2049 &seqid); 2050 if (rv) 2051 /* 2052 * We have used up all the sequence numbers, 2053 * probably, so abort. 2054 */ 2055 goto out_err; 2056 2057 ipmi_inc_stat(intf, sent_ipmb_commands); 2058 2059 /* 2060 * Store the sequence number in the message, 2061 * so that when the send message response 2062 * comes back we can start the timer. 2063 */ 2064 format_ipmb_msg(smi_msg, msg, ipmb_addr, 2065 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 2066 ipmb_seq, broadcast, 2067 source_address, source_lun); 2068 2069 /* 2070 * Copy the message into the recv message data, so we 2071 * can retransmit it later if necessary. 2072 */ 2073 memcpy(recv_msg->msg_data, smi_msg->data, 2074 smi_msg->data_size); 2075 recv_msg->msg.data = recv_msg->msg_data; 2076 recv_msg->msg.data_len = smi_msg->data_size; 2077 2078 /* 2079 * We don't unlock until here, because we need 2080 * to copy the completed message into the 2081 * recv_msg before we release the lock. 2082 * Otherwise, race conditions may bite us. I 2083 * know that's pretty paranoid, but I prefer 2084 * to be correct. 2085 */ 2086 out_err: 2087 spin_unlock_irqrestore(&intf->seq_lock, flags); 2088 } 2089 2090 return rv; 2091 } 2092 2093 static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf, 2094 struct ipmi_addr *addr, 2095 long msgid, 2096 struct kernel_ipmi_msg *msg, 2097 struct ipmi_smi_msg *smi_msg, 2098 struct ipmi_recv_msg *recv_msg, 2099 unsigned char source_lun) 2100 { 2101 struct ipmi_ipmb_direct_addr *daddr; 2102 bool is_cmd = !(recv_msg->msg.netfn & 0x1); 2103 2104 if (!(intf->handlers->flags & IPMI_SMI_CAN_HANDLE_IPMB_DIRECT)) 2105 return -EAFNOSUPPORT; 2106 2107 /* Responses must have a completion code. 
*/ 2108 if (!is_cmd && msg->data_len < 1) { 2109 ipmi_inc_stat(intf, sent_invalid_commands); 2110 return -EINVAL; 2111 } 2112 2113 if ((msg->data_len + 4) > IPMI_MAX_MSG_LENGTH) { 2114 ipmi_inc_stat(intf, sent_invalid_commands); 2115 return -EMSGSIZE; 2116 } 2117 2118 daddr = (struct ipmi_ipmb_direct_addr *) addr; 2119 if (daddr->rq_lun > 3 || daddr->rs_lun > 3) { 2120 ipmi_inc_stat(intf, sent_invalid_commands); 2121 return -EINVAL; 2122 } 2123 2124 smi_msg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT; 2125 smi_msg->msgid = msgid; 2126 2127 if (is_cmd) { 2128 smi_msg->data[0] = msg->netfn << 2 | daddr->rs_lun; 2129 smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rq_lun; 2130 } else { 2131 smi_msg->data[0] = msg->netfn << 2 | daddr->rq_lun; 2132 smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rs_lun; 2133 } 2134 smi_msg->data[1] = daddr->slave_addr; 2135 smi_msg->data[3] = msg->cmd; 2136 2137 memcpy(smi_msg->data + 4, msg->data, msg->data_len); 2138 smi_msg->data_size = msg->data_len + 4; 2139 2140 smi_msg->user_data = recv_msg; 2141 2142 return 0; 2143 } 2144 2145 static int i_ipmi_req_lan(struct ipmi_smi *intf, 2146 struct ipmi_addr *addr, 2147 long msgid, 2148 struct kernel_ipmi_msg *msg, 2149 struct ipmi_smi_msg *smi_msg, 2150 struct ipmi_recv_msg *recv_msg, 2151 unsigned char source_lun, 2152 int retries, 2153 unsigned int retry_time_ms) 2154 { 2155 struct ipmi_lan_addr *lan_addr; 2156 unsigned char ipmb_seq; 2157 long seqid; 2158 struct ipmi_channel *chans; 2159 int rv = 0; 2160 2161 if (addr->channel >= IPMI_MAX_CHANNELS) { 2162 ipmi_inc_stat(intf, sent_invalid_commands); 2163 return -EINVAL; 2164 } 2165 2166 chans = READ_ONCE(intf->channel_list)->c; 2167 2168 if ((chans[addr->channel].medium 2169 != IPMI_CHANNEL_MEDIUM_8023LAN) 2170 && (chans[addr->channel].medium 2171 != IPMI_CHANNEL_MEDIUM_ASYNC)) { 2172 ipmi_inc_stat(intf, sent_invalid_commands); 2173 return -EINVAL; 2174 } 2175 2176 /* 11 for the header and 1 for the checksum. */ 2177 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) { 2178 ipmi_inc_stat(intf, sent_invalid_commands); 2179 return -EMSGSIZE; 2180 } 2181 2182 lan_addr = (struct ipmi_lan_addr *) addr; 2183 if (lan_addr->lun > 3) { 2184 ipmi_inc_stat(intf, sent_invalid_commands); 2185 return -EINVAL; 2186 } 2187 2188 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr)); 2189 2190 if (recv_msg->msg.netfn & 0x1) { 2191 /* 2192 * It's a response, so use the user's sequence 2193 * from msgid. 2194 */ 2195 ipmi_inc_stat(intf, sent_lan_responses); 2196 format_lan_msg(smi_msg, msg, lan_addr, msgid, 2197 msgid, source_lun); 2198 2199 /* 2200 * Save the receive message so we can use it 2201 * to deliver the response. 2202 */ 2203 smi_msg->user_data = recv_msg; 2204 } else { 2205 /* It's a command, so get a sequence for it. */ 2206 unsigned long flags; 2207 2208 spin_lock_irqsave(&intf->seq_lock, flags); 2209 2210 /* 2211 * Create a sequence number with a 1 second 2212 * timeout and 4 retries. 2213 */ 2214 rv = intf_next_seq(intf, 2215 recv_msg, 2216 retry_time_ms, 2217 retries, 2218 0, 2219 &ipmb_seq, 2220 &seqid); 2221 if (rv) 2222 /* 2223 * We have used up all the sequence numbers, 2224 * probably, so abort. 2225 */ 2226 goto out_err; 2227 2228 ipmi_inc_stat(intf, sent_lan_commands); 2229 2230 /* 2231 * Store the sequence number in the message, 2232 * so that when the send message response 2233 * comes back we can start the timer. 
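 *
 * Retry policy: a negative retries value and a zero retry_time_ms
 * fall back to the module defaults when the sequence entry is
 * created, roughly (a sketch of what intf_next_seq() does):
 *
 *	if (retry_time_ms == 0)
 *		retry_time_ms = default_retry_ms;
 *	if (retries < 0)
 *		retries = default_max_retries;
 *
 * so an unanswered command is normally abandoned after about
 * (retries + 1) * retry_time_ms milliseconds, subject to the
 * one-second timer granularity described near the top of this file.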
2234 */ 2235 format_lan_msg(smi_msg, msg, lan_addr, 2236 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 2237 ipmb_seq, source_lun); 2238 2239 /* 2240 * Copy the message into the recv message data, so we 2241 * can retransmit it later if necessary. 2242 */ 2243 memcpy(recv_msg->msg_data, smi_msg->data, 2244 smi_msg->data_size); 2245 recv_msg->msg.data = recv_msg->msg_data; 2246 recv_msg->msg.data_len = smi_msg->data_size; 2247 2248 /* 2249 * We don't unlock until here, because we need 2250 * to copy the completed message into the 2251 * recv_msg before we release the lock. 2252 * Otherwise, race conditions may bite us. I 2253 * know that's pretty paranoid, but I prefer 2254 * to be correct. 2255 */ 2256 out_err: 2257 spin_unlock_irqrestore(&intf->seq_lock, flags); 2258 } 2259 2260 return rv; 2261 } 2262 2263 /* 2264 * Separate from ipmi_request so that the user does not have to be 2265 * supplied in certain circumstances (mainly at panic time). If 2266 * messages are supplied, they will be freed, even if an error 2267 * occurs. 2268 */ 2269 static int i_ipmi_request(struct ipmi_user *user, 2270 struct ipmi_smi *intf, 2271 struct ipmi_addr *addr, 2272 long msgid, 2273 struct kernel_ipmi_msg *msg, 2274 void *user_msg_data, 2275 void *supplied_smi, 2276 struct ipmi_recv_msg *supplied_recv, 2277 int priority, 2278 unsigned char source_address, 2279 unsigned char source_lun, 2280 int retries, 2281 unsigned int retry_time_ms) 2282 { 2283 struct ipmi_smi_msg *smi_msg; 2284 struct ipmi_recv_msg *recv_msg; 2285 int rv = 0; 2286 2287 if (user) { 2288 if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) { 2289 /* Decrement will happen at the end of the routine. */ 2290 rv = -EBUSY; 2291 goto out; 2292 } 2293 } 2294 2295 if (supplied_recv) 2296 recv_msg = supplied_recv; 2297 else { 2298 recv_msg = ipmi_alloc_recv_msg(); 2299 if (recv_msg == NULL) { 2300 rv = -ENOMEM; 2301 goto out; 2302 } 2303 } 2304 recv_msg->user_msg_data = user_msg_data; 2305 2306 if (supplied_smi) 2307 smi_msg = supplied_smi; 2308 else { 2309 smi_msg = ipmi_alloc_smi_msg(); 2310 if (smi_msg == NULL) { 2311 if (!supplied_recv) 2312 ipmi_free_recv_msg(recv_msg); 2313 rv = -ENOMEM; 2314 goto out; 2315 } 2316 } 2317 2318 mutex_lock(&ipmi_interfaces_mutex); 2319 if (intf->in_shutdown) { 2320 rv = -ENODEV; 2321 goto out_err; 2322 } 2323 2324 recv_msg->user = user; 2325 if (user) 2326 /* The put happens when the message is freed. */ 2327 kref_get(&user->refcount); 2328 recv_msg->msgid = msgid; 2329 /* 2330 * Store the message to send in the receive message so timeout 2331 * responses can get the proper response data. 2332 */ 2333 recv_msg->msg = *msg; 2334 2335 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 2336 rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg, 2337 recv_msg, retries, retry_time_ms); 2338 } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) { 2339 rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg, 2340 source_address, source_lun, 2341 retries, retry_time_ms); 2342 } else if (is_ipmb_direct_addr(addr)) { 2343 rv = i_ipmi_req_ipmb_direct(intf, addr, msgid, msg, smi_msg, 2344 recv_msg, source_lun); 2345 } else if (is_lan_addr(addr)) { 2346 rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg, 2347 source_lun, retries, retry_time_ms); 2348 } else { 2349 /* Unknown address type. 
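 *
 * For reference, the dispatch above maps address types to builders:
 *
 *	IPMI_SYSTEM_INTERFACE_ADDR_TYPE -> i_ipmi_req_sysintf()
 *	IPMB and IPMB broadcast         -> i_ipmi_req_ipmb()
 *	IPMB direct                     -> i_ipmi_req_ipmb_direct()
 *	LAN                             -> i_ipmi_req_lan()
 *
 * Anything else is counted and rejected here.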
*/ 2350 ipmi_inc_stat(intf, sent_invalid_commands); 2351 rv = -EINVAL; 2352 } 2353 2354 if (rv) { 2355 out_err: 2356 ipmi_free_smi_msg(smi_msg); 2357 ipmi_free_recv_msg(recv_msg); 2358 } else { 2359 dev_dbg(intf->si_dev, "Send: %*ph\n", 2360 smi_msg->data_size, smi_msg->data); 2361 2362 smi_send(intf, intf->handlers, smi_msg, priority); 2363 } 2364 mutex_unlock(&ipmi_interfaces_mutex); 2365 2366 out: 2367 if (rv && user) 2368 atomic_dec(&user->nr_msgs); 2369 return rv; 2370 } 2371 2372 static int check_addr(struct ipmi_smi *intf, 2373 struct ipmi_addr *addr, 2374 unsigned char *saddr, 2375 unsigned char *lun) 2376 { 2377 if (addr->channel >= IPMI_MAX_CHANNELS) 2378 return -EINVAL; 2379 addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS); 2380 *lun = intf->addrinfo[addr->channel].lun; 2381 *saddr = intf->addrinfo[addr->channel].address; 2382 return 0; 2383 } 2384 2385 int ipmi_request_settime(struct ipmi_user *user, 2386 struct ipmi_addr *addr, 2387 long msgid, 2388 struct kernel_ipmi_msg *msg, 2389 void *user_msg_data, 2390 int priority, 2391 int retries, 2392 unsigned int retry_time_ms) 2393 { 2394 unsigned char saddr = 0, lun = 0; 2395 int rv; 2396 2397 if (!user) 2398 return -EINVAL; 2399 2400 user = acquire_ipmi_user(user); 2401 if (!user) 2402 return -ENODEV; 2403 2404 rv = check_addr(user->intf, addr, &saddr, &lun); 2405 if (!rv) 2406 rv = i_ipmi_request(user, 2407 user->intf, 2408 addr, 2409 msgid, 2410 msg, 2411 user_msg_data, 2412 NULL, NULL, 2413 priority, 2414 saddr, 2415 lun, 2416 retries, 2417 retry_time_ms); 2418 2419 release_ipmi_user(user); 2420 return rv; 2421 } 2422 EXPORT_SYMBOL(ipmi_request_settime); 2423 2424 int ipmi_request_supply_msgs(struct ipmi_user *user, 2425 struct ipmi_addr *addr, 2426 long msgid, 2427 struct kernel_ipmi_msg *msg, 2428 void *user_msg_data, 2429 void *supplied_smi, 2430 struct ipmi_recv_msg *supplied_recv, 2431 int priority) 2432 { 2433 unsigned char saddr = 0, lun = 0; 2434 int rv; 2435 2436 if (!user) 2437 return -EINVAL; 2438 2439 user = acquire_ipmi_user(user); 2440 if (!user) 2441 return -ENODEV; 2442 2443 rv = check_addr(user->intf, addr, &saddr, &lun); 2444 if (!rv) 2445 rv = i_ipmi_request(user, 2446 user->intf, 2447 addr, 2448 msgid, 2449 msg, 2450 user_msg_data, 2451 supplied_smi, 2452 supplied_recv, 2453 priority, 2454 saddr, 2455 lun, 2456 -1, 0); 2457 2458 release_ipmi_user(user); 2459 return rv; 2460 } 2461 EXPORT_SYMBOL(ipmi_request_supply_msgs); 2462 2463 static void bmc_device_id_handler(struct ipmi_smi *intf, 2464 struct ipmi_recv_msg *msg) 2465 { 2466 int rv; 2467 2468 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 2469 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 2470 || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) { 2471 dev_warn(intf->si_dev, 2472 "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n", 2473 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd); 2474 return; 2475 } 2476 2477 if (msg->msg.data[0]) { 2478 dev_warn(intf->si_dev, "device id fetch failed: 0x%2.2x\n", 2479 msg->msg.data[0]); 2480 intf->bmc->dyn_id_set = 0; 2481 goto out; 2482 } 2483 2484 rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd, 2485 msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id); 2486 if (rv) { 2487 dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv); 2488 /* record completion code when error */ 2489 intf->bmc->cc = msg->msg.data[0]; 2490 intf->bmc->dyn_id_set = 0; 2491 } else { 2492 /* 2493 * Make sure the id data is available before setting 2494 * dyn_id_set. 
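 *
 * The smp_wmb() below pairs with the smp_rmb() in __get_device_id();
 * the ordering it enforces is:
 *
 *	this handler (writer)          __get_device_id() (reader)
 *	bmc->fetch_id = ...;           wait_event(... dyn_id_set != 2);
 *	smp_wmb();                     smp_rmb();
 *	bmc->dyn_id_set = 1;           ... use bmc->fetch_id ...
 *
 * Without the pair, the reader could observe dyn_id_set == 1 while
 * the fetch_id contents were still in flight.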
2495 */ 2496 smp_wmb(); 2497 intf->bmc->dyn_id_set = 1; 2498 } 2499 out: 2500 wake_up(&intf->waitq); 2501 } 2502 2503 static int 2504 send_get_device_id_cmd(struct ipmi_smi *intf) 2505 { 2506 struct ipmi_system_interface_addr si; 2507 struct kernel_ipmi_msg msg; 2508 2509 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 2510 si.channel = IPMI_BMC_CHANNEL; 2511 si.lun = 0; 2512 2513 msg.netfn = IPMI_NETFN_APP_REQUEST; 2514 msg.cmd = IPMI_GET_DEVICE_ID_CMD; 2515 msg.data = NULL; 2516 msg.data_len = 0; 2517 2518 return i_ipmi_request(NULL, 2519 intf, 2520 (struct ipmi_addr *) &si, 2521 0, 2522 &msg, 2523 intf, 2524 NULL, 2525 NULL, 2526 0, 2527 intf->addrinfo[0].address, 2528 intf->addrinfo[0].lun, 2529 -1, 0); 2530 } 2531 2532 static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc) 2533 { 2534 int rv; 2535 unsigned int retry_count = 0; 2536 2537 intf->null_user_handler = bmc_device_id_handler; 2538 2539 retry: 2540 bmc->cc = 0; 2541 bmc->dyn_id_set = 2; 2542 2543 rv = send_get_device_id_cmd(intf); 2544 if (rv) 2545 goto out_reset_handler; 2546 2547 wait_event(intf->waitq, bmc->dyn_id_set != 2); 2548 2549 if (!bmc->dyn_id_set) { 2550 if (bmc->cc != IPMI_CC_NO_ERROR && 2551 ++retry_count <= GET_DEVICE_ID_MAX_RETRY) { 2552 msleep(500); 2553 dev_warn(intf->si_dev, 2554 "BMC returned 0x%2.2x, retry get bmc device id\n", 2555 bmc->cc); 2556 goto retry; 2557 } 2558 2559 rv = -EIO; /* Something went wrong in the fetch. */ 2560 } 2561 2562 /* dyn_id_set makes the id data available. */ 2563 smp_rmb(); 2564 2565 out_reset_handler: 2566 intf->null_user_handler = NULL; 2567 2568 return rv; 2569 } 2570 2571 /* 2572 * Fetch the device id for the bmc/interface. You must pass in either 2573 * bmc or intf, this code will get the other one. If the data has 2574 * been recently fetched, this will just use the cached data. Otherwise 2575 * it will run a new fetch. 2576 * 2577 * Except for the first time this is called (in ipmi_add_smi()), 2578 * this will always return good data; 2579 */ 2580 static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, 2581 struct ipmi_device_id *id, 2582 bool *guid_set, guid_t *guid, int intf_num) 2583 { 2584 int rv = 0; 2585 int prev_dyn_id_set, prev_guid_set; 2586 bool intf_set = intf != NULL; 2587 2588 if (!intf) { 2589 mutex_lock(&bmc->dyn_mutex); 2590 retry_bmc_lock: 2591 if (list_empty(&bmc->intfs)) { 2592 mutex_unlock(&bmc->dyn_mutex); 2593 return -ENOENT; 2594 } 2595 intf = list_first_entry(&bmc->intfs, struct ipmi_smi, 2596 bmc_link); 2597 kref_get(&intf->refcount); 2598 mutex_unlock(&bmc->dyn_mutex); 2599 mutex_lock(&intf->bmc_reg_mutex); 2600 mutex_lock(&bmc->dyn_mutex); 2601 if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi, 2602 bmc_link)) { 2603 mutex_unlock(&intf->bmc_reg_mutex); 2604 kref_put(&intf->refcount, intf_free); 2605 goto retry_bmc_lock; 2606 } 2607 } else { 2608 mutex_lock(&intf->bmc_reg_mutex); 2609 bmc = intf->bmc; 2610 mutex_lock(&bmc->dyn_mutex); 2611 kref_get(&intf->refcount); 2612 } 2613 2614 /* If we have a valid and current ID, just return that. */ 2615 if (intf->in_bmc_register || 2616 (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry))) 2617 goto out_noprocessing; 2618 2619 prev_guid_set = bmc->dyn_guid_set; 2620 __get_guid(intf); 2621 2622 prev_dyn_id_set = bmc->dyn_id_set; 2623 rv = __get_device_id(intf, bmc); 2624 if (rv) 2625 goto out; 2626 2627 /* 2628 * The guid, device id, manufacturer id, and product id should 2629 * not change on a BMC. If it does we have to do some dancing. 
2630 */ 2631 if (!intf->bmc_registered 2632 || (!prev_guid_set && bmc->dyn_guid_set) 2633 || (!prev_dyn_id_set && bmc->dyn_id_set) 2634 || (prev_guid_set && bmc->dyn_guid_set 2635 && !guid_equal(&bmc->guid, &bmc->fetch_guid)) 2636 || bmc->id.device_id != bmc->fetch_id.device_id 2637 || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id 2638 || bmc->id.product_id != bmc->fetch_id.product_id) { 2639 struct ipmi_device_id id = bmc->fetch_id; 2640 int guid_set = bmc->dyn_guid_set; 2641 guid_t guid; 2642 2643 guid = bmc->fetch_guid; 2644 mutex_unlock(&bmc->dyn_mutex); 2645 2646 __ipmi_bmc_unregister(intf); 2647 /* Fill in the temporary BMC for good measure. */ 2648 intf->bmc->id = id; 2649 intf->bmc->dyn_guid_set = guid_set; 2650 intf->bmc->guid = guid; 2651 if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num)) 2652 need_waiter(intf); /* Retry later on an error. */ 2653 else 2654 __scan_channels(intf, &id); 2655 2656 2657 if (!intf_set) { 2658 /* 2659 * We weren't given the interface on the 2660 * command line, so restart the operation on 2661 * the next interface for the BMC. 2662 */ 2663 mutex_unlock(&intf->bmc_reg_mutex); 2664 mutex_lock(&bmc->dyn_mutex); 2665 goto retry_bmc_lock; 2666 } 2667 2668 /* We have a new BMC, set it up. */ 2669 bmc = intf->bmc; 2670 mutex_lock(&bmc->dyn_mutex); 2671 goto out_noprocessing; 2672 } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id))) 2673 /* Version info changes, scan the channels again. */ 2674 __scan_channels(intf, &bmc->fetch_id); 2675 2676 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY; 2677 2678 out: 2679 if (rv && prev_dyn_id_set) { 2680 rv = 0; /* Ignore failures if we have previous data. */ 2681 bmc->dyn_id_set = prev_dyn_id_set; 2682 } 2683 if (!rv) { 2684 bmc->id = bmc->fetch_id; 2685 if (bmc->dyn_guid_set) 2686 bmc->guid = bmc->fetch_guid; 2687 else if (prev_guid_set) 2688 /* 2689 * The guid used to be valid and it failed to fetch, 2690 * just use the cached value. 
2691 */ 2692 bmc->dyn_guid_set = prev_guid_set; 2693 } 2694 out_noprocessing: 2695 if (!rv) { 2696 if (id) 2697 *id = bmc->id; 2698 2699 if (guid_set) 2700 *guid_set = bmc->dyn_guid_set; 2701 2702 if (guid && bmc->dyn_guid_set) 2703 *guid = bmc->guid; 2704 } 2705 2706 mutex_unlock(&bmc->dyn_mutex); 2707 mutex_unlock(&intf->bmc_reg_mutex); 2708 2709 kref_put(&intf->refcount, intf_free); 2710 return rv; 2711 } 2712 2713 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, 2714 struct ipmi_device_id *id, 2715 bool *guid_set, guid_t *guid) 2716 { 2717 return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1); 2718 } 2719 2720 static ssize_t device_id_show(struct device *dev, 2721 struct device_attribute *attr, 2722 char *buf) 2723 { 2724 struct bmc_device *bmc = to_bmc_device(dev); 2725 struct ipmi_device_id id; 2726 int rv; 2727 2728 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2729 if (rv) 2730 return rv; 2731 2732 return sysfs_emit(buf, "%u\n", id.device_id); 2733 } 2734 static DEVICE_ATTR_RO(device_id); 2735 2736 static ssize_t provides_device_sdrs_show(struct device *dev, 2737 struct device_attribute *attr, 2738 char *buf) 2739 { 2740 struct bmc_device *bmc = to_bmc_device(dev); 2741 struct ipmi_device_id id; 2742 int rv; 2743 2744 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2745 if (rv) 2746 return rv; 2747 2748 return sysfs_emit(buf, "%u\n", (id.device_revision & 0x80) >> 7); 2749 } 2750 static DEVICE_ATTR_RO(provides_device_sdrs); 2751 2752 static ssize_t revision_show(struct device *dev, struct device_attribute *attr, 2753 char *buf) 2754 { 2755 struct bmc_device *bmc = to_bmc_device(dev); 2756 struct ipmi_device_id id; 2757 int rv; 2758 2759 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2760 if (rv) 2761 return rv; 2762 2763 return sysfs_emit(buf, "%u\n", id.device_revision & 0x0F); 2764 } 2765 static DEVICE_ATTR_RO(revision); 2766 2767 static ssize_t firmware_revision_show(struct device *dev, 2768 struct device_attribute *attr, 2769 char *buf) 2770 { 2771 struct bmc_device *bmc = to_bmc_device(dev); 2772 struct ipmi_device_id id; 2773 int rv; 2774 2775 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2776 if (rv) 2777 return rv; 2778 2779 return sysfs_emit(buf, "%u.%x\n", id.firmware_revision_1, 2780 id.firmware_revision_2); 2781 } 2782 static DEVICE_ATTR_RO(firmware_revision); 2783 2784 static ssize_t ipmi_version_show(struct device *dev, 2785 struct device_attribute *attr, 2786 char *buf) 2787 { 2788 struct bmc_device *bmc = to_bmc_device(dev); 2789 struct ipmi_device_id id; 2790 int rv; 2791 2792 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2793 if (rv) 2794 return rv; 2795 2796 return sysfs_emit(buf, "%u.%u\n", 2797 ipmi_version_major(&id), 2798 ipmi_version_minor(&id)); 2799 } 2800 static DEVICE_ATTR_RO(ipmi_version); 2801 2802 static ssize_t add_dev_support_show(struct device *dev, 2803 struct device_attribute *attr, 2804 char *buf) 2805 { 2806 struct bmc_device *bmc = to_bmc_device(dev); 2807 struct ipmi_device_id id; 2808 int rv; 2809 2810 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2811 if (rv) 2812 return rv; 2813 2814 return sysfs_emit(buf, "0x%02x\n", id.additional_device_support); 2815 } 2816 static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, 2817 NULL); 2818 2819 static ssize_t manufacturer_id_show(struct device *dev, 2820 struct device_attribute *attr, 2821 char *buf) 2822 { 2823 struct bmc_device *bmc = to_bmc_device(dev); 2824 struct ipmi_device_id id; 2825 int rv; 2826 2827 
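/*
 * Like the other *_show() attributes here, this fetches (possibly
 * cached) data from the BMC on every read.  From userspace the
 * attributes appear under the ipmi_bmc platform device; the instance
 * suffix comes from the IDA allocation below, so the exact path may
 * vary, e.g.:
 *
 *	$ cat /sys/devices/platform/ipmi_bmc.0/manufacturer_id
 *	0x0002a2
 */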
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2828 if (rv) 2829 return rv; 2830 2831 return sysfs_emit(buf, "0x%6.6x\n", id.manufacturer_id); 2832 } 2833 static DEVICE_ATTR_RO(manufacturer_id); 2834 2835 static ssize_t product_id_show(struct device *dev, 2836 struct device_attribute *attr, 2837 char *buf) 2838 { 2839 struct bmc_device *bmc = to_bmc_device(dev); 2840 struct ipmi_device_id id; 2841 int rv; 2842 2843 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2844 if (rv) 2845 return rv; 2846 2847 return sysfs_emit(buf, "0x%4.4x\n", id.product_id); 2848 } 2849 static DEVICE_ATTR_RO(product_id); 2850 2851 static ssize_t aux_firmware_rev_show(struct device *dev, 2852 struct device_attribute *attr, 2853 char *buf) 2854 { 2855 struct bmc_device *bmc = to_bmc_device(dev); 2856 struct ipmi_device_id id; 2857 int rv; 2858 2859 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2860 if (rv) 2861 return rv; 2862 2863 return sysfs_emit(buf, "0x%02x 0x%02x 0x%02x 0x%02x\n", 2864 id.aux_firmware_revision[3], 2865 id.aux_firmware_revision[2], 2866 id.aux_firmware_revision[1], 2867 id.aux_firmware_revision[0]); 2868 } 2869 static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL); 2870 2871 static ssize_t guid_show(struct device *dev, struct device_attribute *attr, 2872 char *buf) 2873 { 2874 struct bmc_device *bmc = to_bmc_device(dev); 2875 bool guid_set; 2876 guid_t guid; 2877 int rv; 2878 2879 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid); 2880 if (rv) 2881 return rv; 2882 if (!guid_set) 2883 return -ENOENT; 2884 2885 return sysfs_emit(buf, "%pUl\n", &guid); 2886 } 2887 static DEVICE_ATTR_RO(guid); 2888 2889 static struct attribute *bmc_dev_attrs[] = { 2890 &dev_attr_device_id.attr, 2891 &dev_attr_provides_device_sdrs.attr, 2892 &dev_attr_revision.attr, 2893 &dev_attr_firmware_revision.attr, 2894 &dev_attr_ipmi_version.attr, 2895 &dev_attr_additional_device_support.attr, 2896 &dev_attr_manufacturer_id.attr, 2897 &dev_attr_product_id.attr, 2898 &dev_attr_aux_firmware_revision.attr, 2899 &dev_attr_guid.attr, 2900 NULL 2901 }; 2902 2903 static umode_t bmc_dev_attr_is_visible(struct kobject *kobj, 2904 struct attribute *attr, int idx) 2905 { 2906 struct device *dev = kobj_to_dev(kobj); 2907 struct bmc_device *bmc = to_bmc_device(dev); 2908 umode_t mode = attr->mode; 2909 int rv; 2910 2911 if (attr == &dev_attr_aux_firmware_revision.attr) { 2912 struct ipmi_device_id id; 2913 2914 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2915 return (!rv && id.aux_firmware_revision_set) ? mode : 0; 2916 } 2917 if (attr == &dev_attr_guid.attr) { 2918 bool guid_set; 2919 2920 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL); 2921 return (!rv && guid_set) ? 
mode : 0; 2922 } 2923 return mode; 2924 } 2925 2926 static const struct attribute_group bmc_dev_attr_group = { 2927 .attrs = bmc_dev_attrs, 2928 .is_visible = bmc_dev_attr_is_visible, 2929 }; 2930 2931 static const struct attribute_group *bmc_dev_attr_groups[] = { 2932 &bmc_dev_attr_group, 2933 NULL 2934 }; 2935 2936 static const struct device_type bmc_device_type = { 2937 .groups = bmc_dev_attr_groups, 2938 }; 2939 2940 static int __find_bmc_guid(struct device *dev, const void *data) 2941 { 2942 const guid_t *guid = data; 2943 struct bmc_device *bmc; 2944 int rv; 2945 2946 if (dev->type != &bmc_device_type) 2947 return 0; 2948 2949 bmc = to_bmc_device(dev); 2950 rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid); 2951 if (rv) 2952 rv = kref_get_unless_zero(&bmc->usecount); 2953 return rv; 2954 } 2955 2956 /* 2957 * Returns with the bmc's usecount incremented, if it is non-NULL. 2958 */ 2959 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv, 2960 guid_t *guid) 2961 { 2962 struct device *dev; 2963 struct bmc_device *bmc = NULL; 2964 2965 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid); 2966 if (dev) { 2967 bmc = to_bmc_device(dev); 2968 put_device(dev); 2969 } 2970 return bmc; 2971 } 2972 2973 struct prod_dev_id { 2974 unsigned int product_id; 2975 unsigned char device_id; 2976 }; 2977 2978 static int __find_bmc_prod_dev_id(struct device *dev, const void *data) 2979 { 2980 const struct prod_dev_id *cid = data; 2981 struct bmc_device *bmc; 2982 int rv; 2983 2984 if (dev->type != &bmc_device_type) 2985 return 0; 2986 2987 bmc = to_bmc_device(dev); 2988 rv = (bmc->id.product_id == cid->product_id 2989 && bmc->id.device_id == cid->device_id); 2990 if (rv) 2991 rv = kref_get_unless_zero(&bmc->usecount); 2992 return rv; 2993 } 2994 2995 /* 2996 * Returns with the bmc's usecount incremented, if it is non-NULL. 2997 */ 2998 static struct bmc_device *ipmi_find_bmc_prod_dev_id( 2999 struct device_driver *drv, 3000 unsigned int product_id, unsigned char device_id) 3001 { 3002 struct prod_dev_id id = { 3003 .product_id = product_id, 3004 .device_id = device_id, 3005 }; 3006 struct device *dev; 3007 struct bmc_device *bmc = NULL; 3008 3009 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id); 3010 if (dev) { 3011 bmc = to_bmc_device(dev); 3012 put_device(dev); 3013 } 3014 return bmc; 3015 } 3016 3017 static DEFINE_IDA(ipmi_bmc_ida); 3018 3019 static void 3020 release_bmc_device(struct device *dev) 3021 { 3022 kfree(to_bmc_device(dev)); 3023 } 3024 3025 static void cleanup_bmc_work(struct work_struct *work) 3026 { 3027 struct bmc_device *bmc = container_of(work, struct bmc_device, 3028 remove_work); 3029 int id = bmc->pdev.id; /* Unregister overwrites id */ 3030 3031 platform_device_unregister(&bmc->pdev); 3032 ida_free(&ipmi_bmc_ida, id); 3033 } 3034 3035 static void 3036 cleanup_bmc_device(struct kref *ref) 3037 { 3038 struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount); 3039 3040 /* 3041 * Remove the platform device in a work queue to avoid issues 3042 * with removing the device attributes while reading a device 3043 * attribute. 3044 */ 3045 queue_work(bmc_remove_work_wq, &bmc->remove_work); 3046 } 3047 3048 /* 3049 * Must be called with intf->bmc_reg_mutex held. 
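 *
 * Lock ordering here is intf->bmc_reg_mutex, then bmc->dyn_mutex;
 * dyn_mutex is only held for the short list manipulations, exactly
 * as in __bmc_get_device_id() above.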
3050 */
3051 static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
3052 {
3053 	struct bmc_device *bmc = intf->bmc;
3054 
3055 	if (!intf->bmc_registered)
3056 		return;
3057 
3058 	sysfs_remove_link(&intf->si_dev->kobj, "bmc");
3059 	sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
3060 	kfree(intf->my_dev_name);
3061 	intf->my_dev_name = NULL;
3062 
3063 	mutex_lock(&bmc->dyn_mutex);
3064 	list_del(&intf->bmc_link);
3065 	mutex_unlock(&bmc->dyn_mutex);
3066 	intf->bmc = &intf->tmp_bmc;
3067 	kref_put(&bmc->usecount, cleanup_bmc_device);
3068 	intf->bmc_registered = false;
3069 }
3070 
3071 static void ipmi_bmc_unregister(struct ipmi_smi *intf)
3072 {
3073 	mutex_lock(&intf->bmc_reg_mutex);
3074 	__ipmi_bmc_unregister(intf);
3075 	mutex_unlock(&intf->bmc_reg_mutex);
3076 }
3077 
3078 /*
3079  * Must be called with intf->bmc_reg_mutex held.
3080  */
3081 static int __ipmi_bmc_register(struct ipmi_smi *intf,
3082 			       struct ipmi_device_id *id,
3083 			       bool guid_set, guid_t *guid, int intf_num)
3084 {
3085 	int rv;
3086 	struct bmc_device *bmc;
3087 	struct bmc_device *old_bmc;
3088 
3089 	/*
3090 	 * platform_device_register() can cause bmc_reg_mutex to
3091 	 * be claimed because of the is_visible functions of
3092 	 * the attributes.  Eliminate possible recursion and
3093 	 * release the lock.
3094 	 */
3095 	intf->in_bmc_register = true;
3096 	mutex_unlock(&intf->bmc_reg_mutex);
3097 
3098 	/*
3099 	 * Try to find if there is a bmc_device struct already
3100 	 * representing the BMC behind this interface.
3101 	 */
3102 	mutex_lock(&ipmidriver_mutex);
3103 	if (guid_set)
3104 		old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
3105 	else
3106 		old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
3107 						    id->product_id,
3108 						    id->device_id);
3109 
3110 	/*
3111 	 * If there is already a bmc_device, free the new one;
3112 	 * otherwise register the new BMC device.
3113 	 */
3114 	if (old_bmc) {
3115 		bmc = old_bmc;
3116 		/*
3117 		 * Note: old_bmc already has usecount incremented by
3118 		 * the BMC find functions.
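 *
 * That reference is what keeps the shared bmc_device alive while
 * this interface points at it.  It is dropped again with
 *
 *	kref_put(&bmc->usecount, cleanup_bmc_device);
 *
 * in __ipmi_bmc_unregister() and in the error paths at the end of
 * this function.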
3119 */ 3120 intf->bmc = old_bmc; 3121 mutex_lock(&bmc->dyn_mutex); 3122 list_add_tail(&intf->bmc_link, &bmc->intfs); 3123 mutex_unlock(&bmc->dyn_mutex); 3124 3125 dev_info(intf->si_dev, 3126 "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 3127 bmc->id.manufacturer_id, 3128 bmc->id.product_id, 3129 bmc->id.device_id); 3130 } else { 3131 bmc = kzalloc(sizeof(*bmc), GFP_KERNEL); 3132 if (!bmc) { 3133 rv = -ENOMEM; 3134 goto out; 3135 } 3136 INIT_LIST_HEAD(&bmc->intfs); 3137 mutex_init(&bmc->dyn_mutex); 3138 INIT_WORK(&bmc->remove_work, cleanup_bmc_work); 3139 3140 bmc->id = *id; 3141 bmc->dyn_id_set = 1; 3142 bmc->dyn_guid_set = guid_set; 3143 bmc->guid = *guid; 3144 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY; 3145 3146 bmc->pdev.name = "ipmi_bmc"; 3147 3148 rv = ida_alloc(&ipmi_bmc_ida, GFP_KERNEL); 3149 if (rv < 0) { 3150 kfree(bmc); 3151 goto out; 3152 } 3153 3154 bmc->pdev.dev.driver = &ipmidriver.driver; 3155 bmc->pdev.id = rv; 3156 bmc->pdev.dev.release = release_bmc_device; 3157 bmc->pdev.dev.type = &bmc_device_type; 3158 kref_init(&bmc->usecount); 3159 3160 intf->bmc = bmc; 3161 mutex_lock(&bmc->dyn_mutex); 3162 list_add_tail(&intf->bmc_link, &bmc->intfs); 3163 mutex_unlock(&bmc->dyn_mutex); 3164 3165 rv = platform_device_register(&bmc->pdev); 3166 if (rv) { 3167 dev_err(intf->si_dev, 3168 "Unable to register bmc device: %d\n", 3169 rv); 3170 goto out_list_del; 3171 } 3172 3173 dev_info(intf->si_dev, 3174 "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 3175 bmc->id.manufacturer_id, 3176 bmc->id.product_id, 3177 bmc->id.device_id); 3178 } 3179 3180 /* 3181 * create symlink from system interface device to bmc device 3182 * and back. 3183 */ 3184 rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc"); 3185 if (rv) { 3186 dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv); 3187 goto out_put_bmc; 3188 } 3189 3190 if (intf_num == -1) 3191 intf_num = intf->intf_num; 3192 intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num); 3193 if (!intf->my_dev_name) { 3194 rv = -ENOMEM; 3195 dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n", 3196 rv); 3197 goto out_unlink1; 3198 } 3199 3200 rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj, 3201 intf->my_dev_name); 3202 if (rv) { 3203 dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n", 3204 rv); 3205 goto out_free_my_dev_name; 3206 } 3207 3208 intf->bmc_registered = true; 3209 3210 out: 3211 mutex_unlock(&ipmidriver_mutex); 3212 mutex_lock(&intf->bmc_reg_mutex); 3213 intf->in_bmc_register = false; 3214 return rv; 3215 3216 3217 out_free_my_dev_name: 3218 kfree(intf->my_dev_name); 3219 intf->my_dev_name = NULL; 3220 3221 out_unlink1: 3222 sysfs_remove_link(&intf->si_dev->kobj, "bmc"); 3223 3224 out_put_bmc: 3225 mutex_lock(&bmc->dyn_mutex); 3226 list_del(&intf->bmc_link); 3227 mutex_unlock(&bmc->dyn_mutex); 3228 intf->bmc = &intf->tmp_bmc; 3229 kref_put(&bmc->usecount, cleanup_bmc_device); 3230 goto out; 3231 3232 out_list_del: 3233 mutex_lock(&bmc->dyn_mutex); 3234 list_del(&intf->bmc_link); 3235 mutex_unlock(&bmc->dyn_mutex); 3236 intf->bmc = &intf->tmp_bmc; 3237 put_device(&bmc->pdev.dev); 3238 goto out; 3239 } 3240 3241 static int 3242 send_guid_cmd(struct ipmi_smi *intf, int chan) 3243 { 3244 struct kernel_ipmi_msg msg; 3245 struct ipmi_system_interface_addr si; 3246 3247 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3248 si.channel = IPMI_BMC_CHANNEL; 3249 si.lun = 0; 3250 3251 msg.netfn = 
IPMI_NETFN_APP_REQUEST; 3252 msg.cmd = IPMI_GET_DEVICE_GUID_CMD; 3253 msg.data = NULL; 3254 msg.data_len = 0; 3255 return i_ipmi_request(NULL, 3256 intf, 3257 (struct ipmi_addr *) &si, 3258 0, 3259 &msg, 3260 intf, 3261 NULL, 3262 NULL, 3263 0, 3264 intf->addrinfo[0].address, 3265 intf->addrinfo[0].lun, 3266 -1, 0); 3267 } 3268 3269 static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 3270 { 3271 struct bmc_device *bmc = intf->bmc; 3272 3273 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3274 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 3275 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD)) 3276 /* Not for me */ 3277 return; 3278 3279 if (msg->msg.data[0] != 0) { 3280 /* Error from getting the GUID, the BMC doesn't have one. */ 3281 bmc->dyn_guid_set = 0; 3282 goto out; 3283 } 3284 3285 if (msg->msg.data_len < UUID_SIZE + 1) { 3286 bmc->dyn_guid_set = 0; 3287 dev_warn(intf->si_dev, 3288 "The GUID response from the BMC was too short, it was %d but should have been %d. Assuming GUID is not available.\n", 3289 msg->msg.data_len, UUID_SIZE + 1); 3290 goto out; 3291 } 3292 3293 import_guid(&bmc->fetch_guid, msg->msg.data + 1); 3294 /* 3295 * Make sure the guid data is available before setting 3296 * dyn_guid_set. 3297 */ 3298 smp_wmb(); 3299 bmc->dyn_guid_set = 1; 3300 out: 3301 wake_up(&intf->waitq); 3302 } 3303 3304 static void __get_guid(struct ipmi_smi *intf) 3305 { 3306 int rv; 3307 struct bmc_device *bmc = intf->bmc; 3308 3309 bmc->dyn_guid_set = 2; 3310 intf->null_user_handler = guid_handler; 3311 rv = send_guid_cmd(intf, 0); 3312 if (rv) 3313 /* Send failed, no GUID available. */ 3314 bmc->dyn_guid_set = 0; 3315 else 3316 wait_event(intf->waitq, bmc->dyn_guid_set != 2); 3317 3318 /* dyn_guid_set makes the guid data available. */ 3319 smp_rmb(); 3320 3321 intf->null_user_handler = NULL; 3322 } 3323 3324 static int 3325 send_channel_info_cmd(struct ipmi_smi *intf, int chan) 3326 { 3327 struct kernel_ipmi_msg msg; 3328 unsigned char data[1]; 3329 struct ipmi_system_interface_addr si; 3330 3331 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3332 si.channel = IPMI_BMC_CHANNEL; 3333 si.lun = 0; 3334 3335 msg.netfn = IPMI_NETFN_APP_REQUEST; 3336 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD; 3337 msg.data = data; 3338 msg.data_len = 1; 3339 data[0] = chan; 3340 return i_ipmi_request(NULL, 3341 intf, 3342 (struct ipmi_addr *) &si, 3343 0, 3344 &msg, 3345 intf, 3346 NULL, 3347 NULL, 3348 0, 3349 intf->addrinfo[0].address, 3350 intf->addrinfo[0].lun, 3351 -1, 0); 3352 } 3353 3354 static void 3355 channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 3356 { 3357 int rv = 0; 3358 int ch; 3359 unsigned int set = intf->curr_working_cset; 3360 struct ipmi_channel *chans; 3361 3362 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3363 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) 3364 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) { 3365 /* It's the one we want */ 3366 if (msg->msg.data[0] != 0) { 3367 /* Got an error from the channel, just go on. */ 3368 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) { 3369 /* 3370 * If the MC does not support this 3371 * command, that is legal. We just 3372 * assume it has one IPMB at channel 3373 * zero. 
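 *
 * When the command is supported, the handling below relies on the
 * Get Channel Info response layout from the IPMI spec:
 *
 *	data[0] = completion code
 *	data[1] = channel number
 *	data[2] = channel medium type (bits 6:0)
 *	data[3] = channel protocol type (bits 4:0)
 *
 * which is why the code masks with 0x7f and 0x1f.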
3374 */ 3375 intf->wchannels[set].c[0].medium 3376 = IPMI_CHANNEL_MEDIUM_IPMB; 3377 intf->wchannels[set].c[0].protocol 3378 = IPMI_CHANNEL_PROTOCOL_IPMB; 3379 3380 intf->channel_list = intf->wchannels + set; 3381 intf->channels_ready = true; 3382 wake_up(&intf->waitq); 3383 goto out; 3384 } 3385 goto next_channel; 3386 } 3387 if (msg->msg.data_len < 4) { 3388 /* Message not big enough, just go on. */ 3389 goto next_channel; 3390 } 3391 ch = intf->curr_channel; 3392 chans = intf->wchannels[set].c; 3393 chans[ch].medium = msg->msg.data[2] & 0x7f; 3394 chans[ch].protocol = msg->msg.data[3] & 0x1f; 3395 3396 next_channel: 3397 intf->curr_channel++; 3398 if (intf->curr_channel >= IPMI_MAX_CHANNELS) { 3399 intf->channel_list = intf->wchannels + set; 3400 intf->channels_ready = true; 3401 wake_up(&intf->waitq); 3402 } else { 3403 intf->channel_list = intf->wchannels + set; 3404 intf->channels_ready = true; 3405 rv = send_channel_info_cmd(intf, intf->curr_channel); 3406 } 3407 3408 if (rv) { 3409 /* Got an error somehow, just give up. */ 3410 dev_warn(intf->si_dev, 3411 "Error sending channel information for channel %d: %d\n", 3412 intf->curr_channel, rv); 3413 3414 intf->channel_list = intf->wchannels + set; 3415 intf->channels_ready = true; 3416 wake_up(&intf->waitq); 3417 } 3418 } 3419 out: 3420 return; 3421 } 3422 3423 /* 3424 * Must be holding intf->bmc_reg_mutex to call this. 3425 */ 3426 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id) 3427 { 3428 int rv; 3429 3430 if (ipmi_version_major(id) > 1 3431 || (ipmi_version_major(id) == 1 3432 && ipmi_version_minor(id) >= 5)) { 3433 unsigned int set; 3434 3435 /* 3436 * Start scanning the channels to see what is 3437 * available. 3438 */ 3439 set = !intf->curr_working_cset; 3440 intf->curr_working_cset = set; 3441 memset(&intf->wchannels[set], 0, 3442 sizeof(struct ipmi_channel_set)); 3443 3444 intf->null_user_handler = channel_handler; 3445 intf->curr_channel = 0; 3446 rv = send_channel_info_cmd(intf, 0); 3447 if (rv) { 3448 dev_warn(intf->si_dev, 3449 "Error sending channel information for channel 0, %d\n", 3450 rv); 3451 intf->null_user_handler = NULL; 3452 return -EIO; 3453 } 3454 3455 /* Wait for the channel info to be read. */ 3456 wait_event(intf->waitq, intf->channels_ready); 3457 intf->null_user_handler = NULL; 3458 } else { 3459 unsigned int set = intf->curr_working_cset; 3460 3461 /* Assume a single IPMB channel at zero. 
*/ 3462 intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; 3463 intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB; 3464 intf->channel_list = intf->wchannels + set; 3465 intf->channels_ready = true; 3466 } 3467 3468 return 0; 3469 } 3470 3471 static void ipmi_poll(struct ipmi_smi *intf) 3472 { 3473 if (intf->handlers->poll) 3474 intf->handlers->poll(intf->send_info); 3475 /* In case something came in */ 3476 handle_new_recv_msgs(intf); 3477 } 3478 3479 void ipmi_poll_interface(struct ipmi_user *user) 3480 { 3481 ipmi_poll(user->intf); 3482 } 3483 EXPORT_SYMBOL(ipmi_poll_interface); 3484 3485 static ssize_t nr_users_show(struct device *dev, 3486 struct device_attribute *attr, 3487 char *buf) 3488 { 3489 struct ipmi_smi *intf = container_of(attr, 3490 struct ipmi_smi, nr_users_devattr); 3491 3492 return sysfs_emit(buf, "%d\n", atomic_read(&intf->nr_users)); 3493 } 3494 static DEVICE_ATTR_RO(nr_users); 3495 3496 static ssize_t nr_msgs_show(struct device *dev, 3497 struct device_attribute *attr, 3498 char *buf) 3499 { 3500 struct ipmi_smi *intf = container_of(attr, 3501 struct ipmi_smi, nr_msgs_devattr); 3502 struct ipmi_user *user; 3503 unsigned int count = 0; 3504 3505 mutex_lock(&intf->users_mutex); 3506 list_for_each_entry(user, &intf->users, link) 3507 count += atomic_read(&user->nr_msgs); 3508 mutex_unlock(&intf->users_mutex); 3509 3510 return sysfs_emit(buf, "%u\n", count); 3511 } 3512 static DEVICE_ATTR_RO(nr_msgs); 3513 3514 static void redo_bmc_reg(struct work_struct *work) 3515 { 3516 struct ipmi_smi *intf = container_of(work, struct ipmi_smi, 3517 bmc_reg_work); 3518 3519 if (!intf->in_shutdown) 3520 bmc_get_device_id(intf, NULL, NULL, NULL, NULL); 3521 3522 kref_put(&intf->refcount, intf_free); 3523 } 3524 3525 int ipmi_add_smi(struct module *owner, 3526 const struct ipmi_smi_handlers *handlers, 3527 void *send_info, 3528 struct device *si_dev, 3529 unsigned char slave_addr) 3530 { 3531 int i, j; 3532 int rv; 3533 struct ipmi_smi *intf, *tintf; 3534 struct list_head *link; 3535 struct ipmi_device_id id; 3536 3537 /* 3538 * Make sure the driver is actually initialized, this handles 3539 * problems with initialization order. 3540 */ 3541 rv = ipmi_init_msghandler(); 3542 if (rv) 3543 return rv; 3544 3545 intf = kzalloc(sizeof(*intf), GFP_KERNEL); 3546 if (!intf) 3547 return -ENOMEM; 3548 3549 intf->owner = owner; 3550 intf->bmc = &intf->tmp_bmc; 3551 INIT_LIST_HEAD(&intf->bmc->intfs); 3552 mutex_init(&intf->bmc->dyn_mutex); 3553 INIT_LIST_HEAD(&intf->bmc_link); 3554 mutex_init(&intf->bmc_reg_mutex); 3555 intf->intf_num = -1; /* Mark it invalid for now. 
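 *
 * (For context: a low-level interface driver hands itself to this
 * layer with a registration roughly like the following sketch; the
 * handler names are hypothetical and most callbacks are omitted:
 *
 *	static const struct ipmi_smi_handlers my_handlers = {
 *		.owner            = THIS_MODULE,
 *		.start_processing = my_start_processing,
 *		.sender           = my_sender,
 *	};
 *
 *	rv = ipmi_add_smi(THIS_MODULE, &my_handlers, my_send_info,
 *			  my_dev, 0x20);
 *
 * 0x20 being the conventional BMC slave address; passing 0 keeps
 * the default.)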
*/ 3556 kref_init(&intf->refcount); 3557 INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg); 3558 intf->si_dev = si_dev; 3559 for (j = 0; j < IPMI_MAX_CHANNELS; j++) { 3560 intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR; 3561 intf->addrinfo[j].lun = 2; 3562 } 3563 if (slave_addr != 0) 3564 intf->addrinfo[0].address = slave_addr; 3565 INIT_LIST_HEAD(&intf->user_msgs); 3566 mutex_init(&intf->user_msgs_mutex); 3567 INIT_LIST_HEAD(&intf->users); 3568 mutex_init(&intf->users_mutex); 3569 atomic_set(&intf->nr_users, 0); 3570 intf->handlers = handlers; 3571 intf->send_info = send_info; 3572 spin_lock_init(&intf->seq_lock); 3573 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { 3574 intf->seq_table[j].inuse = 0; 3575 intf->seq_table[j].seqid = 0; 3576 } 3577 intf->curr_seq = 0; 3578 spin_lock_init(&intf->waiting_rcv_msgs_lock); 3579 INIT_LIST_HEAD(&intf->waiting_rcv_msgs); 3580 INIT_WORK(&intf->smi_work, smi_work); 3581 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0); 3582 spin_lock_init(&intf->xmit_msgs_lock); 3583 INIT_LIST_HEAD(&intf->xmit_msgs); 3584 INIT_LIST_HEAD(&intf->hp_xmit_msgs); 3585 mutex_init(&intf->events_mutex); 3586 spin_lock_init(&intf->watch_lock); 3587 atomic_set(&intf->event_waiters, 0); 3588 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 3589 INIT_LIST_HEAD(&intf->waiting_events); 3590 intf->waiting_events_count = 0; 3591 mutex_init(&intf->cmd_rcvrs_mutex); 3592 spin_lock_init(&intf->maintenance_mode_lock); 3593 INIT_LIST_HEAD(&intf->cmd_rcvrs); 3594 init_waitqueue_head(&intf->waitq); 3595 for (i = 0; i < IPMI_NUM_STATS; i++) 3596 atomic_set(&intf->stats[i], 0); 3597 3598 /* 3599 * Grab the watchers mutex so we can deliver the new interface 3600 * without races. 3601 */ 3602 mutex_lock(&smi_watchers_mutex); 3603 mutex_lock(&ipmi_interfaces_mutex); 3604 /* Look for a hole in the numbers. */ 3605 i = 0; 3606 link = &ipmi_interfaces; 3607 list_for_each_entry(tintf, &ipmi_interfaces, link) { 3608 if (tintf->intf_num != i) { 3609 link = &tintf->link; 3610 break; 3611 } 3612 i++; 3613 } 3614 /* Add the new interface in numeric order. */ 3615 if (i == 0) 3616 list_add(&intf->link, &ipmi_interfaces); 3617 else 3618 list_add_tail(&intf->link, link); 3619 3620 rv = handlers->start_processing(send_info, intf); 3621 if (rv) 3622 goto out_err; 3623 3624 rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i); 3625 if (rv) { 3626 dev_err(si_dev, "Unable to get the device id: %d\n", rv); 3627 goto out_err_started; 3628 } 3629 3630 mutex_lock(&intf->bmc_reg_mutex); 3631 rv = __scan_channels(intf, &id); 3632 mutex_unlock(&intf->bmc_reg_mutex); 3633 if (rv) 3634 goto out_err_bmc_reg; 3635 3636 intf->nr_users_devattr = dev_attr_nr_users; 3637 sysfs_attr_init(&intf->nr_users_devattr.attr); 3638 rv = device_create_file(intf->si_dev, &intf->nr_users_devattr); 3639 if (rv) 3640 goto out_err_bmc_reg; 3641 3642 intf->nr_msgs_devattr = dev_attr_nr_msgs; 3643 sysfs_attr_init(&intf->nr_msgs_devattr.attr); 3644 rv = device_create_file(intf->si_dev, &intf->nr_msgs_devattr); 3645 if (rv) { 3646 device_remove_file(intf->si_dev, &intf->nr_users_devattr); 3647 goto out_err_bmc_reg; 3648 } 3649 3650 intf->intf_num = i; 3651 mutex_unlock(&ipmi_interfaces_mutex); 3652 3653 /* After this point the interface is legal to use. 
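 *
 * Consumers that want to learn about interfaces as they come and go
 * register an ipmi_smi_watcher; its new_smi() callback is invoked
 * below (and, for already-present interfaces, at watcher registration
 * time).  An illustrative sketch, with hypothetical callback names:
 *
 *	static struct ipmi_smi_watcher my_watcher = {
 *		.owner    = THIS_MODULE,
 *		.new_smi  = my_new_smi,
 *		.smi_gone = my_smi_gone,
 *	};
 *
 *	rv = ipmi_smi_watcher_register(&my_watcher);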
*/ 3654 call_smi_watchers(i, intf->si_dev); 3655 3656 mutex_unlock(&smi_watchers_mutex); 3657 3658 return 0; 3659 3660 out_err_bmc_reg: 3661 ipmi_bmc_unregister(intf); 3662 out_err_started: 3663 if (intf->handlers->shutdown) 3664 intf->handlers->shutdown(intf->send_info); 3665 out_err: 3666 list_del(&intf->link); 3667 mutex_unlock(&ipmi_interfaces_mutex); 3668 mutex_unlock(&smi_watchers_mutex); 3669 kref_put(&intf->refcount, intf_free); 3670 3671 return rv; 3672 } 3673 EXPORT_SYMBOL(ipmi_add_smi); 3674 3675 static void deliver_smi_err_response(struct ipmi_smi *intf, 3676 struct ipmi_smi_msg *msg, 3677 unsigned char err) 3678 { 3679 int rv; 3680 msg->rsp[0] = msg->data[0] | 4; 3681 msg->rsp[1] = msg->data[1]; 3682 msg->rsp[2] = err; 3683 msg->rsp_size = 3; 3684 3685 /* This will never requeue, but it may ask us to free the message. */ 3686 rv = handle_one_recv_msg(intf, msg); 3687 if (rv == 0) 3688 ipmi_free_smi_msg(msg); 3689 } 3690 3691 static void cleanup_smi_msgs(struct ipmi_smi *intf) 3692 { 3693 int i; 3694 struct seq_table *ent; 3695 struct ipmi_smi_msg *msg; 3696 struct list_head *entry; 3697 struct list_head tmplist; 3698 3699 /* Clear out our transmit queues and hold the messages. */ 3700 INIT_LIST_HEAD(&tmplist); 3701 list_splice_tail(&intf->hp_xmit_msgs, &tmplist); 3702 list_splice_tail(&intf->xmit_msgs, &tmplist); 3703 3704 /* Current message first, to preserve order */ 3705 while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) { 3706 /* Wait for the message to clear out. */ 3707 schedule_timeout(1); 3708 } 3709 3710 /* No need for locks, the interface is down. */ 3711 3712 /* 3713 * Return errors for all pending messages in queue and in the 3714 * tables waiting for remote responses. 3715 */ 3716 while (!list_empty(&tmplist)) { 3717 entry = tmplist.next; 3718 list_del(entry); 3719 msg = list_entry(entry, struct ipmi_smi_msg, link); 3720 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED); 3721 } 3722 3723 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { 3724 ent = &intf->seq_table[i]; 3725 if (!ent->inuse) 3726 continue; 3727 deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED); 3728 } 3729 } 3730 3731 void ipmi_unregister_smi(struct ipmi_smi *intf) 3732 { 3733 struct ipmi_smi_watcher *w; 3734 int intf_num; 3735 3736 if (!intf) 3737 return; 3738 3739 intf_num = intf->intf_num; 3740 mutex_lock(&ipmi_interfaces_mutex); 3741 cancel_work_sync(&intf->smi_work); 3742 /* smi_work() can no longer be in progress after this. */ 3743 3744 intf->intf_num = -1; 3745 intf->in_shutdown = true; 3746 list_del(&intf->link); 3747 mutex_unlock(&ipmi_interfaces_mutex); 3748 3749 /* At this point no users can be added to the interface. */ 3750 3751 device_remove_file(intf->si_dev, &intf->nr_msgs_devattr); 3752 device_remove_file(intf->si_dev, &intf->nr_users_devattr); 3753 3754 /* 3755 * Call all the watcher interfaces to tell them that 3756 * an interface is going away. 
3757 */ 3758 mutex_lock(&smi_watchers_mutex); 3759 list_for_each_entry(w, &smi_watchers, link) 3760 w->smi_gone(intf_num); 3761 mutex_unlock(&smi_watchers_mutex); 3762 3763 mutex_lock(&intf->users_mutex); 3764 while (!list_empty(&intf->users)) { 3765 struct ipmi_user *user = list_first_entry(&intf->users, 3766 struct ipmi_user, link); 3767 3768 _ipmi_destroy_user(user); 3769 } 3770 mutex_unlock(&intf->users_mutex); 3771 3772 if (intf->handlers->shutdown) 3773 intf->handlers->shutdown(intf->send_info); 3774 3775 cleanup_smi_msgs(intf); 3776 3777 ipmi_bmc_unregister(intf); 3778 3779 kref_put(&intf->refcount, intf_free); 3780 } 3781 EXPORT_SYMBOL(ipmi_unregister_smi); 3782 3783 static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf, 3784 struct ipmi_smi_msg *msg) 3785 { 3786 struct ipmi_ipmb_addr ipmb_addr; 3787 struct ipmi_recv_msg *recv_msg; 3788 3789 /* 3790 * This is 11, not 10, because the response must contain a 3791 * completion code. 3792 */ 3793 if (msg->rsp_size < 11) { 3794 /* Message not big enough, just ignore it. */ 3795 ipmi_inc_stat(intf, invalid_ipmb_responses); 3796 return 0; 3797 } 3798 3799 if (msg->rsp[2] != 0) { 3800 /* An error getting the response, just ignore it. */ 3801 return 0; 3802 } 3803 3804 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE; 3805 ipmb_addr.slave_addr = msg->rsp[6]; 3806 ipmb_addr.channel = msg->rsp[3] & 0x0f; 3807 ipmb_addr.lun = msg->rsp[7] & 3; 3808 3809 /* 3810 * It's a response from a remote entity. Look up the sequence 3811 * number and handle the response. 3812 */ 3813 if (intf_find_seq(intf, 3814 msg->rsp[7] >> 2, 3815 msg->rsp[3] & 0x0f, 3816 msg->rsp[8], 3817 (msg->rsp[4] >> 2) & (~1), 3818 (struct ipmi_addr *) &ipmb_addr, 3819 &recv_msg)) { 3820 /* 3821 * We were unable to find the sequence number, 3822 * so just nuke the message. 3823 */ 3824 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3825 return 0; 3826 } 3827 3828 memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9); 3829 /* 3830 * The other fields matched, so no need to set them, except 3831 * for netfn, which needs to be the response that was 3832 * returned, not the request value. 3833 */ 3834 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3835 recv_msg->msg.data = recv_msg->msg_data; 3836 recv_msg->msg.data_len = msg->rsp_size - 10; 3837 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 3838 if (deliver_response(intf, recv_msg)) 3839 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3840 else 3841 ipmi_inc_stat(intf, handled_ipmb_responses); 3842 3843 return 0; 3844 } 3845 3846 static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, 3847 struct ipmi_smi_msg *msg) 3848 { 3849 struct cmd_rcvr *rcvr; 3850 int rv = 0; 3851 unsigned char netfn; 3852 unsigned char cmd; 3853 unsigned char chan; 3854 struct ipmi_user *user = NULL; 3855 struct ipmi_ipmb_addr *ipmb_addr; 3856 struct ipmi_recv_msg *recv_msg; 3857 3858 if (msg->rsp_size < 10) { 3859 /* Message not big enough, just ignore it. */ 3860 ipmi_inc_stat(intf, invalid_commands); 3861 return 0; 3862 } 3863 3864 if (msg->rsp[2] != 0) { 3865 /* An error getting the response, just ignore it. */ 3866 return 0; 3867 } 3868 3869 netfn = msg->rsp[4] >> 2; 3870 cmd = msg->rsp[8]; 3871 chan = msg->rsp[3] & 0xf; 3872 3873 rcu_read_lock(); 3874 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 3875 if (rcvr) { 3876 user = rcvr->user; 3877 kref_get(&user->refcount); 3878 } else 3879 user = NULL; 3880 rcu_read_unlock(); 3881 3882 if (user == NULL) { 3883 /* We didn't find a user, deliver an error response. 
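 *
 * A user claims incoming commands like this one ahead of time with
 * something along the lines of (illustrative):
 *
 *	rv = ipmi_register_for_cmd(user, netfn, cmd, IPMI_CHAN_ALL);
 *
 * Only commands matching a registered (netfn, cmd, channel) tuple
 * reach a user; everything else takes this error path.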
*/ 3884 ipmi_inc_stat(intf, unhandled_commands); 3885 3886 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 3887 msg->data[1] = IPMI_SEND_MSG_CMD; 3888 msg->data[2] = msg->rsp[3]; 3889 msg->data[3] = msg->rsp[6]; 3890 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); 3891 msg->data[5] = ipmb_checksum(&msg->data[3], 2); 3892 msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address; 3893 /* rqseq/lun */ 3894 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); 3895 msg->data[8] = msg->rsp[8]; /* cmd */ 3896 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE; 3897 msg->data[10] = ipmb_checksum(&msg->data[6], 4); 3898 msg->data_size = 11; 3899 3900 dev_dbg(intf->si_dev, "Invalid command: %*ph\n", 3901 msg->data_size, msg->data); 3902 3903 mutex_lock(&ipmi_interfaces_mutex); 3904 if (!intf->in_shutdown) { 3905 smi_send(intf, intf->handlers, msg, 0); 3906 /* 3907 * We used the message, so return the value 3908 * that causes it to not be freed or 3909 * queued. 3910 */ 3911 rv = -1; 3912 } 3913 mutex_unlock(&ipmi_interfaces_mutex); 3914 } else { 3915 recv_msg = ipmi_alloc_recv_msg(); 3916 if (!recv_msg) { 3917 /* 3918 * We couldn't allocate memory for the 3919 * message, so requeue it for handling 3920 * later. 3921 */ 3922 rv = 1; 3923 kref_put(&user->refcount, free_ipmi_user); 3924 } else { 3925 /* Extract the source address from the data. */ 3926 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; 3927 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; 3928 ipmb_addr->slave_addr = msg->rsp[6]; 3929 ipmb_addr->lun = msg->rsp[7] & 3; 3930 ipmb_addr->channel = msg->rsp[3] & 0xf; 3931 3932 /* 3933 * Extract the rest of the message information 3934 * from the IPMB header. 3935 */ 3936 recv_msg->user = user; 3937 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 3938 recv_msg->msgid = msg->rsp[7] >> 2; 3939 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3940 recv_msg->msg.cmd = msg->rsp[8]; 3941 recv_msg->msg.data = recv_msg->msg_data; 3942 3943 /* 3944 * We chop off 10, not 9 bytes because the checksum 3945 * at the end also needs to be removed. 3946 */ 3947 recv_msg->msg.data_len = msg->rsp_size - 10; 3948 memcpy(recv_msg->msg_data, &msg->rsp[9], 3949 msg->rsp_size - 10); 3950 if (deliver_response(intf, recv_msg)) 3951 ipmi_inc_stat(intf, unhandled_commands); 3952 else 3953 ipmi_inc_stat(intf, handled_commands); 3954 } 3955 } 3956 3957 return rv; 3958 } 3959 3960 static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, 3961 struct ipmi_smi_msg *msg) 3962 { 3963 struct cmd_rcvr *rcvr; 3964 int rv = 0; 3965 struct ipmi_user *user = NULL; 3966 struct ipmi_ipmb_direct_addr *daddr; 3967 struct ipmi_recv_msg *recv_msg; 3968 unsigned char netfn = msg->rsp[0] >> 2; 3969 unsigned char cmd = msg->rsp[3]; 3970 3971 rcu_read_lock(); 3972 /* We always use channel 0 for direct messages. */ 3973 rcvr = find_cmd_rcvr(intf, netfn, cmd, 0); 3974 if (rcvr) { 3975 user = rcvr->user; 3976 kref_get(&user->refcount); 3977 } else 3978 user = NULL; 3979 rcu_read_unlock(); 3980 3981 if (user == NULL) { 3982 /* We didn't find a user, deliver an error response. 
*/ 3983 ipmi_inc_stat(intf, unhandled_commands); 3984 3985 msg->data[0] = (netfn + 1) << 2; 3986 msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */ 3987 msg->data[1] = msg->rsp[1]; /* Addr */ 3988 msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */ 3989 msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */ 3990 msg->data[3] = cmd; 3991 msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE; 3992 msg->data_size = 5; 3993 3994 mutex_lock(&ipmi_interfaces_mutex); 3995 if (!intf->in_shutdown) { 3996 smi_send(intf, intf->handlers, msg, 0); 3997 /* 3998 * We used the message, so return the value 3999 * that causes it to not be freed or 4000 * queued. 4001 */ 4002 rv = -1; 4003 } 4004 mutex_unlock(&ipmi_interfaces_mutex); 4005 } else { 4006 recv_msg = ipmi_alloc_recv_msg(); 4007 if (!recv_msg) { 4008 /* 4009 * We couldn't allocate memory for the 4010 * message, so requeue it for handling 4011 * later. 4012 */ 4013 rv = 1; 4014 kref_put(&user->refcount, free_ipmi_user); 4015 } else { 4016 /* Extract the source address from the data. */ 4017 daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr; 4018 daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; 4019 daddr->channel = 0; 4020 daddr->slave_addr = msg->rsp[1]; 4021 daddr->rs_lun = msg->rsp[0] & 3; 4022 daddr->rq_lun = msg->rsp[2] & 3; 4023 4024 /* 4025 * Extract the rest of the message information 4026 * from the IPMB header. 4027 */ 4028 recv_msg->user = user; 4029 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 4030 recv_msg->msgid = (msg->rsp[2] >> 2); 4031 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4032 recv_msg->msg.cmd = msg->rsp[3]; 4033 recv_msg->msg.data = recv_msg->msg_data; 4034 4035 recv_msg->msg.data_len = msg->rsp_size - 4; 4036 memcpy(recv_msg->msg_data, msg->rsp + 4, 4037 msg->rsp_size - 4); 4038 if (deliver_response(intf, recv_msg)) 4039 ipmi_inc_stat(intf, unhandled_commands); 4040 else 4041 ipmi_inc_stat(intf, handled_commands); 4042 } 4043 } 4044 4045 return rv; 4046 } 4047 4048 static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf, 4049 struct ipmi_smi_msg *msg) 4050 { 4051 struct ipmi_recv_msg *recv_msg; 4052 struct ipmi_ipmb_direct_addr *daddr; 4053 4054 recv_msg = msg->user_data; 4055 if (recv_msg == NULL) { 4056 dev_warn(intf->si_dev, 4057 "IPMI direct message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n"); 4058 return 0; 4059 } 4060 4061 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 4062 recv_msg->msgid = msg->msgid; 4063 daddr = (struct ipmi_ipmb_direct_addr *) &recv_msg->addr; 4064 daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; 4065 daddr->channel = 0; 4066 daddr->slave_addr = msg->rsp[1]; 4067 daddr->rq_lun = msg->rsp[0] & 3; 4068 daddr->rs_lun = msg->rsp[2] & 3; 4069 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4070 recv_msg->msg.cmd = msg->rsp[3]; 4071 memcpy(recv_msg->msg_data, &msg->rsp[4], msg->rsp_size - 4); 4072 recv_msg->msg.data = recv_msg->msg_data; 4073 recv_msg->msg.data_len = msg->rsp_size - 4; 4074 deliver_local_response(intf, recv_msg); 4075 4076 return 0; 4077 } 4078 4079 static int handle_lan_get_msg_rsp(struct ipmi_smi *intf, 4080 struct ipmi_smi_msg *msg) 4081 { 4082 struct ipmi_lan_addr lan_addr; 4083 struct ipmi_recv_msg *recv_msg; 4084 4085 4086 /* 4087 * This is 13, not 12, because the response must contain a 4088 * completion code. 4089 */ 4090 if (msg->rsp_size < 13) { 4091 /* Message not big enough, just ignore it. 
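 *
 * The 13 comes from the way the code below indexes the wrapped LAN
 * response:
 *
 *	rsp[0..2]  Get Message netfn/cmd/completion code
 *	rsp[3]     channel (low nibble), privilege (high nibble)
 *	rsp[4]     session handle
 *	rsp[5]     local SWID
 *	rsp[6]     netfn/LUN
 *	rsp[7]     checksum
 *	rsp[8]     remote SWID
 *	rsp[9]     rqSeq/LUN
 *	rsp[10]    command
 *	rsp[11]    first data byte (the completion code)
 *	rsp[last]  trailing checksum
 *
 * i.e. a 12-byte envelope plus at least the one-byte completion code.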
*/ 4092 ipmi_inc_stat(intf, invalid_lan_responses); 4093 return 0; 4094 } 4095 4096 if (msg->rsp[2] != 0) { 4097 /* An error getting the response, just ignore it. */ 4098 return 0; 4099 } 4100 4101 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE; 4102 lan_addr.session_handle = msg->rsp[4]; 4103 lan_addr.remote_SWID = msg->rsp[8]; 4104 lan_addr.local_SWID = msg->rsp[5]; 4105 lan_addr.channel = msg->rsp[3] & 0x0f; 4106 lan_addr.privilege = msg->rsp[3] >> 4; 4107 lan_addr.lun = msg->rsp[9] & 3; 4108 4109 /* 4110 * It's a response from a remote entity. Look up the sequence 4111 * number and handle the response. 4112 */ 4113 if (intf_find_seq(intf, 4114 msg->rsp[9] >> 2, 4115 msg->rsp[3] & 0x0f, 4116 msg->rsp[10], 4117 (msg->rsp[6] >> 2) & (~1), 4118 (struct ipmi_addr *) &lan_addr, 4119 &recv_msg)) { 4120 /* 4121 * We were unable to find the sequence number, 4122 * so just nuke the message. 4123 */ 4124 ipmi_inc_stat(intf, unhandled_lan_responses); 4125 return 0; 4126 } 4127 4128 memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11); 4129 /* 4130 * The other fields matched, so no need to set them, except 4131 * for netfn, which needs to be the response that was 4132 * returned, not the request value. 4133 */ 4134 recv_msg->msg.netfn = msg->rsp[6] >> 2; 4135 recv_msg->msg.data = recv_msg->msg_data; 4136 recv_msg->msg.data_len = msg->rsp_size - 12; 4137 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 4138 if (deliver_response(intf, recv_msg)) 4139 ipmi_inc_stat(intf, unhandled_lan_responses); 4140 else 4141 ipmi_inc_stat(intf, handled_lan_responses); 4142 4143 return 0; 4144 } 4145 4146 static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, 4147 struct ipmi_smi_msg *msg) 4148 { 4149 struct cmd_rcvr *rcvr; 4150 int rv = 0; 4151 unsigned char netfn; 4152 unsigned char cmd; 4153 unsigned char chan; 4154 struct ipmi_user *user = NULL; 4155 struct ipmi_lan_addr *lan_addr; 4156 struct ipmi_recv_msg *recv_msg; 4157 4158 if (msg->rsp_size < 12) { 4159 /* Message not big enough, just ignore it. */ 4160 ipmi_inc_stat(intf, invalid_commands); 4161 return 0; 4162 } 4163 4164 if (msg->rsp[2] != 0) { 4165 /* An error getting the response, just ignore it. */ 4166 return 0; 4167 } 4168 4169 netfn = msg->rsp[6] >> 2; 4170 cmd = msg->rsp[10]; 4171 chan = msg->rsp[3] & 0xf; 4172 4173 rcu_read_lock(); 4174 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 4175 if (rcvr) { 4176 user = rcvr->user; 4177 kref_get(&user->refcount); 4178 } else 4179 user = NULL; 4180 rcu_read_unlock(); 4181 4182 if (user == NULL) { 4183 /* We didn't find a user, just give up. */ 4184 ipmi_inc_stat(intf, unhandled_commands); 4185 4186 /* 4187 * Don't do anything with these messages, just allow 4188 * them to be freed. 4189 */ 4190 rv = 0; 4191 } else { 4192 recv_msg = ipmi_alloc_recv_msg(); 4193 if (!recv_msg) { 4194 /* 4195 * We couldn't allocate memory for the 4196 * message, so requeue it for handling later. 4197 */ 4198 rv = 1; 4199 kref_put(&user->refcount, free_ipmi_user); 4200 } else { 4201 /* Extract the source address from the data. */ 4202 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; 4203 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE; 4204 lan_addr->session_handle = msg->rsp[4]; 4205 lan_addr->remote_SWID = msg->rsp[8]; 4206 lan_addr->local_SWID = msg->rsp[5]; 4207 lan_addr->lun = msg->rsp[9] & 3; 4208 lan_addr->channel = msg->rsp[3] & 0xf; 4209 lan_addr->privilege = msg->rsp[3] >> 4; 4210 4211 /* 4212 * Extract the rest of the message information 4213 * from the IPMB header. 
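* (Strictly speaking, on this path the fields come from the
* LAN-format header prepared above: session handle, remote and
* local SWIDs, and the privilege nibble.)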
4214 */
4215 recv_msg->user = user;
4216 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
4217 recv_msg->msgid = msg->rsp[9] >> 2;
4218 recv_msg->msg.netfn = msg->rsp[6] >> 2;
4219 recv_msg->msg.cmd = msg->rsp[10];
4220 recv_msg->msg.data = recv_msg->msg_data;
4221 
4222 /*
4223 * We chop off 12, not 11 bytes because the checksum
4224 * at the end also needs to be removed.
4225 */
4226 recv_msg->msg.data_len = msg->rsp_size - 12;
4227 memcpy(recv_msg->msg_data, &msg->rsp[11],
4228 msg->rsp_size - 12);
4229 if (deliver_response(intf, recv_msg))
4230 ipmi_inc_stat(intf, unhandled_commands);
4231 else
4232 ipmi_inc_stat(intf, handled_commands);
4233 }
4234 }
4235 
4236 return rv;
4237 }
4238 
4239 /*
4240 * This routine will handle "Get Message" command responses with
4241 * channels that use an OEM Medium. The message format belongs to
4242 * the OEM. See IPMI 2.0 specification, Chapter 6 and
4243 * Chapter 22, sections 22.6 and 22.24 for more details.
4244 */
4245 static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
4246 struct ipmi_smi_msg *msg)
4247 {
4248 struct cmd_rcvr *rcvr;
4249 int rv = 0;
4250 unsigned char netfn;
4251 unsigned char cmd;
4252 unsigned char chan;
4253 struct ipmi_user *user = NULL;
4254 struct ipmi_system_interface_addr *smi_addr;
4255 struct ipmi_recv_msg *recv_msg;
4256 
4257 /*
4258 * We expect the OEM SW to perform error checking
4259 * so we just do some basic sanity checks
4260 */
4261 if (msg->rsp_size < 4) {
4262 /* Message not big enough, just ignore it. */
4263 ipmi_inc_stat(intf, invalid_commands);
4264 return 0;
4265 }
4266 
4267 if (msg->rsp[2] != 0) {
4268 /* An error getting the response, just ignore it. */
4269 return 0;
4270 }
4271 
4272 /*
4273 * This is an OEM Message so the OEM needs to know how
4274 * to handle the message. We do no interpretation.
4275 */
4276 netfn = msg->rsp[0] >> 2;
4277 cmd = msg->rsp[1];
4278 chan = msg->rsp[3] & 0xf;
4279 
4280 rcu_read_lock();
4281 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
4282 if (rcvr) {
4283 user = rcvr->user;
4284 kref_get(&user->refcount);
4285 } else
4286 user = NULL;
4287 rcu_read_unlock();
4288 
4289 if (user == NULL) {
4290 /* We didn't find a user, just give up. */
4291 ipmi_inc_stat(intf, unhandled_commands);
4292 
4293 /*
4294 * Don't do anything with these messages, just allow
4295 * them to be freed.
4296 */
4297 
4298 rv = 0;
4299 } else {
4300 recv_msg = ipmi_alloc_recv_msg();
4301 if (!recv_msg) {
4302 /*
4303 * We couldn't allocate memory for the
4304 * message, so requeue it for handling
4305 * later.
4306 */
4307 rv = 1;
4308 kref_put(&user->refcount, free_ipmi_user);
4309 } else {
4310 /*
4311 * OEM Messages are expected to be delivered via
4312 * the system interface to SMS software.
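* That is why the receive address is rewritten below as a
* system-interface address on IPMI_BMC_CHANNEL rather than keeping
* the OEM channel.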
We might 4313 * need to visit this again depending on OEM 4314 * requirements 4315 */ 4316 smi_addr = ((struct ipmi_system_interface_addr *) 4317 &recv_msg->addr); 4318 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 4319 smi_addr->channel = IPMI_BMC_CHANNEL; 4320 smi_addr->lun = msg->rsp[0] & 3; 4321 4322 recv_msg->user = user; 4323 recv_msg->user_msg_data = NULL; 4324 recv_msg->recv_type = IPMI_OEM_RECV_TYPE; 4325 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4326 recv_msg->msg.cmd = msg->rsp[1]; 4327 recv_msg->msg.data = recv_msg->msg_data; 4328 4329 /* 4330 * The message starts at byte 4 which follows the 4331 * Channel Byte in the "GET MESSAGE" command 4332 */ 4333 recv_msg->msg.data_len = msg->rsp_size - 4; 4334 memcpy(recv_msg->msg_data, &msg->rsp[4], 4335 msg->rsp_size - 4); 4336 if (deliver_response(intf, recv_msg)) 4337 ipmi_inc_stat(intf, unhandled_commands); 4338 else 4339 ipmi_inc_stat(intf, handled_commands); 4340 } 4341 } 4342 4343 return rv; 4344 } 4345 4346 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg, 4347 struct ipmi_smi_msg *msg) 4348 { 4349 struct ipmi_system_interface_addr *smi_addr; 4350 4351 recv_msg->msgid = 0; 4352 smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr; 4353 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 4354 smi_addr->channel = IPMI_BMC_CHANNEL; 4355 smi_addr->lun = msg->rsp[0] & 3; 4356 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE; 4357 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4358 recv_msg->msg.cmd = msg->rsp[1]; 4359 memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3); 4360 recv_msg->msg.data = recv_msg->msg_data; 4361 recv_msg->msg.data_len = msg->rsp_size - 3; 4362 } 4363 4364 static int handle_read_event_rsp(struct ipmi_smi *intf, 4365 struct ipmi_smi_msg *msg) 4366 { 4367 struct ipmi_recv_msg *recv_msg, *recv_msg2; 4368 struct list_head msgs; 4369 struct ipmi_user *user; 4370 int rv = 0, deliver_count = 0; 4371 4372 if (msg->rsp_size < 19) { 4373 /* Message is too small to be an IPMB event. */ 4374 ipmi_inc_stat(intf, invalid_events); 4375 return 0; 4376 } 4377 4378 if (msg->rsp[2] != 0) { 4379 /* An error getting the event, just ignore it. */ 4380 return 0; 4381 } 4382 4383 INIT_LIST_HEAD(&msgs); 4384 4385 mutex_lock(&intf->events_mutex); 4386 4387 ipmi_inc_stat(intf, events); 4388 4389 /* 4390 * Allocate and fill in one message for every user that is 4391 * getting events. 4392 */ 4393 mutex_lock(&intf->users_mutex); 4394 list_for_each_entry(user, &intf->users, link) { 4395 if (!user->gets_events) 4396 continue; 4397 4398 recv_msg = ipmi_alloc_recv_msg(); 4399 if (!recv_msg) { 4400 mutex_unlock(&intf->users_mutex); 4401 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, 4402 link) { 4403 user = recv_msg->user; 4404 list_del(&recv_msg->link); 4405 ipmi_free_recv_msg(recv_msg); 4406 kref_put(&user->refcount, free_ipmi_user); 4407 } 4408 /* 4409 * We couldn't allocate memory for the 4410 * message, so requeue it for handling 4411 * later. 4412 */ 4413 rv = 1; 4414 goto out; 4415 } 4416 4417 deliver_count++; 4418 4419 copy_event_into_recv_msg(recv_msg, msg); 4420 recv_msg->user = user; 4421 kref_get(&user->refcount); 4422 list_add_tail(&recv_msg->link, &msgs); 4423 } 4424 mutex_unlock(&intf->users_mutex); 4425 4426 if (deliver_count) { 4427 /* Now deliver all the messages. 
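Each message on the local list already holds a reference on its
user, taken above while users_mutex was held, so delivery is safe
after that lock is dropped.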
*/
4428 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
4429 list_del(&recv_msg->link);
4430 deliver_local_response(intf, recv_msg);
4431 }
4432 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
4433 /*
4434 * No one to receive the message, put it in the queue if there
4435 * aren't already too many things in the queue.
4436 */
4437 recv_msg = ipmi_alloc_recv_msg();
4438 if (!recv_msg) {
4439 /*
4440 * We couldn't allocate memory for the
4441 * message, so requeue it for handling
4442 * later.
4443 */
4444 rv = 1;
4445 goto out;
4446 }
4447 
4448 copy_event_into_recv_msg(recv_msg, msg);
4449 list_add_tail(&recv_msg->link, &intf->waiting_events);
4450 intf->waiting_events_count++;
4451 } else if (!intf->event_msg_printed) {
4452 /*
4453 * There are too many things in the queue, discard this
4454 * message.
4455 */
4456 dev_warn(intf->si_dev,
4457 "Event queue full, discarding incoming events\n");
4458 intf->event_msg_printed = 1;
4459 }
4460 
4461 out:
4462 mutex_unlock(&intf->events_mutex);
4463 
4464 return rv;
4465 }
4466 
4467 static int handle_bmc_rsp(struct ipmi_smi *intf,
4468 struct ipmi_smi_msg *msg)
4469 {
4470 struct ipmi_recv_msg *recv_msg;
4471 struct ipmi_system_interface_addr *smi_addr;
4472 
4473 recv_msg = msg->user_data;
4474 if (recv_msg == NULL) {
4475 dev_warn(intf->si_dev,
4476 "IPMI SMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
4477 return 0;
4478 }
4479 
4480 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4481 recv_msg->msgid = msg->msgid;
4482 smi_addr = ((struct ipmi_system_interface_addr *)
4483 &recv_msg->addr);
4484 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4485 smi_addr->channel = IPMI_BMC_CHANNEL;
4486 smi_addr->lun = msg->rsp[0] & 3;
4487 recv_msg->msg.netfn = msg->rsp[0] >> 2;
4488 recv_msg->msg.cmd = msg->rsp[1];
4489 memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
4490 recv_msg->msg.data = recv_msg->msg_data;
4491 recv_msg->msg.data_len = msg->rsp_size - 2;
4492 deliver_local_response(intf, recv_msg);
4493 
4494 return 0;
4495 }
4496 
4497 /*
4498 * Handle a received message. Return 1 if the message should be requeued,
4499 * 0 if the message should be freed, or -1 if the message should not
4500 * be freed or requeued.
4501 */
4502 static int handle_one_recv_msg(struct ipmi_smi *intf,
4503 struct ipmi_smi_msg *msg)
4504 {
4505 int requeue = 0;
4506 int chan;
4507 unsigned char cc;
4508 bool is_cmd = !((msg->rsp[0] >> 2) & 1);
4509 
4510 dev_dbg(intf->si_dev, "Recv: %*ph\n", msg->rsp_size, msg->rsp);
4511 
4512 if (msg->rsp_size < 2) {
4513 /* Message is too small to be correct. */
4514 dev_warn(intf->si_dev,
4515 "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
4516 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
4517 
4518 return_unspecified:
4519 /* Generate an error response for the message. */
4520 msg->rsp[0] = msg->data[0] | (1 << 2);
4521 msg->rsp[1] = msg->data[1];
4522 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4523 msg->rsp_size = 3;
4524 } else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
4525 /* commands must have at least 4 bytes, responses 5. */
4526 if (is_cmd && (msg->rsp_size < 4)) {
4527 ipmi_inc_stat(intf, invalid_commands);
4528 goto out;
4529 }
4530 if (!is_cmd && (msg->rsp_size < 5)) {
4531 ipmi_inc_stat(intf, invalid_ipmb_responses);
4532 /* Construct a valid error response.
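The synthesized reply below mirrors the request header with the
LUNs swapped, echoes the command, and reports IPMI_ERR_UNSPECIFIED
as the completion code, so the caller always sees a parseable
response.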
*/
4533 msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */
4534 msg->rsp[0] |= (1 << 2); /* Make it a response */
4535 msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */
4536 msg->rsp[1] = msg->data[1]; /* Addr */
4537 msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */
4538 msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */
4539 msg->rsp[3] = msg->data[3]; /* Cmd */
4540 msg->rsp[4] = IPMI_ERR_UNSPECIFIED;
4541 msg->rsp_size = 5;
4542 }
4543 } else if ((msg->data_size >= 2)
4544 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
4545 && (msg->data[1] == IPMI_SEND_MSG_CMD)
4546 && (msg->user_data == NULL)) {
4547 
4548 if (intf->in_shutdown)
4549 goto out;
4550 
4551 /*
4552 * This is the local response to a command send, start
4553 * the timer for these. The user_data will not be
4554 * NULL if this is a response send, and we will let
4555 * response sends just go through.
4556 */
4557 
4558 /*
4559 * Check for errors, if we get certain errors (ones
4560 * that mean basically we can try again later), we
4561 * ignore them and start the timer. Otherwise we
4562 * report the error immediately.
4563 */
4564 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
4565 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
4566 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
4567 && (msg->rsp[2] != IPMI_BUS_ERR)
4568 && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
4569 int ch = msg->rsp[3] & 0xf;
4570 struct ipmi_channel *chans;
4571 
4572 /* Got an error sending the message, handle it. */
4573 
4574 chans = READ_ONCE(intf->channel_list)->c;
4575 if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
4576 || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
4577 ipmi_inc_stat(intf, sent_lan_command_errs);
4578 else
4579 ipmi_inc_stat(intf, sent_ipmb_command_errs);
4580 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
4581 } else
4582 /* The message was sent, start the timer. */
4583 intf_start_seq_timer(intf, msg->msgid);
4584 requeue = 0;
4585 goto out;
4586 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
4587 || (msg->rsp[1] != msg->data[1])) {
4588 /*
4589 * The NetFN and Command in the response are not even
4590 * marginally correct.
4591 */
4592 dev_warn(intf->si_dev,
4593 "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
4594 (msg->data[0] >> 2) | 1, msg->data[1],
4595 msg->rsp[0] >> 2, msg->rsp[1]);
4596 
4597 goto return_unspecified;
4598 }
4599 
4600 if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
4601 if ((msg->data[0] >> 2) & 1) {
4602 /* It's a response to a sent response. */
4603 chan = 0;
4604 cc = msg->rsp[4];
4605 goto process_response_response;
4606 }
4607 if (is_cmd)
4608 requeue = handle_ipmb_direct_rcv_cmd(intf, msg);
4609 else
4610 requeue = handle_ipmb_direct_rcv_rsp(intf, msg);
4611 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4612 && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
4613 && (msg->user_data != NULL)) {
4614 /*
4615 * It's a response to a response we sent. For this we
4616 * deliver a send message response to the user.
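* Only the completion code matters in this case, which is why the
* payload delivered below is a single byte.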
4617 */ 4618 struct ipmi_recv_msg *recv_msg; 4619 4620 chan = msg->data[2] & 0x0f; 4621 if (chan >= IPMI_MAX_CHANNELS) 4622 /* Invalid channel number */ 4623 goto out; 4624 cc = msg->rsp[2]; 4625 4626 process_response_response: 4627 recv_msg = msg->user_data; 4628 4629 requeue = 0; 4630 if (!recv_msg) 4631 goto out; 4632 4633 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE; 4634 recv_msg->msg.data = recv_msg->msg_data; 4635 recv_msg->msg_data[0] = cc; 4636 recv_msg->msg.data_len = 1; 4637 deliver_local_response(intf, recv_msg); 4638 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 4639 && (msg->rsp[1] == IPMI_GET_MSG_CMD)) { 4640 struct ipmi_channel *chans; 4641 4642 /* It's from the receive queue. */ 4643 chan = msg->rsp[3] & 0xf; 4644 if (chan >= IPMI_MAX_CHANNELS) { 4645 /* Invalid channel number */ 4646 requeue = 0; 4647 goto out; 4648 } 4649 4650 /* 4651 * We need to make sure the channels have been initialized. 4652 * The channel_handler routine will set the "curr_channel" 4653 * equal to or greater than IPMI_MAX_CHANNELS when all the 4654 * channels for this interface have been initialized. 4655 */ 4656 if (!intf->channels_ready) { 4657 requeue = 0; /* Throw the message away */ 4658 goto out; 4659 } 4660 4661 chans = READ_ONCE(intf->channel_list)->c; 4662 4663 switch (chans[chan].medium) { 4664 case IPMI_CHANNEL_MEDIUM_IPMB: 4665 if (msg->rsp[4] & 0x04) { 4666 /* 4667 * It's a response, so find the 4668 * requesting message and send it up. 4669 */ 4670 requeue = handle_ipmb_get_msg_rsp(intf, msg); 4671 } else { 4672 /* 4673 * It's a command to the SMS from some other 4674 * entity. Handle that. 4675 */ 4676 requeue = handle_ipmb_get_msg_cmd(intf, msg); 4677 } 4678 break; 4679 4680 case IPMI_CHANNEL_MEDIUM_8023LAN: 4681 case IPMI_CHANNEL_MEDIUM_ASYNC: 4682 if (msg->rsp[6] & 0x04) { 4683 /* 4684 * It's a response, so find the 4685 * requesting message and send it up. 4686 */ 4687 requeue = handle_lan_get_msg_rsp(intf, msg); 4688 } else { 4689 /* 4690 * It's a command to the SMS from some other 4691 * entity. Handle that. 4692 */ 4693 requeue = handle_lan_get_msg_cmd(intf, msg); 4694 } 4695 break; 4696 4697 default: 4698 /* Check for OEM Channels. Clients had better 4699 register for these commands. */ 4700 if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN) 4701 && (chans[chan].medium 4702 <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) { 4703 requeue = handle_oem_get_msg_cmd(intf, msg); 4704 } else { 4705 /* 4706 * We don't handle the channel type, so just 4707 * free the message. 4708 */ 4709 requeue = 0; 4710 } 4711 } 4712 4713 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 4714 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) { 4715 /* It's an asynchronous event. */ 4716 requeue = handle_read_event_rsp(intf, msg); 4717 } else { 4718 /* It's a response from the local BMC. */ 4719 requeue = handle_bmc_rsp(intf, msg); 4720 } 4721 4722 out: 4723 return requeue; 4724 } 4725 4726 /* 4727 * If there are messages in the queue or pretimeouts, handle them. 4728 */ 4729 static void handle_new_recv_msgs(struct ipmi_smi *intf) 4730 { 4731 struct ipmi_smi_msg *smi_msg; 4732 unsigned long flags = 0; 4733 int rv; 4734 int run_to_completion = READ_ONCE(intf->run_to_completion); 4735 4736 /* See if any waiting messages need to be processed. 
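The queue lock is dropped around handle_one_recv_msg() below because
handling a message can take other locks and even send new messages;
in run_to_completion (panic) mode no locking is done at all.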
*/ 4737 if (!run_to_completion) 4738 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); 4739 while (!list_empty(&intf->waiting_rcv_msgs)) { 4740 smi_msg = list_entry(intf->waiting_rcv_msgs.next, 4741 struct ipmi_smi_msg, link); 4742 list_del(&smi_msg->link); 4743 if (!run_to_completion) 4744 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, 4745 flags); 4746 rv = handle_one_recv_msg(intf, smi_msg); 4747 if (!run_to_completion) 4748 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); 4749 if (rv > 0) { 4750 /* 4751 * To preserve message order, quit if we 4752 * can't handle a message. Add the message 4753 * back at the head, this is safe because this 4754 * workqueue is the only thing that pulls the 4755 * messages. 4756 */ 4757 list_add(&smi_msg->link, &intf->waiting_rcv_msgs); 4758 break; 4759 } else { 4760 if (rv == 0) 4761 /* Message handled */ 4762 ipmi_free_smi_msg(smi_msg); 4763 /* If rv < 0, fatal error, del but don't free. */ 4764 } 4765 } 4766 if (!run_to_completion) 4767 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags); 4768 } 4769 4770 static void smi_work(struct work_struct *t) 4771 { 4772 unsigned long flags = 0; /* keep us warning-free. */ 4773 struct ipmi_smi *intf = from_work(intf, t, smi_work); 4774 int run_to_completion = READ_ONCE(intf->run_to_completion); 4775 struct ipmi_smi_msg *newmsg = NULL; 4776 struct ipmi_recv_msg *msg, *msg2; 4777 4778 /* 4779 * Start the next message if available. 4780 * 4781 * Do this here, not in the actual receiver, because we may deadlock 4782 * because the lower layer is allowed to hold locks while calling 4783 * message delivery. 4784 */ 4785 4786 if (!run_to_completion) 4787 spin_lock_irqsave(&intf->xmit_msgs_lock, flags); 4788 if (intf->curr_msg == NULL && !intf->in_shutdown) { 4789 struct list_head *entry = NULL; 4790 4791 /* Pick the high priority queue first. */ 4792 if (!list_empty(&intf->hp_xmit_msgs)) 4793 entry = intf->hp_xmit_msgs.next; 4794 else if (!list_empty(&intf->xmit_msgs)) 4795 entry = intf->xmit_msgs.next; 4796 4797 if (entry) { 4798 list_del(entry); 4799 newmsg = list_entry(entry, struct ipmi_smi_msg, link); 4800 intf->curr_msg = newmsg; 4801 } 4802 } 4803 if (!run_to_completion) 4804 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); 4805 4806 if (newmsg) 4807 intf->handlers->sender(intf->send_info, newmsg); 4808 4809 handle_new_recv_msgs(intf); 4810 4811 /* 4812 * If the pretimout count is non-zero, decrement one from it and 4813 * deliver pretimeouts to all the users. 4814 */ 4815 if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) { 4816 struct ipmi_user *user; 4817 4818 mutex_lock(&intf->users_mutex); 4819 list_for_each_entry(user, &intf->users, link) { 4820 if (user->handler->ipmi_watchdog_pretimeout) 4821 user->handler->ipmi_watchdog_pretimeout( 4822 user->handler_data); 4823 } 4824 mutex_unlock(&intf->users_mutex); 4825 } 4826 4827 mutex_lock(&intf->user_msgs_mutex); 4828 list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) { 4829 struct ipmi_user *user = msg->user; 4830 4831 list_del(&msg->link); 4832 atomic_dec(&user->nr_msgs); 4833 user->handler->ipmi_recv_hndl(msg, user->handler_data); 4834 release_ipmi_user(user); 4835 } 4836 mutex_unlock(&intf->user_msgs_mutex); 4837 } 4838 4839 /* Handle a new message from the lower layer. */ 4840 void ipmi_smi_msg_received(struct ipmi_smi *intf, 4841 struct ipmi_smi_msg *msg) 4842 { 4843 unsigned long flags = 0; /* keep us warning-free. 
*/ 4844 int run_to_completion = READ_ONCE(intf->run_to_completion); 4845 4846 /* 4847 * To preserve message order, we keep a queue and deliver from 4848 * a workqueue. 4849 */ 4850 if (!run_to_completion) 4851 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); 4852 list_add_tail(&msg->link, &intf->waiting_rcv_msgs); 4853 if (!run_to_completion) 4854 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, 4855 flags); 4856 4857 if (!run_to_completion) 4858 spin_lock_irqsave(&intf->xmit_msgs_lock, flags); 4859 /* 4860 * We can get an asynchronous event or receive message in addition 4861 * to commands we send. 4862 */ 4863 if (msg == intf->curr_msg) 4864 intf->curr_msg = NULL; 4865 if (!run_to_completion) 4866 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); 4867 4868 if (run_to_completion) 4869 smi_work(&intf->smi_work); 4870 else 4871 queue_work(system_wq, &intf->smi_work); 4872 } 4873 EXPORT_SYMBOL(ipmi_smi_msg_received); 4874 4875 void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf) 4876 { 4877 if (intf->in_shutdown) 4878 return; 4879 4880 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1); 4881 queue_work(system_wq, &intf->smi_work); 4882 } 4883 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); 4884 4885 static struct ipmi_smi_msg * 4886 smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg, 4887 unsigned char seq, long seqid) 4888 { 4889 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg(); 4890 if (!smi_msg) 4891 /* 4892 * If we can't allocate the message, then just return, we 4893 * get 4 retries, so this should be ok. 4894 */ 4895 return NULL; 4896 4897 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len); 4898 smi_msg->data_size = recv_msg->msg.data_len; 4899 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid); 4900 4901 dev_dbg(intf->si_dev, "Resend: %*ph\n", 4902 smi_msg->data_size, smi_msg->data); 4903 4904 return smi_msg; 4905 } 4906 4907 static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent, 4908 struct list_head *timeouts, 4909 unsigned long timeout_period, 4910 int slot, unsigned long *flags, 4911 bool *need_timer) 4912 { 4913 struct ipmi_recv_msg *msg; 4914 4915 if (intf->in_shutdown) 4916 return; 4917 4918 if (!ent->inuse) 4919 return; 4920 4921 if (timeout_period < ent->timeout) { 4922 ent->timeout -= timeout_period; 4923 *need_timer = true; 4924 return; 4925 } 4926 4927 if (ent->retries_left == 0) { 4928 /* The message has used all its retries. */ 4929 ent->inuse = 0; 4930 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); 4931 msg = ent->recv_msg; 4932 list_add_tail(&msg->link, timeouts); 4933 if (ent->broadcast) 4934 ipmi_inc_stat(intf, timed_out_ipmb_broadcasts); 4935 else if (is_lan_addr(&ent->recv_msg->addr)) 4936 ipmi_inc_stat(intf, timed_out_lan_commands); 4937 else 4938 ipmi_inc_stat(intf, timed_out_ipmb_commands); 4939 } else { 4940 struct ipmi_smi_msg *smi_msg; 4941 /* More retries, send again. */ 4942 4943 *need_timer = true; 4944 4945 /* 4946 * Start with the max timer, set to normal timer after 4947 * the message is sent. 4948 */ 4949 ent->timeout = MAX_MSG_TIMEOUT; 4950 ent->retries_left--; 4951 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot, 4952 ent->seqid); 4953 if (!smi_msg) { 4954 if (is_lan_addr(&ent->recv_msg->addr)) 4955 ipmi_inc_stat(intf, 4956 dropped_rexmit_lan_commands); 4957 else 4958 ipmi_inc_stat(intf, 4959 dropped_rexmit_ipmb_commands); 4960 return; 4961 } 4962 4963 spin_unlock_irqrestore(&intf->seq_lock, *flags); 4964 4965 /* 4966 * Send the new message. 
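* The seq_lock was dropped above because the send path can take
* other locks; it is reacquired once the send is done.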
We send with a zero 4967 * priority. It timed out, I doubt time is that 4968 * critical now, and high priority messages are really 4969 * only for messages to the local MC, which don't get 4970 * resent. 4971 */ 4972 if (intf->handlers) { 4973 if (is_lan_addr(&ent->recv_msg->addr)) 4974 ipmi_inc_stat(intf, 4975 retransmitted_lan_commands); 4976 else 4977 ipmi_inc_stat(intf, 4978 retransmitted_ipmb_commands); 4979 4980 smi_send(intf, intf->handlers, smi_msg, 0); 4981 } else 4982 ipmi_free_smi_msg(smi_msg); 4983 4984 spin_lock_irqsave(&intf->seq_lock, *flags); 4985 } 4986 } 4987 4988 static bool ipmi_timeout_handler(struct ipmi_smi *intf, 4989 unsigned long timeout_period) 4990 { 4991 struct list_head timeouts; 4992 struct ipmi_recv_msg *msg, *msg2; 4993 unsigned long flags; 4994 int i; 4995 bool need_timer = false; 4996 4997 if (!intf->bmc_registered) { 4998 kref_get(&intf->refcount); 4999 if (!schedule_work(&intf->bmc_reg_work)) { 5000 kref_put(&intf->refcount, intf_free); 5001 need_timer = true; 5002 } 5003 } 5004 5005 /* 5006 * Go through the seq table and find any messages that 5007 * have timed out, putting them in the timeouts 5008 * list. 5009 */ 5010 INIT_LIST_HEAD(&timeouts); 5011 spin_lock_irqsave(&intf->seq_lock, flags); 5012 if (intf->ipmb_maintenance_mode_timeout) { 5013 if (intf->ipmb_maintenance_mode_timeout <= timeout_period) 5014 intf->ipmb_maintenance_mode_timeout = 0; 5015 else 5016 intf->ipmb_maintenance_mode_timeout -= timeout_period; 5017 } 5018 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) 5019 check_msg_timeout(intf, &intf->seq_table[i], 5020 &timeouts, timeout_period, i, 5021 &flags, &need_timer); 5022 spin_unlock_irqrestore(&intf->seq_lock, flags); 5023 5024 list_for_each_entry_safe(msg, msg2, &timeouts, link) 5025 deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE); 5026 5027 /* 5028 * Maintenance mode handling. Check the timeout 5029 * optimistically before we claim the lock. It may 5030 * mean a timeout gets missed occasionally, but that 5031 * only means the timeout gets extended by one period 5032 * in that case. No big deal, and it avoids the lock 5033 * most of the time. 5034 */ 5035 if (intf->auto_maintenance_timeout > 0) { 5036 spin_lock_irqsave(&intf->maintenance_mode_lock, flags); 5037 if (intf->auto_maintenance_timeout > 0) { 5038 intf->auto_maintenance_timeout 5039 -= timeout_period; 5040 if (!intf->maintenance_mode 5041 && (intf->auto_maintenance_timeout <= 0)) { 5042 intf->maintenance_mode_enable = false; 5043 maintenance_mode_update(intf); 5044 } 5045 } 5046 spin_unlock_irqrestore(&intf->maintenance_mode_lock, 5047 flags); 5048 } 5049 5050 queue_work(system_wq, &intf->smi_work); 5051 5052 return need_timer; 5053 } 5054 5055 static void ipmi_request_event(struct ipmi_smi *intf) 5056 { 5057 /* No event requests when in maintenance mode. 
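Asking the BMC for events here could interfere with whatever
maintenance operation (a firmware update, for instance) put the
interface into this mode.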
*/
5058 if (intf->maintenance_mode_enable)
5059 return;
5060 
5061 if (!intf->in_shutdown)
5062 intf->handlers->request_events(intf->send_info);
5063 }
5064 
5065 static struct timer_list ipmi_timer;
5066 
5067 static atomic_t stop_operation;
5068 
5069 static void ipmi_timeout_work(struct work_struct *work)
5070 {
5074 struct ipmi_smi *intf;
5075 bool need_timer = false;
5076 
5077 if (atomic_read(&stop_operation))
5078 return;
5079 
5080 mutex_lock(&ipmi_interfaces_mutex);
5081 list_for_each_entry(intf, &ipmi_interfaces, link) {
5082 if (atomic_read(&intf->event_waiters)) {
5083 intf->ticks_to_req_ev--;
5084 if (intf->ticks_to_req_ev == 0) {
5085 ipmi_request_event(intf);
5086 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
5087 }
5088 need_timer = true;
5089 }
5090 
5091 need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
5092 }
5093 mutex_unlock(&ipmi_interfaces_mutex);
5094 
5095 if (need_timer)
5096 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
5097 }
5098 
5099 static DECLARE_WORK(ipmi_timer_work, ipmi_timeout_work);
5100 
5101 static void ipmi_timeout(struct timer_list *unused)
5102 {
5103 if (atomic_read(&stop_operation))
5104 return;
5105 
5106 queue_work(system_wq, &ipmi_timer_work);
5107 }
5108 
5109 static void need_waiter(struct ipmi_smi *intf)
5110 {
5111 /* Racy, but worst case we start the timer twice. */
5112 if (!timer_pending(&ipmi_timer))
5113 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
5114 }
5115 
5116 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
5117 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
5118 
5119 static void free_smi_msg(struct ipmi_smi_msg *msg)
5120 {
5121 atomic_dec(&smi_msg_inuse_count);
5122 /* Try to keep as much stuff out of the panic path as possible. */
5123 if (!oops_in_progress)
5124 kfree(msg);
5125 }
5126 
5127 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
5128 {
5129 struct ipmi_smi_msg *rv;
5130 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
5131 if (rv) {
5132 rv->done = free_smi_msg;
5133 rv->user_data = NULL;
5134 rv->type = IPMI_SMI_MSG_TYPE_NORMAL;
5135 atomic_inc(&smi_msg_inuse_count);
5136 }
5137 return rv;
5138 }
5139 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
5140 
5141 static void free_recv_msg(struct ipmi_recv_msg *msg)
5142 {
5143 atomic_dec(&recv_msg_inuse_count);
5144 /* Try to keep as much stuff out of the panic path as possible. */
5145 if (!oops_in_progress)
5146 kfree(msg);
5147 }
5148 
5149 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
5150 {
5151 struct ipmi_recv_msg *rv;
5152 
5153 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
5154 if (rv) {
5155 rv->user = NULL;
5156 rv->done = free_recv_msg;
5157 atomic_inc(&recv_msg_inuse_count);
5158 }
5159 return rv;
5160 }
5161 
5162 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
5163 {
5164 if (msg->user && !oops_in_progress)
5165 kref_put(&msg->user->refcount, free_ipmi_user);
5166 msg->done(msg);
5167 }
5168 EXPORT_SYMBOL(ipmi_free_recv_msg);
5169 
5170 static atomic_t panic_done_count = ATOMIC_INIT(0);
5171 
5172 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
5173 {
5174 atomic_dec(&panic_done_count);
5175 }
5176 
5177 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
5178 {
5179 atomic_dec(&panic_done_count);
5180 }
5181 
5182 /*
5183 * Inside a panic, send a message and wait for a response.
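* Both messages live on the stack and completion is tracked with
* panic_done_count: the dummy done handlers above decrement it, and
* ipmi_poll() is spun until both the SMI and receive halves have
* finished, since nothing may sleep or schedule here.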
5184 */ 5185 static void ipmi_panic_request_and_wait(struct ipmi_smi *intf, 5186 struct ipmi_addr *addr, 5187 struct kernel_ipmi_msg *msg) 5188 { 5189 struct ipmi_smi_msg smi_msg; 5190 struct ipmi_recv_msg recv_msg; 5191 int rv; 5192 5193 smi_msg.done = dummy_smi_done_handler; 5194 recv_msg.done = dummy_recv_done_handler; 5195 atomic_add(2, &panic_done_count); 5196 rv = i_ipmi_request(NULL, 5197 intf, 5198 addr, 5199 0, 5200 msg, 5201 intf, 5202 &smi_msg, 5203 &recv_msg, 5204 0, 5205 intf->addrinfo[0].address, 5206 intf->addrinfo[0].lun, 5207 0, 1); /* Don't retry, and don't wait. */ 5208 if (rv) 5209 atomic_sub(2, &panic_done_count); 5210 else if (intf->handlers->flush_messages) 5211 intf->handlers->flush_messages(intf->send_info); 5212 5213 while (atomic_read(&panic_done_count) != 0) 5214 ipmi_poll(intf); 5215 } 5216 5217 static void event_receiver_fetcher(struct ipmi_smi *intf, 5218 struct ipmi_recv_msg *msg) 5219 { 5220 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 5221 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE) 5222 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD) 5223 && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) { 5224 /* A get event receiver command, save it. */ 5225 intf->event_receiver = msg->msg.data[1]; 5226 intf->event_receiver_lun = msg->msg.data[2] & 0x3; 5227 } 5228 } 5229 5230 static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 5231 { 5232 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 5233 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) 5234 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD) 5235 && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) { 5236 /* 5237 * A get device id command, save if we are an event 5238 * receiver or generator. 5239 */ 5240 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1; 5241 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1; 5242 } 5243 } 5244 5245 static void send_panic_events(struct ipmi_smi *intf, char *str) 5246 { 5247 struct kernel_ipmi_msg msg; 5248 unsigned char data[16]; 5249 struct ipmi_system_interface_addr *si; 5250 struct ipmi_addr addr; 5251 char *p = str; 5252 struct ipmi_ipmb_addr *ipmb; 5253 int j; 5254 5255 if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE) 5256 return; 5257 5258 si = (struct ipmi_system_interface_addr *) &addr; 5259 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 5260 si->channel = IPMI_BMC_CHANNEL; 5261 si->lun = 0; 5262 5263 /* Fill in an event telling that we have failed. */ 5264 msg.netfn = 0x04; /* Sensor or Event. */ 5265 msg.cmd = 2; /* Platform event command. */ 5266 msg.data = data; 5267 msg.data_len = 8; 5268 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */ 5269 data[1] = 0x03; /* This is for IPMI 1.0. */ 5270 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */ 5271 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */ 5272 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */ 5273 5274 /* 5275 * Put a few breadcrumbs in. Hopefully later we can add more things 5276 * to make the panic events more useful. 5277 */ 5278 if (str) { 5279 data[3] = str[0]; 5280 data[6] = str[1]; 5281 data[7] = str[2]; 5282 } 5283 5284 /* Send the event announcing the panic. */ 5285 ipmi_panic_request_and_wait(intf, &addr, &msg); 5286 5287 /* 5288 * On every interface, dump a bunch of OEM event holding the 5289 * string. 5290 */ 5291 if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str) 5292 return; 5293 5294 /* 5295 * intf_num is used as an marker to tell if the 5296 * interface is valid. 
Thus we need a read barrier to
5297 * make sure data fetched before checking intf_num
5298 * won't be used.
5299 */
5300 smp_rmb();
5301 
5302 /*
5303 * First job here is to figure out where to send the
5304 * OEM events. There's no way in IPMI to send OEM
5305 * events using an event send command, so we have to
5306 * find the SEL to put them in and stick them in
5307 * there.
5308 */
5309 
5310 /* Get capabilities from the get device id. */
5311 intf->local_sel_device = 0;
5312 intf->local_event_generator = 0;
5313 intf->event_receiver = 0;
5314 
5315 /* Request the device info from the local MC. */
5316 msg.netfn = IPMI_NETFN_APP_REQUEST;
5317 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
5318 msg.data = NULL;
5319 msg.data_len = 0;
5320 intf->null_user_handler = device_id_fetcher;
5321 ipmi_panic_request_and_wait(intf, &addr, &msg);
5322 
5323 if (intf->local_event_generator) {
5324 /* Request the event receiver from the local MC. */
5325 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
5326 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
5327 msg.data = NULL;
5328 msg.data_len = 0;
5329 intf->null_user_handler = event_receiver_fetcher;
5330 ipmi_panic_request_and_wait(intf, &addr, &msg);
5331 }
5332 intf->null_user_handler = NULL;
5333 
5334 /*
5335 * Validate the event receiver. The low bit must not
5336 * be 1 (it must be a valid IPMB address), it cannot
5337 * be zero, and it must not be my address.
5338 */
5339 if (((intf->event_receiver & 1) == 0)
5340 && (intf->event_receiver != 0)
5341 && (intf->event_receiver != intf->addrinfo[0].address)) {
5342 /*
5343 * The event receiver is valid, send an IPMB
5344 * message.
5345 */
5346 ipmb = (struct ipmi_ipmb_addr *) &addr;
5347 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
5348 ipmb->channel = 0; /* FIXME - is this right? */
5349 ipmb->lun = intf->event_receiver_lun;
5350 ipmb->slave_addr = intf->event_receiver;
5351 } else if (intf->local_sel_device) {
5352 /*
5353 * The event receiver was not valid (or was
5354 * me), but I am an SEL device, just dump it
5355 * in my SEL.
5356 */
5357 si = (struct ipmi_system_interface_addr *) &addr;
5358 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
5359 si->channel = IPMI_BMC_CHANNEL;
5360 si->lun = 0;
5361 } else
5362 return; /* Nowhere to send the event. */
5363 
5364 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
5365 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
5366 msg.data = data;
5367 msg.data_len = 16;
5368 
5369 j = 0;
5370 while (*p) {
5371 int size = strnlen(p, 11);
5372 
5373 data[0] = 0;
5374 data[1] = 0;
5375 data[2] = 0xf0; /* OEM event without timestamp. */
5376 data[3] = intf->addrinfo[0].address;
5377 data[4] = j++; /* sequence # */
5378 
5379 memcpy_and_pad(data+5, 11, p, size, '\0');
5380 p += size;
5381 
5382 ipmi_panic_request_and_wait(intf, &addr, &msg);
5383 }
5384 }
5385 
5386 static int has_panicked;
5387 
5388 static int panic_event(struct notifier_block *this,
5389 unsigned long event,
5390 void *ptr)
5391 {
5392 struct ipmi_smi *intf;
5393 struct ipmi_user *user;
5394 
5395 if (has_panicked)
5396 return NOTIFY_DONE;
5397 has_panicked = 1;
5398 
5399 /* For every registered interface, set it to run to completion. */
5400 list_for_each_entry(intf, &ipmi_interfaces, link) {
5401 if (!intf->handlers || intf->intf_num == -1)
5402 /* Interface is not ready. */
5403 continue;
5404 
5405 if (!intf->handlers->poll)
5406 continue;
5407 
5408 /*
5409 * If we were interrupted while locking xmit_msgs_lock or
5410 * waiting_rcv_msgs_lock, the corresponding list may be
5411 * corrupted.
In this case, drop items on the list for
5412 * safety.
5413 */
5414 if (!spin_trylock(&intf->xmit_msgs_lock)) {
5415 INIT_LIST_HEAD(&intf->xmit_msgs);
5416 INIT_LIST_HEAD(&intf->hp_xmit_msgs);
5417 } else
5418 spin_unlock(&intf->xmit_msgs_lock);
5419 
5420 if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
5421 INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
5422 else
5423 spin_unlock(&intf->waiting_rcv_msgs_lock);
5424 
5425 intf->run_to_completion = 1;
5426 if (intf->handlers->set_run_to_completion)
5427 intf->handlers->set_run_to_completion(intf->send_info,
5428 1);
5429 
5430 list_for_each_entry(user, &intf->users, link) {
5431 if (user->handler->ipmi_panic_handler)
5432 user->handler->ipmi_panic_handler(
5433 user->handler_data);
5434 }
5435 
5436 send_panic_events(intf, ptr);
5437 }
5438 
5439 return NOTIFY_DONE;
5440 }
5441 
5442 /* Must be called with ipmi_interfaces_mutex held. */
5443 static int ipmi_register_driver(void)
5444 {
5445 int rv;
5446 
5447 if (drvregistered)
5448 return 0;
5449 
5450 rv = driver_register(&ipmidriver.driver);
5451 if (rv)
5452 pr_err("Could not register IPMI driver\n");
5453 else
5454 drvregistered = true;
5455 return rv;
5456 }
5457 
5458 static struct notifier_block panic_block = {
5459 .notifier_call = panic_event,
5460 .next = NULL,
5461 .priority = 200 /* priority: INT_MAX >= x >= 0 */
5462 };
5463 
5464 static int ipmi_init_msghandler(void)
5465 {
5466 int rv;
5467 
5468 mutex_lock(&ipmi_interfaces_mutex);
5469 rv = ipmi_register_driver();
5470 if (rv)
5471 goto out;
5472 if (initialized)
5473 goto out;
5474 
5475 bmc_remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
5476 if (!bmc_remove_work_wq) {
5477 pr_err("unable to create ipmi-msghandler-remove-wq workqueue\n");
5478 rv = -ENOMEM;
5479 goto out;
5480 }
5481 
5482 timer_setup(&ipmi_timer, ipmi_timeout, 0);
5483 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
5484 
5485 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
5486 
5487 initialized = true;
5488 
5489 out:
5490 mutex_unlock(&ipmi_interfaces_mutex);
5491 return rv;
5492 }
5493 
5494 static int __init ipmi_init_msghandler_mod(void)
5495 {
5496 int rv;
5497 
5498 pr_info("version " IPMI_DRIVER_VERSION "\n");
5499 
5500 mutex_lock(&ipmi_interfaces_mutex);
5501 rv = ipmi_register_driver();
5502 mutex_unlock(&ipmi_interfaces_mutex);
5503 
5504 return rv;
5505 }
5506 
5507 static void __exit cleanup_ipmi(void)
5508 {
5509 int count;
5510 
5511 if (initialized) {
5512 destroy_workqueue(bmc_remove_work_wq);
5513 
5514 atomic_notifier_chain_unregister(&panic_notifier_list,
5515 &panic_block);
5516 
5517 /*
5518 * This can't be called if any interfaces exist, so no worry
5519 * about shutting down the interfaces.
5520 */
5521 
5522 /*
5523 * Tell the timer to stop, then wait for it to stop. This
5524 * avoids problems with race conditions removing the timer
5525 * here.
5526 */
5527 atomic_set(&stop_operation, 1);
5528 timer_delete_sync(&ipmi_timer);
5529 cancel_work_sync(&ipmi_timer_work);
5530 
5531 initialized = false;
5532 
5533 /* Check for buffer leaks.
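Every message allocation increments one of the inuse counters and
every free decrements it, so a nonzero count here means a message
escaped cleanup.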
*/ 5534 count = atomic_read(&smi_msg_inuse_count); 5535 if (count != 0) 5536 pr_warn("SMI message count %d at exit\n", count); 5537 count = atomic_read(&recv_msg_inuse_count); 5538 if (count != 0) 5539 pr_warn("recv message count %d at exit\n", count); 5540 } 5541 if (drvregistered) 5542 driver_unregister(&ipmidriver.driver); 5543 } 5544 module_exit(cleanup_ipmi); 5545 5546 module_init(ipmi_init_msghandler_mod); 5547 MODULE_LICENSE("GPL"); 5548 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); 5549 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface."); 5550 MODULE_VERSION(IPMI_DRIVER_VERSION); 5551 MODULE_SOFTDEP("post: ipmi_devintf"); 5552