// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_msghandler.c
 *
 * Incoming and outgoing message routing for an IPMI interface.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 */

#define pr_fmt(fmt) "IPMI message handler: " fmt
#define dev_fmt(fmt) pr_fmt(fmt)

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include <linux/nospec.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>

#define IPMI_DRIVER_VERSION "39.2"

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
static void smi_work(struct work_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
                               struct ipmi_smi_msg *msg);
static void intf_free(struct kref *ref);

static bool initialized;
static bool drvregistered;

/* Numbers in this enumerator should be mapped to ipmi_panic_event_str */
enum ipmi_panic_event_op {
        IPMI_SEND_PANIC_EVENT_NONE,
        IPMI_SEND_PANIC_EVENT,
        IPMI_SEND_PANIC_EVENT_STRING,
        IPMI_SEND_PANIC_EVENT_MAX
};

/* Indices in this array should be mapped to enum ipmi_panic_event_op */
static const char *const ipmi_panic_event_str[] = { "none", "event", "string", NULL };

#ifdef CONFIG_IPMI_PANIC_STRING
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
#elif defined(CONFIG_IPMI_PANIC_EVENT)
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
#else
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
#endif

static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;

static int panic_op_write_handler(const char *val,
                                  const struct kernel_param *kp)
{
        char valcp[16];
        int e;

        strscpy(valcp, val, sizeof(valcp));
        e = match_string(ipmi_panic_event_str, -1, strstrip(valcp));
        if (e < 0)
                return e;

        ipmi_send_panic_event = e;
        return 0;
}

static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
{
        const char *event_str;

        if (ipmi_send_panic_event >= IPMI_SEND_PANIC_EVENT_MAX)
                event_str = "???";
        else
                event_str = ipmi_panic_event_str[ipmi_send_panic_event];

        return sprintf(buffer, "%s\n", event_str);
}

static const struct kernel_param_ops panic_op_ops = {
        .set = panic_op_write_handler,
        .get = panic_op_read_handler
};
module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic.  Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");


#define MAX_EVENTS_IN_QUEUE     25
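/*
 * Runtime usage sketch (illustrative, assuming the module is built as
 * ipmi_msghandler): module_param_cb(panic_op, ...) above exposes the
 * value through sysfs, and panic_op_write_handler() strips whitespace
 * and matches the token against ipmi_panic_event_str[], so exactly
 * "none", "event", or "string" is accepted:
 *
 *      # cat /sys/module/ipmi_msghandler/parameters/panic_op
 *      string
 *      # echo event > /sys/module/ipmi_msghandler/parameters/panic_op
 */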
/* Remain in auto-maintenance mode for this amount of time (in ms). */
static unsigned long maintenance_mode_timeout_ms = 30000;
module_param(maintenance_mode_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(maintenance_mode_timeout_ms,
                 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");

/*
 * Don't let a message sit in a queue forever, always time it out with
 * at least the max message timer.  This is in milliseconds.
 */
#define MAX_MSG_TIMEOUT         60000

/*
 * Timeout times below are in milliseconds, and are done off a 1
 * second timer.  So setting the value to 1000 would mean anything
 * between 0 and 1000ms.  So really the only reasonable minimum
 * setting is 2000ms, which is between 1 and 2 seconds.
 */

/* The default timeout for message retries. */
static unsigned long default_retry_ms = 2000;
module_param(default_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_retry_ms,
                 "The time (milliseconds) between retry sends");

/* The default timeout for maintenance mode message retries. */
static unsigned long default_maintenance_retry_ms = 3000;
module_param(default_maintenance_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_maintenance_retry_ms,
                 "The time (milliseconds) between retry sends in maintenance mode");

/* The default maximum number of retries */
static unsigned int default_max_retries = 4;
module_param(default_max_retries, uint, 0644);
MODULE_PARM_DESC(default_max_retries,
                 "The number of retry sends before a message times out");

/* The default maximum number of users that may register. */
static unsigned int max_users = 30;
module_param(max_users, uint, 0644);
MODULE_PARM_DESC(max_users,
                 "The most users that may use the IPMI stack at one time.");

/* The default maximum number of messages a user may have outstanding. */
static unsigned int max_msgs_per_user = 100;
module_param(max_msgs_per_user, uint, 0644);
MODULE_PARM_DESC(max_msgs_per_user,
                 "The most messages a user may have outstanding.");

/* Call every ~1000 ms. */
#define IPMI_TIMEOUT_TIME       1000

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES    ((IPMI_TIMEOUT_TIME * HZ) / 1000)

/*
 * Request events from the queue every second (this is the number of
 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
 * future, IPMI will add a way to know immediately if an event is in
 * the queue and this silliness can go away.
 */
#define IPMI_REQUEST_EV_TIME    (1000 / (IPMI_TIMEOUT_TIME))

/* How long should we cache dynamic device IDs? */
#define IPMI_DYN_DEV_ID_EXPIRY  (10 * HZ)

/*
 * The main "user" data structure.
 */
struct ipmi_user {
        struct list_head link;

        struct kref refcount;
        refcount_t destroyed;

        /* The upper layer that handles receive messages. */
        const struct ipmi_user_hndl *handler;
        void *handler_data;

        /* The interface this user is bound to. */
        struct ipmi_smi *intf;

        /* Does this interface receive IPMI events? */
        bool gets_events;

        atomic_t nr_msgs;
};
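/*
 * Sketch of the mass-deletion pattern that the "next" field in struct
 * cmd_rcvr below enables (see _ipmi_destroy_user() for the real code):
 * matching entries are unlinked from the RCU list and chained through
 * ->next while the mutex is held, then freed afterward:
 *
 *      list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link, ...) {
 *              if (rcvr->user == user) {
 *                      list_del_rcu(&rcvr->link);
 *                      rcvr->next = rcvrs;
 *                      rcvrs = rcvr;
 *              }
 *      }
 *      mutex_unlock(&intf->cmd_rcvrs_mutex);
 *      while (rcvrs) {
 *              rcvr = rcvrs;
 *              rcvrs = rcvr->next;
 *              kfree(rcvr);
 *      }
 */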
struct cmd_rcvr {
        struct list_head link;

        struct ipmi_user *user;
        unsigned char netfn;
        unsigned char cmd;
        unsigned int  chans;

        /*
         * This is used to form a linked list during mass deletion.
         * Since this is in an RCU list, we cannot use the link above
         * or change any data until the RCU period completes.  So we
         * use this next variable during mass deletion so we can have
         * a list and don't have to wait and restart the search on
         * every individual deletion of a command.
         */
        struct cmd_rcvr *next;
};

struct seq_table {
        unsigned int inuse : 1;
        unsigned int broadcast : 1;

        unsigned long timeout;
        unsigned long orig_timeout;
        unsigned int  retries_left;

        /*
         * To verify on an incoming send message response that this is
         * the message that the response is for, we keep a sequence id
         * and increment it every time we send a message.
         */
        long seqid;

        /*
         * This is held so we can properly respond to the message on a
         * timeout, and it is used to hold the temporary data for
         * retransmission, too.
         */
        struct ipmi_recv_msg *recv_msg;
};

/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
        ((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid)                   \
        do {                                                    \
                seq = (((msgid) >> 26) & 0x3f);                 \
                seqid = ((msgid) & 0x3ffffff);                  \
        } while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
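/*
 * Worked example of the msgid packing above (values illustrative):
 * with seq = 5 and seqid = 0x123456,
 *
 *      long msgid = STORE_SEQ_IN_MSGID(5, 0x123456);   // 0x14123456
 *      GET_SEQ_FROM_MSGID(msgid, seq, seqid);          // seq 5, seqid 0x123456
 *
 * The sequence number lives in bits 26-31 and the sequence id in the
 * low 26 bits, so both survive the round trip as long as they fit in
 * their fields.
 */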
#define IPMI_MAX_CHANNELS       16
struct ipmi_channel {
        unsigned char medium;
        unsigned char protocol;
};

struct ipmi_channel_set {
        struct ipmi_channel c[IPMI_MAX_CHANNELS];
};

struct ipmi_my_addrinfo {
        /*
         * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
         * but may be changed by the user.
         */
        unsigned char address;

        /*
         * My LUN.  This should generally stay the SMS LUN, but just in
         * case...
         */
        unsigned char lun;
};

/*
 * Note that the product id, manufacturer id, guid, and device id are
 * immutable in this structure, so dyn_mutex is not required for
 * accessing those.  If those change on a BMC, a new BMC is allocated.
 */
struct bmc_device {
        struct platform_device pdev;
        struct list_head       intfs; /* Interfaces on this BMC. */
        struct ipmi_device_id  id;
        struct ipmi_device_id  fetch_id;
        int                    dyn_id_set;
        unsigned long          dyn_id_expiry;
        struct mutex           dyn_mutex; /* Protects id, intfs, & dyn* */
        guid_t                 guid;
        guid_t                 fetch_guid;
        int                    dyn_guid_set;
        struct kref            usecount;
        struct work_struct     remove_work;
        unsigned char          cc; /* completion code */
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)

static struct workqueue_struct *bmc_remove_work_wq;

static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
                             struct ipmi_device_id *id,
                             bool *guid_set, guid_t *guid);

/*
 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 * structure.
 */
enum ipmi_stat_indexes {
        /* Commands we got from the user that were invalid. */
        IPMI_STAT_sent_invalid_commands = 0,

        /* Commands we sent to the MC. */
        IPMI_STAT_sent_local_commands,

        /* Responses from the MC that were delivered to a user. */
        IPMI_STAT_handled_local_responses,

        /* Responses from the MC that were not delivered to a user. */
        IPMI_STAT_unhandled_local_responses,

        /* Commands we sent out to the IPMB bus. */
        IPMI_STAT_sent_ipmb_commands,

        /* Commands sent on the IPMB that had errors on the SEND CMD */
        IPMI_STAT_sent_ipmb_command_errs,

        /* Each retransmit increments this count. */
        IPMI_STAT_retransmitted_ipmb_commands,

        /*
         * When a message times out (runs out of retransmits) this is
         * incremented.
         */
        IPMI_STAT_timed_out_ipmb_commands,

        /*
         * This is like above, but for broadcasts.  Broadcasts are
         * *not* included in the above count (they are expected to
         * time out).
         */
        IPMI_STAT_timed_out_ipmb_broadcasts,

        /* Responses I have sent to the IPMB bus. */
        IPMI_STAT_sent_ipmb_responses,

        /* The response was delivered to the user. */
        IPMI_STAT_handled_ipmb_responses,

        /* The response had invalid data in it. */
        IPMI_STAT_invalid_ipmb_responses,

        /* The response didn't have anyone waiting for it. */
        IPMI_STAT_unhandled_ipmb_responses,

        /* Commands we sent out on the LAN. */
        IPMI_STAT_sent_lan_commands,

        /* Commands sent on the LAN that had errors on the SEND CMD */
        IPMI_STAT_sent_lan_command_errs,

        /* Each retransmit increments this count. */
        IPMI_STAT_retransmitted_lan_commands,

        /*
         * When a message times out (runs out of retransmits) this is
         * incremented.
         */
        IPMI_STAT_timed_out_lan_commands,

        /* Responses I have sent on the LAN. */
        IPMI_STAT_sent_lan_responses,

        /* The response was delivered to the user. */
        IPMI_STAT_handled_lan_responses,

        /* The response had invalid data in it. */
        IPMI_STAT_invalid_lan_responses,

        /* The response didn't have anyone waiting for it. */
        IPMI_STAT_unhandled_lan_responses,

        /* The command was delivered to the user. */
        IPMI_STAT_handled_commands,

        /* The command had invalid data in it. */
        IPMI_STAT_invalid_commands,

        /* The command didn't have anyone waiting for it. */
        IPMI_STAT_unhandled_commands,

        /* Invalid data in an event. */
        IPMI_STAT_invalid_events,

        /* Events that were received with the proper format. */
        IPMI_STAT_events,

        /* Retransmissions on IPMB that failed. */
        IPMI_STAT_dropped_rexmit_ipmb_commands,

        /* Retransmissions on LAN that failed. */
        IPMI_STAT_dropped_rexmit_lan_commands,

        /* This *must* remain last, add new values above this. */
        IPMI_NUM_STATS
};


#define IPMI_IPMB_NUM_SEQ       64
struct ipmi_smi {
        struct module *owner;

        /* What interface number are we? */
        int intf_num;

        struct kref refcount;

        /* Set when the interface is being unregistered. */
        bool in_shutdown;

        /* Used for a list of interfaces. */
        struct list_head link;

        /*
         * The list of upper layers that are using me.
         */
        struct list_head users;
        struct mutex users_mutex;
        atomic_t nr_users;
        struct device_attribute nr_users_devattr;
        struct device_attribute nr_msgs_devattr;


        /* Used for wake ups at startup. */
        wait_queue_head_t waitq;

        /*
         * Prevents the interface from being unregistered when the
         * interface is used by being looked up through the BMC
         * structure.
         */
        struct mutex bmc_reg_mutex;

        struct bmc_device tmp_bmc;
        struct bmc_device *bmc;
        bool bmc_registered;
        struct list_head bmc_link;
        char *my_dev_name;
        bool in_bmc_register;  /* Handle recursive situations.  Yuck. */
        struct work_struct bmc_reg_work;

        const struct ipmi_smi_handlers *handlers;
        void *send_info;

        /* Driver-model device for the system interface. */
        struct device *si_dev;

        /*
         * A table of sequence numbers for this interface.  We use the
         * sequence numbers for IPMB messages that go out of the
         * interface to match them up with their responses.  A routine
         * is called periodically to time the items in this list.
         */
        spinlock_t       seq_lock;
        struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
        int curr_seq;

        /*
         * Messages queued for delivery to the user.
         */
        struct mutex user_msgs_mutex;
        struct list_head user_msgs;

        /*
         * Messages queued for processing.  If processing fails (out
         * of memory for instance), they will stay in here to be
         * processed later in a periodic timer interrupt.  The
         * workqueue is for handling received messages directly from
         * the handler.
         */
        spinlock_t       waiting_rcv_msgs_lock;
        struct list_head waiting_rcv_msgs;
        atomic_t         watchdog_pretimeouts_to_deliver;
        struct work_struct smi_work;

        spinlock_t          xmit_msgs_lock;
        struct list_head    xmit_msgs;
        struct ipmi_smi_msg *curr_msg;
        struct list_head    hp_xmit_msgs;

        /*
         * The list of command receivers that are registered for commands
         * on this interface.
         */
        struct mutex     cmd_rcvrs_mutex;
        struct list_head cmd_rcvrs;

        /*
         * Events that were queued because no one was there to receive
         * them.
         */
        struct mutex     events_mutex; /* For dealing with event stuff. */
        struct list_head waiting_events;
        unsigned int     waiting_events_count; /* How many events in queue? */
        char             event_msg_printed;

        /* How many users are waiting for events? */
        atomic_t         event_waiters;
        unsigned int     ticks_to_req_ev;

        spinlock_t       watch_lock; /* For dealing with watch stuff below. */

        /* How many users are waiting for commands? */
        unsigned int     command_waiters;

        /* How many users are waiting for watchdogs? */
        unsigned int     watchdog_waiters;

        /* How many users are waiting for message responses? */
        unsigned int     response_waiters;

        /*
         * Tells what the lower layer has last been asked to watch for,
         * messages and/or watchdogs.  Protected by watch_lock.
         */
        unsigned int     last_watch_mask;

        /*
         * The event receiver for my BMC, only really used at panic
         * shutdown as a place to store this.
         */
        unsigned char event_receiver;
        unsigned char event_receiver_lun;
        unsigned char local_sel_device;
        unsigned char local_event_generator;

        /* For handling of maintenance mode. */
        int maintenance_mode;
        bool maintenance_mode_enable;
        int auto_maintenance_timeout;
        spinlock_t maintenance_mode_lock; /* Used in a timer... */

        /*
         * If we are doing maintenance on something on IPMB, extend
         * the timeout time to avoid timeouts writing firmware and
         * such.
         */
        int ipmb_maintenance_mode_timeout;

        /*
         * A cheap hack, if this is non-null and a message to an
         * interface comes in with a NULL user, call this routine with
         * it.  Note that the message will still be freed by the
         * caller.  This only works on the system interface.
         *
         * Protected by bmc_reg_mutex.
         */
        void (*null_user_handler)(struct ipmi_smi *intf,
                                  struct ipmi_recv_msg *msg);

        /*
         * When we are scanning the channels for an SMI, this will
         * tell which channel we are scanning.
         */
        int curr_channel;

        /* Channel information */
        struct ipmi_channel_set *channel_list;
        unsigned int curr_working_cset; /* First index into the following. */
        struct ipmi_channel_set wchannels[2];
        struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
        bool channels_ready;

        atomic_t stats[IPMI_NUM_STATS];

        /*
         * run_to_completion duplicate of smb_info, smi_info
         * and ipmi_serial_info structures.  Used to decrease numbers of
         * parameters passed by "low" level IPMI code.
         */
        int run_to_completion;
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)

static void __get_guid(struct ipmi_smi *intf);
static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
static int __ipmi_bmc_register(struct ipmi_smi *intf,
                               struct ipmi_device_id *id,
                               bool guid_set, guid_t *guid, int intf_num);
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);

static void free_ipmi_user(struct kref *ref)
{
        struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
        struct module *owner;

        owner = user->intf->owner;
        kref_put(&user->intf->refcount, intf_free);
        module_put(owner);
        vfree(user);
}

static void release_ipmi_user(struct ipmi_user *user)
{
        kref_put(&user->refcount, free_ipmi_user);
}

static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user)
{
        if (!kref_get_unless_zero(&user->refcount))
                return NULL;
        return user;
}

/*
 * The driver model view of the IPMI messaging driver.
 */
static struct platform_driver ipmidriver = {
        .driver = {
                .name = "ipmi",
                .bus = &platform_bus_type
        }
};
/*
 * This mutex keeps us from adding the same BMC twice.
 */
static DEFINE_MUTEX(ipmidriver_mutex);

static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);

/*
 * List of watchers that want to know when smi's are added and deleted.
 */
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);

#define ipmi_inc_stat(intf, stat) \
        atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
        ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))

/*
 * Example expansion (illustrative): ipmi_inc_stat(intf, sent_ipmb_commands)
 * token-pastes into
 *
 *      atomic_inc(&intf->stats[IPMI_STAT_sent_ipmb_commands]);
 *
 * which is why the enum ipmi_stat_indexes names deliberately keep the
 * lower-case stat name as their tail.
 */

static const char * const addr_src_to_str[] = {
        "invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
        "device-tree", "platform"
};

const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
{
        if (src >= SI_LAST)
                src = 0; /* Invalid */
        return addr_src_to_str[src];
}
EXPORT_SYMBOL(ipmi_addr_src_to_str);

static int is_lan_addr(struct ipmi_addr *addr)
{
        return addr->addr_type == IPMI_LAN_ADDR_TYPE;
}

static int is_ipmb_addr(struct ipmi_addr *addr)
{
        return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
}

static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
{
        return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}

static int is_ipmb_direct_addr(struct ipmi_addr *addr)
{
        return addr->addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE;
}

static void free_recv_msg_list(struct list_head *q)
{
        struct ipmi_recv_msg *msg, *msg2;

        list_for_each_entry_safe(msg, msg2, q, link) {
                list_del(&msg->link);
                ipmi_free_recv_msg(msg);
        }
}

static void free_smi_msg_list(struct list_head *q)
{
        struct ipmi_smi_msg *msg, *msg2;

        list_for_each_entry_safe(msg, msg2, q, link) {
                list_del(&msg->link);
                ipmi_free_smi_msg(msg);
        }
}

static void intf_free(struct kref *ref)
{
        struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
        int i;
        struct cmd_rcvr *rcvr, *rcvr2;

        free_smi_msg_list(&intf->waiting_rcv_msgs);
        free_recv_msg_list(&intf->waiting_events);

        /*
         * Wholesale remove all the entries from the list in the
         * interface.  No need for locks, this is single-threaded.
         */
        list_for_each_entry_safe(rcvr, rcvr2, &intf->cmd_rcvrs, link)
                kfree(rcvr);

        for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
                if ((intf->seq_table[i].inuse)
                                && (intf->seq_table[i].recv_msg))
                        ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
        }

        kfree(intf);
}

/*
 * Watcher usage sketch (illustrative; my_new_smi/my_smi_gone are
 * hypothetical callbacks): a client that wants to track interfaces as
 * they come and go registers a watcher.  new_smi() is also called for
 * every interface that already exists at registration time:
 *
 *      static struct ipmi_smi_watcher watcher = {
 *              .owner = THIS_MODULE,
 *              .new_smi = my_new_smi,
 *              .smi_gone = my_smi_gone,
 *      };
 *      ipmi_smi_watcher_register(&watcher);
 */

int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
        struct ipmi_smi *intf;
        unsigned int count = 0, i;
        int *interfaces = NULL;
        struct device **devices = NULL;
        int rv = 0;

        /*
         * Make sure the driver is actually initialized, this handles
         * problems with initialization order.
         */
        rv = ipmi_init_msghandler();
        if (rv)
                return rv;

        mutex_lock(&smi_watchers_mutex);

        list_add(&watcher->link, &smi_watchers);

        /*
         * Build an array of ipmi interfaces and fill it in, and
         * another array of the devices.  We can't call the callback
         * with ipmi_interfaces_mutex held.  smi_watchers_mutex will
         * keep things in order for the user.
         */
        mutex_lock(&ipmi_interfaces_mutex);
        list_for_each_entry(intf, &ipmi_interfaces, link)
                count++;
        if (count > 0) {
                interfaces = kmalloc_array(count, sizeof(*interfaces),
                                           GFP_KERNEL);
                if (!interfaces) {
                        rv = -ENOMEM;
                } else {
                        devices = kmalloc_array(count, sizeof(*devices),
                                                GFP_KERNEL);
                        if (!devices) {
                                kfree(interfaces);
                                interfaces = NULL;
                                rv = -ENOMEM;
                        }
                }
                count = 0;
        }
        if (interfaces) {
                list_for_each_entry(intf, &ipmi_interfaces, link) {
                        int intf_num = READ_ONCE(intf->intf_num);

                        if (intf_num == -1)
                                continue;
                        devices[count] = intf->si_dev;
                        interfaces[count++] = intf_num;
                }
        }
        mutex_unlock(&ipmi_interfaces_mutex);

        if (interfaces) {
                for (i = 0; i < count; i++)
                        watcher->new_smi(interfaces[i], devices[i]);
                kfree(interfaces);
                kfree(devices);
        }

        mutex_unlock(&smi_watchers_mutex);

        return rv;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);

int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
{
        mutex_lock(&smi_watchers_mutex);
        list_del(&watcher->link);
        mutex_unlock(&smi_watchers_mutex);
        return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);

static void
call_smi_watchers(int i, struct device *dev)
{
        struct ipmi_smi_watcher *w;

        list_for_each_entry(w, &smi_watchers, link) {
                if (try_module_get(w->owner)) {
                        w->new_smi(i, dev);
                        module_put(w->owner);
                }
        }
}

static int
ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
{
        if (addr1->addr_type != addr2->addr_type)
                return 0;

        if (addr1->channel != addr2->channel)
                return 0;

        if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
                struct ipmi_system_interface_addr *smi_addr1
                        = (struct ipmi_system_interface_addr *) addr1;
                struct ipmi_system_interface_addr *smi_addr2
                        = (struct ipmi_system_interface_addr *) addr2;
                return (smi_addr1->lun == smi_addr2->lun);
        }

        if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
                struct ipmi_ipmb_addr *ipmb_addr1
                        = (struct ipmi_ipmb_addr *) addr1;
                struct ipmi_ipmb_addr *ipmb_addr2
                        = (struct ipmi_ipmb_addr *) addr2;

                return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
                        && (ipmb_addr1->lun == ipmb_addr2->lun));
        }

        if (is_ipmb_direct_addr(addr1)) {
                struct ipmi_ipmb_direct_addr *daddr1
                        = (struct ipmi_ipmb_direct_addr *) addr1;
                struct ipmi_ipmb_direct_addr *daddr2
                        = (struct ipmi_ipmb_direct_addr *) addr2;

                return daddr1->slave_addr == daddr2->slave_addr &&
                        daddr1->rq_lun == daddr2->rq_lun &&
                        daddr1->rs_lun == daddr2->rs_lun;
        }

        if (is_lan_addr(addr1)) {
                struct ipmi_lan_addr *lan_addr1
                        = (struct ipmi_lan_addr *) addr1;
                struct ipmi_lan_addr *lan_addr2
                        = (struct ipmi_lan_addr *) addr2;

                return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
                        && (lan_addr1->local_SWID == lan_addr2->local_SWID)
                        && (lan_addr1->session_handle
                            == lan_addr2->session_handle)
                        && (lan_addr1->lun == lan_addr2->lun));
        }

        return 1;
}

int ipmi_validate_addr(struct ipmi_addr *addr, int len)
{
        if (len < sizeof(struct ipmi_system_interface_addr))
                return -EINVAL;

        if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
                if (addr->channel != IPMI_BMC_CHANNEL)
                        return -EINVAL;
                return 0;
        }

        if ((addr->channel == IPMI_BMC_CHANNEL)
            || (addr->channel >= IPMI_MAX_CHANNELS)
            || (addr->channel < 0))
                return -EINVAL;

        if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
                if (len < sizeof(struct ipmi_ipmb_addr))
                        return -EINVAL;
                return 0;
        }

        if (is_ipmb_direct_addr(addr)) {
                struct ipmi_ipmb_direct_addr *daddr = (void *) addr;

                if (addr->channel != 0)
                        return -EINVAL;
                if (len < sizeof(struct ipmi_ipmb_direct_addr))
                        return -EINVAL;

                if (daddr->slave_addr & 0x01)
                        return -EINVAL;
                if (daddr->rq_lun >= 4)
                        return -EINVAL;
                if (daddr->rs_lun >= 4)
                        return -EINVAL;
                return 0;
        }

        if (is_lan_addr(addr)) {
                if (len < sizeof(struct ipmi_lan_addr))
                        return -EINVAL;
                return 0;
        }

        return -EINVAL;
}
EXPORT_SYMBOL(ipmi_validate_addr);

unsigned int ipmi_addr_length(int addr_type)
{
        if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
                return sizeof(struct ipmi_system_interface_addr);

        if ((addr_type == IPMI_IPMB_ADDR_TYPE)
                        || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
                return sizeof(struct ipmi_ipmb_addr);

        if (addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE)
                return sizeof(struct ipmi_ipmb_direct_addr);

        if (addr_type == IPMI_LAN_ADDR_TYPE)
                return sizeof(struct ipmi_lan_addr);

        return 0;
}
EXPORT_SYMBOL(ipmi_addr_length);

static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
        int rv = 0;

        if (!msg->user) {
                /* Special handling for NULL users. */
                if (intf->null_user_handler) {
                        intf->null_user_handler(intf, msg);
                } else {
                        /* No handler, so give up. */
                        rv = -EINVAL;
                }
                ipmi_free_recv_msg(msg);
        } else if (oops_in_progress) {
                /*
                 * If we are running in the panic context, calling the
                 * receive handler doesn't have much meaning and has a
                 * deadlock risk, so simply skip it in that case.
                 */
                ipmi_free_recv_msg(msg);
                atomic_dec(&msg->user->nr_msgs);
        } else {
                /*
                 * Deliver it in smi_work.  The message will hold a
                 * refcount to the user.
                 */
                mutex_lock(&intf->user_msgs_mutex);
                list_add_tail(&msg->link, &intf->user_msgs);
                mutex_unlock(&intf->user_msgs_mutex);
                queue_work(system_wq, &intf->smi_work);
        }

        return rv;
}

static void deliver_local_response(struct ipmi_smi *intf,
                                   struct ipmi_recv_msg *msg)
{
        if (deliver_response(intf, msg))
                ipmi_inc_stat(intf, unhandled_local_responses);
        else
                ipmi_inc_stat(intf, handled_local_responses);
}
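/*
 * Note (summary, not from the original comments): IPMI encodes a
 * response as the request's netfn with the low bit set (e.g. netfn
 * 0x06 APP request -> 0x07 APP response), which is why
 * deliver_err_response() below does msg->msg.netfn |= 1 to fake a
 * response that carries only an error completion code.
 */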
static void deliver_err_response(struct ipmi_smi *intf,
                                 struct ipmi_recv_msg *msg, int err)
{
        msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
        msg->msg_data[0] = err;
        msg->msg.netfn |= 1; /* Convert to a response. */
        msg->msg.data_len = 1;
        msg->msg.data = msg->msg_data;
        deliver_local_response(intf, msg);
}

static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
{
        unsigned long iflags;

        if (!intf->handlers->set_need_watch)
                return;

        spin_lock_irqsave(&intf->watch_lock, iflags);
        if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
                intf->response_waiters++;

        if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
                intf->watchdog_waiters++;

        if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
                intf->command_waiters++;

        if ((intf->last_watch_mask & flags) != flags) {
                intf->last_watch_mask |= flags;
                intf->handlers->set_need_watch(intf->send_info,
                                               intf->last_watch_mask);
        }
        spin_unlock_irqrestore(&intf->watch_lock, iflags);
}

static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
{
        unsigned long iflags;

        if (!intf->handlers->set_need_watch)
                return;

        spin_lock_irqsave(&intf->watch_lock, iflags);
        if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
                intf->response_waiters--;

        if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
                intf->watchdog_waiters--;

        if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
                intf->command_waiters--;

        flags = 0;
        if (intf->response_waiters)
                flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
        if (intf->watchdog_waiters)
                flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
        if (intf->command_waiters)
                flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;

        if (intf->last_watch_mask != flags) {
                intf->last_watch_mask = flags;
                intf->handlers->set_need_watch(intf->send_info,
                                               intf->last_watch_mask);
        }
        spin_unlock_irqrestore(&intf->watch_lock, iflags);
}

/*
 * Find the next sequence number not being used and add the given
 * message with the given timeout to the sequence table.  This must be
 * called with the interface's seq_lock held.
 */
static int intf_next_seq(struct ipmi_smi      *intf,
                         struct ipmi_recv_msg *recv_msg,
                         unsigned long        timeout,
                         int                  retries,
                         int                  broadcast,
                         unsigned char        *seq,
                         long                 *seqid)
{
        int rv = 0;
        unsigned int i;

        if (timeout == 0)
                timeout = default_retry_ms;
        if (retries < 0)
                retries = default_max_retries;

        for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
             i = (i+1)%IPMI_IPMB_NUM_SEQ) {
                if (!intf->seq_table[i].inuse)
                        break;
        }

        if (!intf->seq_table[i].inuse) {
                intf->seq_table[i].recv_msg = recv_msg;

                /*
                 * Start with the maximum timeout, when the send response
                 * comes in we will start the real timer.
                 */
                intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
                intf->seq_table[i].orig_timeout = timeout;
                intf->seq_table[i].retries_left = retries;
                intf->seq_table[i].broadcast = broadcast;
                intf->seq_table[i].inuse = 1;
                intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
                *seq = i;
                *seqid = intf->seq_table[i].seqid;
                intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
                smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
                need_waiter(intf);
        } else {
                rv = -EAGAIN;
        }

        return rv;
}
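/*
 * Lifecycle sketch of a sequence table slot (see i_ipmi_req_ipmb() for
 * the real code): a new command claims a slot with intf_next_seq(),
 * packs seq/seqid into the msgid with STORE_SEQ_IN_MSGID(), and when
 * the response arrives the slot is recovered with GET_SEQ_FROM_MSGID()
 * followed by intf_find_seq() below.
 */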
/*
 * Return the receive message for the given sequence number and
 * release the sequence number so it can be reused.  Some other data
 * is passed in to be sure the message matches up correctly (to help
 * guard against messages coming in after their timeouts and the
 * sequence number being reused).
 */
static int intf_find_seq(struct ipmi_smi      *intf,
                         unsigned char        seq,
                         short                channel,
                         unsigned char        cmd,
                         unsigned char        netfn,
                         struct ipmi_addr     *addr,
                         struct ipmi_recv_msg **recv_msg)
{
        int rv = -ENODEV;
        unsigned long flags;

        if (seq >= IPMI_IPMB_NUM_SEQ)
                return -EINVAL;

        spin_lock_irqsave(&intf->seq_lock, flags);
        if (intf->seq_table[seq].inuse) {
                struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;

                if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
                                && (msg->msg.netfn == netfn)
                                && (ipmi_addr_equal(addr, &msg->addr))) {
                        *recv_msg = msg;
                        intf->seq_table[seq].inuse = 0;
                        smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
                        rv = 0;
                }
        }
        spin_unlock_irqrestore(&intf->seq_lock, flags);

        return rv;
}


/* Start the timer for a specific sequence table entry. */
static int intf_start_seq_timer(struct ipmi_smi *intf,
                                long msgid)
{
        int rv = -ENODEV;
        unsigned long flags;
        unsigned char seq;
        unsigned long seqid;


        GET_SEQ_FROM_MSGID(msgid, seq, seqid);

        spin_lock_irqsave(&intf->seq_lock, flags);
        /*
         * We do this verification because the user can be deleted
         * while a message is outstanding.
         */
        if ((intf->seq_table[seq].inuse)
                        && (intf->seq_table[seq].seqid == seqid)) {
                struct seq_table *ent = &intf->seq_table[seq];
                ent->timeout = ent->orig_timeout;
                rv = 0;
        }
        spin_unlock_irqrestore(&intf->seq_lock, flags);

        return rv;
}

/* Got an error for the send message for a specific sequence number. */
static int intf_err_seq(struct ipmi_smi *intf,
                        long msgid,
                        unsigned int err)
{
        int rv = -ENODEV;
        unsigned long flags;
        unsigned char seq;
        unsigned long seqid;
        struct ipmi_recv_msg *msg = NULL;


        GET_SEQ_FROM_MSGID(msgid, seq, seqid);

        spin_lock_irqsave(&intf->seq_lock, flags);
        /*
         * We do this verification because the user can be deleted
         * while a message is outstanding.
         */
        if ((intf->seq_table[seq].inuse)
                        && (intf->seq_table[seq].seqid == seqid)) {
                struct seq_table *ent = &intf->seq_table[seq];

                ent->inuse = 0;
                smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
                msg = ent->recv_msg;
                rv = 0;
        }
        spin_unlock_irqrestore(&intf->seq_lock, flags);

        if (msg)
                deliver_err_response(intf, msg, err);

        return rv;
}

int ipmi_create_user(unsigned int if_num,
                     const struct ipmi_user_hndl *handler,
                     void *handler_data,
                     struct ipmi_user **user)
{
        unsigned long flags;
        struct ipmi_user *new_user = NULL;
        int rv = 0;
        struct ipmi_smi *intf;

        /*
         * There is no module usecount here, because it's not
         * required.  Since this can only be used by and called from
         * other modules, they will implicitly use this module, and
         * thus this can't be removed unless the other modules are
         * removed.
         */

        if (handler == NULL)
                return -EINVAL;

        /*
         * Make sure the driver is actually initialized, this handles
         * problems with initialization order.
         */
        rv = ipmi_init_msghandler();
        if (rv)
                return rv;

        mutex_lock(&ipmi_interfaces_mutex);
        list_for_each_entry(intf, &ipmi_interfaces, link) {
                if (intf->intf_num == if_num)
                        goto found;
        }
        /* Not found, return an error */
        rv = -EINVAL;
        goto out_kfree;

found:
        if (intf->in_shutdown) {
                rv = -ENODEV;
                goto out_kfree;
        }

        if (atomic_add_return(1, &intf->nr_users) > max_users) {
                rv = -EBUSY;
                goto out_kfree;
        }

        new_user = vzalloc(sizeof(*new_user));
        if (!new_user) {
                rv = -ENOMEM;
                goto out_kfree;
        }

        if (!try_module_get(intf->owner)) {
                rv = -ENODEV;
                goto out_kfree;
        }

        /* Note that each existing user holds a refcount to the interface. */
        kref_get(&intf->refcount);

        atomic_set(&new_user->nr_msgs, 0);
        kref_init(&new_user->refcount);
        refcount_set(&new_user->destroyed, 1);
        kref_get(&new_user->refcount); /* Destroy owns a refcount. */
        new_user->handler = handler;
        new_user->handler_data = handler_data;
        new_user->intf = intf;
        new_user->gets_events = false;

        mutex_lock(&intf->users_mutex);
        spin_lock_irqsave(&intf->seq_lock, flags);
        list_add(&new_user->link, &intf->users);
        spin_unlock_irqrestore(&intf->seq_lock, flags);
        mutex_unlock(&intf->users_mutex);

        if (handler->ipmi_watchdog_pretimeout)
                /* User wants pretimeouts, so make sure to watch for them. */
                smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);

out_kfree:
        if (rv) {
                atomic_dec(&intf->nr_users);
                vfree(new_user);
        } else {
                *user = new_user;
        }
        mutex_unlock(&ipmi_interfaces_mutex);
        return rv;
}
EXPORT_SYMBOL(ipmi_create_user);

int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
        int rv = -EINVAL;
        struct ipmi_smi *intf;

        mutex_lock(&ipmi_interfaces_mutex);
        list_for_each_entry(intf, &ipmi_interfaces, link) {
                if (intf->intf_num == if_num) {
                        if (!intf->handlers->get_smi_info)
                                rv = -ENOTTY;
                        else
                                rv = intf->handlers->get_smi_info(intf->send_info, data);
                        break;
                }
        }
        mutex_unlock(&ipmi_interfaces_mutex);

        return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);

/* Must be called with intf->users_mutex held. */
static void _ipmi_destroy_user(struct ipmi_user *user)
{
        struct ipmi_smi *intf = user->intf;
        int             i;
        unsigned long   flags;
        struct cmd_rcvr *rcvr;
        struct cmd_rcvr *rcvrs = NULL;

        if (!refcount_dec_if_one(&user->destroyed))
                return;

        if (user->handler->shutdown)
                user->handler->shutdown(user->handler_data);

        if (user->handler->ipmi_watchdog_pretimeout)
                smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);

        if (user->gets_events)
                atomic_dec(&intf->event_waiters);

        /* Remove the user from the interface's list and sequence table. */
        list_del(&user->link);
        atomic_dec(&intf->nr_users);

        spin_lock_irqsave(&intf->seq_lock, flags);
        for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
                if (intf->seq_table[i].inuse
                    && (intf->seq_table[i].recv_msg->user == user)) {
                        intf->seq_table[i].inuse = 0;
                        smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
                        ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
                }
        }
        spin_unlock_irqrestore(&intf->seq_lock, flags);

        /*
         * Remove the user from the command receiver's table.  First
         * we build a list of everything (not using the standard link,
         * since other things may be using it till we do
         * synchronize_rcu()) then free everything in that list.
         */
        mutex_lock(&intf->cmd_rcvrs_mutex);
        list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
                                lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
                if (rcvr->user == user) {
                        list_del_rcu(&rcvr->link);
                        rcvr->next = rcvrs;
                        rcvrs = rcvr;
                }
        }
        mutex_unlock(&intf->cmd_rcvrs_mutex);
        while (rcvrs) {
                rcvr = rcvrs;
                rcvrs = rcvr->next;
                kfree(rcvr);
        }

        release_ipmi_user(user);
}

void ipmi_destroy_user(struct ipmi_user *user)
{
        struct ipmi_smi *intf = user->intf;

        mutex_lock(&intf->users_mutex);
        _ipmi_destroy_user(user);
        mutex_unlock(&intf->users_mutex);

        kref_put(&user->refcount, free_ipmi_user);
}
EXPORT_SYMBOL(ipmi_destroy_user);

int ipmi_get_version(struct ipmi_user *user,
                     unsigned char *major,
                     unsigned char *minor)
{
        struct ipmi_device_id id;
        int rv;

        user = acquire_ipmi_user(user);
        if (!user)
                return -ENODEV;

        rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
        if (!rv) {
                *major = ipmi_version_major(&id);
                *minor = ipmi_version_minor(&id);
        }
        release_ipmi_user(user);

        return rv;
}
EXPORT_SYMBOL(ipmi_get_version);

int ipmi_set_my_address(struct ipmi_user *user,
                        unsigned int channel,
                        unsigned char address)
{
        int rv = 0;

        user = acquire_ipmi_user(user);
        if (!user)
                return -ENODEV;

        if (channel >= IPMI_MAX_CHANNELS) {
                rv = -EINVAL;
        } else {
                channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
                user->intf->addrinfo[channel].address = address;
        }
        release_ipmi_user(user);

        return rv;
}
EXPORT_SYMBOL(ipmi_set_my_address);

int ipmi_get_my_address(struct ipmi_user *user,
                        unsigned int channel,
                        unsigned char *address)
{
        int rv = 0;

        user = acquire_ipmi_user(user);
        if (!user)
                return -ENODEV;

        if (channel >= IPMI_MAX_CHANNELS) {
                rv = -EINVAL;
        } else {
                channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
                *address = user->intf->addrinfo[channel].address;
        }
        release_ipmi_user(user);

        return rv;
}
EXPORT_SYMBOL(ipmi_get_my_address);

int ipmi_set_my_LUN(struct ipmi_user *user,
                    unsigned int channel,
                    unsigned char LUN)
{
        int rv = 0;

        user = acquire_ipmi_user(user);
        if (!user)
                return -ENODEV;

        if (channel >= IPMI_MAX_CHANNELS) {
                rv = -EINVAL;
        } else {
                channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
                user->intf->addrinfo[channel].lun = LUN & 0x3;
        }
        release_ipmi_user(user);

        return rv;
}
EXPORT_SYMBOL(ipmi_set_my_LUN);

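/*
 * Note on the array_index_nospec() calls in these accessors: channel
 * comes from the caller and indexes a fixed-size array, so the index
 * is clamped after the bounds check to keep speculative execution
 * from reading past addrinfo[].
 */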
int ipmi_get_my_LUN(struct ipmi_user *user,
                    unsigned int channel,
                    unsigned char *address)
{
        int rv = 0;

        user = acquire_ipmi_user(user);
        if (!user)
                return -ENODEV;

        if (channel >= IPMI_MAX_CHANNELS) {
                rv = -EINVAL;
        } else {
                channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
                *address = user->intf->addrinfo[channel].lun;
        }
        release_ipmi_user(user);

        return rv;
}
EXPORT_SYMBOL(ipmi_get_my_LUN);

int ipmi_get_maintenance_mode(struct ipmi_user *user)
{
        int mode;
        unsigned long flags;

        user = acquire_ipmi_user(user);
        if (!user)
                return -ENODEV;

        spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
        mode = user->intf->maintenance_mode;
        spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
        release_ipmi_user(user);

        return mode;
}
EXPORT_SYMBOL(ipmi_get_maintenance_mode);

static void maintenance_mode_update(struct ipmi_smi *intf)
{
        if (intf->handlers->set_maintenance_mode)
                intf->handlers->set_maintenance_mode(
                        intf->send_info, intf->maintenance_mode_enable);
}

int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
{
        int rv = 0;
        unsigned long flags;
        struct ipmi_smi *intf = user->intf;

        user = acquire_ipmi_user(user);
        if (!user)
                return -ENODEV;

        spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
        if (intf->maintenance_mode != mode) {
                switch (mode) {
                case IPMI_MAINTENANCE_MODE_AUTO:
                        intf->maintenance_mode_enable
                                = (intf->auto_maintenance_timeout > 0);
                        break;

                case IPMI_MAINTENANCE_MODE_OFF:
                        intf->maintenance_mode_enable = false;
                        break;

                case IPMI_MAINTENANCE_MODE_ON:
                        intf->maintenance_mode_enable = true;
                        break;

                default:
                        rv = -EINVAL;
                        goto out_unlock;
                }
                intf->maintenance_mode = mode;

                maintenance_mode_update(intf);
        }
out_unlock:
        spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
        release_ipmi_user(user);

        return rv;
}
EXPORT_SYMBOL(ipmi_set_maintenance_mode);
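/*
 * Usage sketch (illustrative): a firmware-update client would pin the
 * interface in maintenance mode around its transfer:
 *
 *      ipmi_set_maintenance_mode(user, IPMI_MAINTENANCE_MODE_ON);
 *      ... send IPMI_NETFN_FIRMWARE_REQUEST messages ...
 *      ipmi_set_maintenance_mode(user, IPMI_MAINTENANCE_MODE_OFF);
 *
 * In IPMI_MAINTENANCE_MODE_AUTO the mode is instead entered for
 * maintenance_mode_timeout_ms whenever a cold/warm reset or firmware
 * command is seen (see is_maintenance_mode_cmd() below).
 */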
int ipmi_set_gets_events(struct ipmi_user *user, bool val)
{
        struct ipmi_smi      *intf = user->intf;
        struct ipmi_recv_msg *msg, *msg2;
        struct list_head     msgs;

        user = acquire_ipmi_user(user);
        if (!user)
                return -ENODEV;

        INIT_LIST_HEAD(&msgs);

        mutex_lock(&intf->events_mutex);
        if (user->gets_events == val)
                goto out;

        user->gets_events = val;

        if (val) {
                if (atomic_inc_return(&intf->event_waiters) == 1)
                        need_waiter(intf);
        } else {
                atomic_dec(&intf->event_waiters);
        }

        /* Deliver any queued events. */
        while (user->gets_events && !list_empty(&intf->waiting_events)) {
                list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
                        list_move_tail(&msg->link, &msgs);
                intf->waiting_events_count = 0;
                if (intf->event_msg_printed) {
                        dev_warn(intf->si_dev, "Event queue no longer full\n");
                        intf->event_msg_printed = 0;
                }

                list_for_each_entry_safe(msg, msg2, &msgs, link) {
                        msg->user = user;
                        kref_get(&user->refcount);
                        deliver_local_response(intf, msg);
                }
        }

out:
        mutex_unlock(&intf->events_mutex);
        release_ipmi_user(user);

        return 0;
}
EXPORT_SYMBOL(ipmi_set_gets_events);

static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
                                      unsigned char netfn,
                                      unsigned char cmd,
                                      unsigned char chan)
{
        struct cmd_rcvr *rcvr;

        list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
                                lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
                if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
                                && (rcvr->chans & (1 << chan)))
                        return rcvr;
        }
        return NULL;
}

static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
                                 unsigned char netfn,
                                 unsigned char cmd,
                                 unsigned int chans)
{
        struct cmd_rcvr *rcvr;

        list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
                                lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
                if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
                                && (rcvr->chans & chans))
                        return 0;
        }
        return 1;
}
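/*
 * Registration sketch (values illustrative): to claim the chassis
 * control command (netfn 0x00, cmd 0x02) on channels 0 and 1, chans
 * being a bitmask of channel numbers:
 *
 *      rv = ipmi_register_for_cmd(user, 0x00, 0x02, 0x03);
 *
 * -EBUSY comes back if another user already owns that netfn/cmd pair
 * on any of the requested channels.
 */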
int ipmi_register_for_cmd(struct ipmi_user *user,
                          unsigned char netfn,
                          unsigned char cmd,
                          unsigned int chans)
{
        struct ipmi_smi *intf = user->intf;
        struct cmd_rcvr *rcvr;
        int rv = 0;

        user = acquire_ipmi_user(user);
        if (!user)
                return -ENODEV;

        rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
        if (!rcvr) {
                rv = -ENOMEM;
                goto out_release;
        }
        rcvr->cmd = cmd;
        rcvr->netfn = netfn;
        rcvr->chans = chans;
        rcvr->user = user;

        mutex_lock(&intf->cmd_rcvrs_mutex);
        /* Make sure the command/netfn is not already registered. */
        if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
                rv = -EBUSY;
                goto out_unlock;
        }

        smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);

        list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);

out_unlock:
        mutex_unlock(&intf->cmd_rcvrs_mutex);
        if (rv)
                kfree(rcvr);
out_release:
        release_ipmi_user(user);

        return rv;
}
EXPORT_SYMBOL(ipmi_register_for_cmd);

int ipmi_unregister_for_cmd(struct ipmi_user *user,
                            unsigned char netfn,
                            unsigned char cmd,
                            unsigned int chans)
{
        struct ipmi_smi *intf = user->intf;
        struct cmd_rcvr *rcvr;
        struct cmd_rcvr *rcvrs = NULL;
        int i, rv = -ENOENT;

        user = acquire_ipmi_user(user);
        if (!user)
                return -ENODEV;

        mutex_lock(&intf->cmd_rcvrs_mutex);
        for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
                if (((1 << i) & chans) == 0)
                        continue;
                rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
                if (rcvr == NULL)
                        continue;
                if (rcvr->user == user) {
                        rv = 0;
                        rcvr->chans &= ~chans;
                        if (rcvr->chans == 0) {
                                list_del_rcu(&rcvr->link);
                                rcvr->next = rcvrs;
                                rcvrs = rcvr;
                        }
                }
        }
        mutex_unlock(&intf->cmd_rcvrs_mutex);
        synchronize_rcu();
        release_ipmi_user(user);
        while (rcvrs) {
                smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
                rcvr = rcvrs;
                rcvrs = rcvr->next;
                kfree(rcvr);
        }

        return rv;
}
EXPORT_SYMBOL(ipmi_unregister_for_cmd);

unsigned char
ipmb_checksum(unsigned char *data, int size)
{
        unsigned char csum = 0;

        for (; size > 0; size--, data++)
                csum += *data;

        return -csum;
}
EXPORT_SYMBOL(ipmb_checksum);
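/*
 * The IPMB checksum is a two's-complement sum: adding all covered
 * bytes plus the returned checksum gives 0 modulo 256.  A quick check
 * (values illustrative):
 *
 *      unsigned char d[] = { 0x20, 0x18 };
 *      unsigned char c = ipmb_checksum(d, 2);  // c == 0xc8
 *      // (0x20 + 0x18 + 0xc8) & 0xff == 0
 */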
static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
                                   struct kernel_ipmi_msg *msg,
                                   struct ipmi_ipmb_addr *ipmb_addr,
                                   long msgid,
                                   unsigned char ipmb_seq,
                                   int broadcast,
                                   unsigned char source_address,
                                   unsigned char source_lun)
{
        int i = broadcast;

        /* Format the IPMB header data. */
        smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
        smi_msg->data[1] = IPMI_SEND_MSG_CMD;
        smi_msg->data[2] = ipmb_addr->channel;
        if (broadcast)
                smi_msg->data[3] = 0;
        smi_msg->data[i+3] = ipmb_addr->slave_addr;
        smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
        smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
        smi_msg->data[i+6] = source_address;
        smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
        smi_msg->data[i+8] = msg->cmd;

        /* Now tack on the data to the message. */
        if (msg->data_len > 0)
                memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
        smi_msg->data_size = msg->data_len + 9;

        /* Now calculate the checksum and tack it on. */
        smi_msg->data[i+smi_msg->data_size]
                = ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);

        /*
         * Add on the checksum size and the offset from the
         * broadcast.
         */
        smi_msg->data_size += 1 + i;

        smi_msg->msgid = msgid;
}

static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
                                  struct kernel_ipmi_msg *msg,
                                  struct ipmi_lan_addr *lan_addr,
                                  long msgid,
                                  unsigned char ipmb_seq,
                                  unsigned char source_lun)
{
        /* Format the LAN header data. */
        smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
        smi_msg->data[1] = IPMI_SEND_MSG_CMD;
        smi_msg->data[2] = lan_addr->channel;
        smi_msg->data[3] = lan_addr->session_handle;
        smi_msg->data[4] = lan_addr->remote_SWID;
        smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
        smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
        smi_msg->data[7] = lan_addr->local_SWID;
        smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
        smi_msg->data[9] = msg->cmd;

        /* Now tack on the data to the message. */
        if (msg->data_len > 0)
                memcpy(&smi_msg->data[10], msg->data, msg->data_len);
        smi_msg->data_size = msg->data_len + 10;

        /* Now calculate the checksum and tack it on. */
        smi_msg->data[smi_msg->data_size]
                = ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);

        /* Add on the checksum size. */
        smi_msg->data_size += 1;

        smi_msg->msgid = msgid;
}

static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
                                             struct ipmi_smi_msg *smi_msg,
                                             int priority)
{
        if (intf->curr_msg) {
                if (priority > 0)
                        list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
                else
                        list_add_tail(&smi_msg->link, &intf->xmit_msgs);
                smi_msg = NULL;
        } else {
                intf->curr_msg = smi_msg;
        }

        return smi_msg;
}

static void smi_send(struct ipmi_smi *intf,
                     const struct ipmi_smi_handlers *handlers,
                     struct ipmi_smi_msg *smi_msg, int priority)
{
        int run_to_completion = READ_ONCE(intf->run_to_completion);
        unsigned long flags = 0;

        if (!run_to_completion)
                spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
        smi_msg = smi_add_send_msg(intf, smi_msg, priority);
        if (!run_to_completion)
                spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

        if (smi_msg)
                handlers->sender(intf->send_info, smi_msg);
}

static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
{
        return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
                 && ((msg->cmd == IPMI_COLD_RESET_CMD)
                     || (msg->cmd == IPMI_WARM_RESET_CMD)))
                || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
}

static int i_ipmi_req_sysintf(struct ipmi_smi        *intf,
                              struct ipmi_addr       *addr,
                              long                   msgid,
                              struct kernel_ipmi_msg *msg,
                              struct ipmi_smi_msg    *smi_msg,
                              struct ipmi_recv_msg   *recv_msg,
                              int                    retries,
                              unsigned int           retry_time_ms)
{
        struct ipmi_system_interface_addr *smi_addr;

        if (msg->netfn & 1)
                /* Responses are not allowed to the SMI. */
                return -EINVAL;

        smi_addr = (struct ipmi_system_interface_addr *) addr;
        if (smi_addr->lun > 3) {
                ipmi_inc_stat(intf, sent_invalid_commands);
                return -EINVAL;
        }

        memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));

        if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
            && ((msg->cmd == IPMI_SEND_MSG_CMD)
                || (msg->cmd == IPMI_GET_MSG_CMD)
                || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
                /*
                 * We don't let the user do these, since we manage
                 * the sequence numbers.
                 */
                ipmi_inc_stat(intf, sent_invalid_commands);
                return -EINVAL;
        }

        if (is_maintenance_mode_cmd(msg)) {
                unsigned long flags;

                spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
                intf->auto_maintenance_timeout
                        = maintenance_mode_timeout_ms;
                if (!intf->maintenance_mode
                    && !intf->maintenance_mode_enable) {
                        intf->maintenance_mode_enable = true;
                        maintenance_mode_update(intf);
                }
                spin_unlock_irqrestore(&intf->maintenance_mode_lock,
                                       flags);
        }

        if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
                ipmi_inc_stat(intf, sent_invalid_commands);
                return -EMSGSIZE;
        }

        smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
        smi_msg->data[1] = msg->cmd;
        smi_msg->msgid = msgid;
        smi_msg->user_data = recv_msg;
        if (msg->data_len > 0)
                memcpy(&smi_msg->data[2], msg->data, msg->data_len);
        smi_msg->data_size = msg->data_len + 2;
        ipmi_inc_stat(intf, sent_local_commands);

        return 0;
}

static int i_ipmi_req_ipmb(struct ipmi_smi        *intf,
                           struct ipmi_addr       *addr,
                           long                   msgid,
                           struct kernel_ipmi_msg *msg,
                           struct ipmi_smi_msg    *smi_msg,
                           struct ipmi_recv_msg   *recv_msg,
                           unsigned char          source_address,
                           unsigned char          source_lun,
                           int                    retries,
                           unsigned int           retry_time_ms)
{
        struct ipmi_ipmb_addr *ipmb_addr;
        unsigned char ipmb_seq;
        long seqid;
        int broadcast = 0;
        struct ipmi_channel *chans;
        int rv = 0;

        if (addr->channel >= IPMI_MAX_CHANNELS) {
                ipmi_inc_stat(intf, sent_invalid_commands);
                return -EINVAL;
        }

        chans = READ_ONCE(intf->channel_list)->c;

        if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
                ipmi_inc_stat(intf, sent_invalid_commands);
                return -EINVAL;
        }

        if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
                /*
                 * Broadcasts add a zero at the beginning of the
                 * message, but otherwise are the same as an IPMB
                 * address.
                 */
                addr->addr_type = IPMI_IPMB_ADDR_TYPE;
                broadcast = 1;
                retries = 0; /* Don't retry broadcasts. */
        }

        /*
         * 9 for the header and 1 for the checksum, plus
         * possibly one for the broadcast.
         */
        if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
                ipmi_inc_stat(intf, sent_invalid_commands);
                return -EMSGSIZE;
        }

        ipmb_addr = (struct ipmi_ipmb_addr *) addr;
        if (ipmb_addr->lun > 3) {
                ipmi_inc_stat(intf, sent_invalid_commands);
                return -EINVAL;
        }

        memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));

        if (recv_msg->msg.netfn & 0x1) {
                /*
                 * It's a response, so use the user's sequence
                 * from msgid.
                 */
                ipmi_inc_stat(intf, sent_ipmb_responses);
                format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
                                msgid, broadcast,
                                source_address, source_lun);

                /*
                 * Save the receive message so we can use it
                 * to deliver the response.
                 */
                smi_msg->user_data = recv_msg;
        } else {
                /* It's a command, so get a sequence for it. */
*/
2019 unsigned long flags;
2020
2021 spin_lock_irqsave(&intf->seq_lock, flags);
2022
2023 if (is_maintenance_mode_cmd(msg))
2024 intf->ipmb_maintenance_mode_timeout =
2025 maintenance_mode_timeout_ms;
2026
2027 if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
2028 /* Different default in maintenance mode */
2029 retry_time_ms = default_maintenance_retry_ms;
2030
2031 /*
2032 * Create a sequence number entry with the
2033 * given timeout and retry count.
2034 */
2035 rv = intf_next_seq(intf,
2036 recv_msg,
2037 retry_time_ms,
2038 retries,
2039 broadcast,
2040 &ipmb_seq,
2041 &seqid);
2042 if (rv)
2043 /*
2044 * We have probably used up all the sequence
2045 * numbers, so abort.
2046 */
2047 goto out_err;
2048
2049 ipmi_inc_stat(intf, sent_ipmb_commands);
2050
2051 /*
2052 * Store the sequence number in the message,
2053 * so that when the send message response
2054 * comes back we can start the timer.
2055 */
2056 format_ipmb_msg(smi_msg, msg, ipmb_addr,
2057 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
2058 ipmb_seq, broadcast,
2059 source_address, source_lun);
2060
2061 /*
2062 * Copy the message into the recv message data, so we
2063 * can retransmit it later if necessary.
2064 */
2065 memcpy(recv_msg->msg_data, smi_msg->data,
2066 smi_msg->data_size);
2067 recv_msg->msg.data = recv_msg->msg_data;
2068 recv_msg->msg.data_len = smi_msg->data_size;
2069
2070 /*
2071 * We don't unlock until here, because we need
2072 * to copy the completed message into the
2073 * recv_msg before we release the lock.
2074 * Otherwise, race conditions may bite us. I
2075 * know that's pretty paranoid, but I prefer
2076 * to be correct.
2077 */
2078 out_err:
2079 spin_unlock_irqrestore(&intf->seq_lock, flags);
2080 }
2081
2082 return rv;
2083 }
2084
2085 static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf,
2086 struct ipmi_addr *addr,
2087 long msgid,
2088 struct kernel_ipmi_msg *msg,
2089 struct ipmi_smi_msg *smi_msg,
2090 struct ipmi_recv_msg *recv_msg,
2091 unsigned char source_lun)
2092 {
2093 struct ipmi_ipmb_direct_addr *daddr;
2094 bool is_cmd = !(recv_msg->msg.netfn & 0x1);
2095
2096 if (!(intf->handlers->flags & IPMI_SMI_CAN_HANDLE_IPMB_DIRECT))
2097 return -EAFNOSUPPORT;
2098
2099 /* Responses must have a completion code.
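 * (The completion code is the first data byte of every IPMI
 * response, so a response with no data at all is malformed.)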
*/
2100 if (!is_cmd && msg->data_len < 1) {
2101 ipmi_inc_stat(intf, sent_invalid_commands);
2102 return -EINVAL;
2103 }
2104
2105 if ((msg->data_len + 4) > IPMI_MAX_MSG_LENGTH) {
2106 ipmi_inc_stat(intf, sent_invalid_commands);
2107 return -EMSGSIZE;
2108 }
2109
2110 daddr = (struct ipmi_ipmb_direct_addr *) addr;
2111 if (daddr->rq_lun > 3 || daddr->rs_lun > 3) {
2112 ipmi_inc_stat(intf, sent_invalid_commands);
2113 return -EINVAL;
2114 }
2115
2116 smi_msg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT;
2117 smi_msg->msgid = msgid;
2118
2119 if (is_cmd) {
2120 smi_msg->data[0] = msg->netfn << 2 | daddr->rs_lun;
2121 smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rq_lun;
2122 } else {
2123 smi_msg->data[0] = msg->netfn << 2 | daddr->rq_lun;
2124 smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rs_lun;
2125 }
2126 smi_msg->data[1] = daddr->slave_addr;
2127 smi_msg->data[3] = msg->cmd;
2128
2129 memcpy(smi_msg->data + 4, msg->data, msg->data_len);
2130 smi_msg->data_size = msg->data_len + 4;
2131
2132 smi_msg->user_data = recv_msg;
2133
2134 return 0;
2135 }
2136
2137 static int i_ipmi_req_lan(struct ipmi_smi *intf,
2138 struct ipmi_addr *addr,
2139 long msgid,
2140 struct kernel_ipmi_msg *msg,
2141 struct ipmi_smi_msg *smi_msg,
2142 struct ipmi_recv_msg *recv_msg,
2143 unsigned char source_lun,
2144 int retries,
2145 unsigned int retry_time_ms)
2146 {
2147 struct ipmi_lan_addr *lan_addr;
2148 unsigned char ipmb_seq;
2149 long seqid;
2150 struct ipmi_channel *chans;
2151 int rv = 0;
2152
2153 if (addr->channel >= IPMI_MAX_CHANNELS) {
2154 ipmi_inc_stat(intf, sent_invalid_commands);
2155 return -EINVAL;
2156 }
2157
2158 chans = READ_ONCE(intf->channel_list)->c;
2159
2160 if ((chans[addr->channel].medium
2161 != IPMI_CHANNEL_MEDIUM_8023LAN)
2162 && (chans[addr->channel].medium
2163 != IPMI_CHANNEL_MEDIUM_ASYNC)) {
2164 ipmi_inc_stat(intf, sent_invalid_commands);
2165 return -EINVAL;
2166 }
2167
2168 /* 11 for the header and 1 for the checksum. */
2169 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
2170 ipmi_inc_stat(intf, sent_invalid_commands);
2171 return -EMSGSIZE;
2172 }
2173
2174 lan_addr = (struct ipmi_lan_addr *) addr;
2175 if (lan_addr->lun > 3) {
2176 ipmi_inc_stat(intf, sent_invalid_commands);
2177 return -EINVAL;
2178 }
2179
2180 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
2181
2182 if (recv_msg->msg.netfn & 0x1) {
2183 /*
2184 * It's a response, so use the user's sequence
2185 * from msgid.
2186 */
2187 ipmi_inc_stat(intf, sent_lan_responses);
2188 format_lan_msg(smi_msg, msg, lan_addr, msgid,
2189 msgid, source_lun);
2190
2191 /*
2192 * Save the receive message so we can use it
2193 * to deliver the response.
2194 */
2195 smi_msg->user_data = recv_msg;
2196 } else {
2197 /* It's a command, so get a sequence for it. */
2198 unsigned long flags;
2199
2200 spin_lock_irqsave(&intf->seq_lock, flags);
2201
2202 /*
2203 * Create a sequence number entry with the
2204 * given timeout and retry count.
2205 */
2206 rv = intf_next_seq(intf,
2207 recv_msg,
2208 retry_time_ms,
2209 retries,
2210 0,
2211 &ipmb_seq,
2212 &seqid);
2213 if (rv)
2214 /*
2215 * We have probably used up all the sequence
2216 * numbers, so abort.
2217 */
2218 goto out_err;
2219
2220 ipmi_inc_stat(intf, sent_lan_commands);
2221
2222 /*
2223 * Store the sequence number in the message,
2224 * so that when the send message response
2225 * comes back we can start the timer.
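 * (The matching GET_SEQ_FROM_MSGID() unpacks it again when
 * the BMC acknowledges the send and the retry timer is
 * started.)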
2226 */ 2227 format_lan_msg(smi_msg, msg, lan_addr, 2228 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 2229 ipmb_seq, source_lun); 2230 2231 /* 2232 * Copy the message into the recv message data, so we 2233 * can retransmit it later if necessary. 2234 */ 2235 memcpy(recv_msg->msg_data, smi_msg->data, 2236 smi_msg->data_size); 2237 recv_msg->msg.data = recv_msg->msg_data; 2238 recv_msg->msg.data_len = smi_msg->data_size; 2239 2240 /* 2241 * We don't unlock until here, because we need 2242 * to copy the completed message into the 2243 * recv_msg before we release the lock. 2244 * Otherwise, race conditions may bite us. I 2245 * know that's pretty paranoid, but I prefer 2246 * to be correct. 2247 */ 2248 out_err: 2249 spin_unlock_irqrestore(&intf->seq_lock, flags); 2250 } 2251 2252 return rv; 2253 } 2254 2255 /* 2256 * Separate from ipmi_request so that the user does not have to be 2257 * supplied in certain circumstances (mainly at panic time). If 2258 * messages are supplied, they will be freed, even if an error 2259 * occurs. 2260 */ 2261 static int i_ipmi_request(struct ipmi_user *user, 2262 struct ipmi_smi *intf, 2263 struct ipmi_addr *addr, 2264 long msgid, 2265 struct kernel_ipmi_msg *msg, 2266 void *user_msg_data, 2267 void *supplied_smi, 2268 struct ipmi_recv_msg *supplied_recv, 2269 int priority, 2270 unsigned char source_address, 2271 unsigned char source_lun, 2272 int retries, 2273 unsigned int retry_time_ms) 2274 { 2275 struct ipmi_smi_msg *smi_msg; 2276 struct ipmi_recv_msg *recv_msg; 2277 int rv = 0; 2278 2279 if (user) { 2280 if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) { 2281 /* Decrement will happen at the end of the routine. */ 2282 rv = -EBUSY; 2283 goto out; 2284 } 2285 } 2286 2287 if (supplied_recv) 2288 recv_msg = supplied_recv; 2289 else { 2290 recv_msg = ipmi_alloc_recv_msg(); 2291 if (recv_msg == NULL) { 2292 rv = -ENOMEM; 2293 goto out; 2294 } 2295 } 2296 recv_msg->user_msg_data = user_msg_data; 2297 2298 if (supplied_smi) 2299 smi_msg = supplied_smi; 2300 else { 2301 smi_msg = ipmi_alloc_smi_msg(); 2302 if (smi_msg == NULL) { 2303 if (!supplied_recv) 2304 ipmi_free_recv_msg(recv_msg); 2305 rv = -ENOMEM; 2306 goto out; 2307 } 2308 } 2309 2310 mutex_lock(&intf->users_mutex); 2311 if (intf->in_shutdown) { 2312 rv = -ENODEV; 2313 goto out_err; 2314 } 2315 2316 recv_msg->user = user; 2317 if (user) 2318 /* The put happens when the message is freed. */ 2319 kref_get(&user->refcount); 2320 recv_msg->msgid = msgid; 2321 /* 2322 * Store the message to send in the receive message so timeout 2323 * responses can get the proper response data. 2324 */ 2325 recv_msg->msg = *msg; 2326 2327 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 2328 rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg, 2329 recv_msg, retries, retry_time_ms); 2330 } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) { 2331 rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg, 2332 source_address, source_lun, 2333 retries, retry_time_ms); 2334 } else if (is_ipmb_direct_addr(addr)) { 2335 rv = i_ipmi_req_ipmb_direct(intf, addr, msgid, msg, smi_msg, 2336 recv_msg, source_lun); 2337 } else if (is_lan_addr(addr)) { 2338 rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg, 2339 source_lun, retries, retry_time_ms); 2340 } else { 2341 /* Unknown address type. 
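 * Count it against sent_invalid_commands so the failure
 * shows up in the interface statistics.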
*/ 2342 ipmi_inc_stat(intf, sent_invalid_commands); 2343 rv = -EINVAL; 2344 } 2345 2346 if (rv) { 2347 out_err: 2348 ipmi_free_smi_msg(smi_msg); 2349 ipmi_free_recv_msg(recv_msg); 2350 } else { 2351 dev_dbg(intf->si_dev, "Send: %*ph\n", 2352 smi_msg->data_size, smi_msg->data); 2353 2354 smi_send(intf, intf->handlers, smi_msg, priority); 2355 } 2356 mutex_unlock(&intf->users_mutex); 2357 2358 out: 2359 if (rv && user) 2360 atomic_dec(&user->nr_msgs); 2361 return rv; 2362 } 2363 2364 static int check_addr(struct ipmi_smi *intf, 2365 struct ipmi_addr *addr, 2366 unsigned char *saddr, 2367 unsigned char *lun) 2368 { 2369 if (addr->channel >= IPMI_MAX_CHANNELS) 2370 return -EINVAL; 2371 addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS); 2372 *lun = intf->addrinfo[addr->channel].lun; 2373 *saddr = intf->addrinfo[addr->channel].address; 2374 return 0; 2375 } 2376 2377 int ipmi_request_settime(struct ipmi_user *user, 2378 struct ipmi_addr *addr, 2379 long msgid, 2380 struct kernel_ipmi_msg *msg, 2381 void *user_msg_data, 2382 int priority, 2383 int retries, 2384 unsigned int retry_time_ms) 2385 { 2386 unsigned char saddr = 0, lun = 0; 2387 int rv; 2388 2389 if (!user) 2390 return -EINVAL; 2391 2392 user = acquire_ipmi_user(user); 2393 if (!user) 2394 return -ENODEV; 2395 2396 rv = check_addr(user->intf, addr, &saddr, &lun); 2397 if (!rv) 2398 rv = i_ipmi_request(user, 2399 user->intf, 2400 addr, 2401 msgid, 2402 msg, 2403 user_msg_data, 2404 NULL, NULL, 2405 priority, 2406 saddr, 2407 lun, 2408 retries, 2409 retry_time_ms); 2410 2411 release_ipmi_user(user); 2412 return rv; 2413 } 2414 EXPORT_SYMBOL(ipmi_request_settime); 2415 2416 int ipmi_request_supply_msgs(struct ipmi_user *user, 2417 struct ipmi_addr *addr, 2418 long msgid, 2419 struct kernel_ipmi_msg *msg, 2420 void *user_msg_data, 2421 void *supplied_smi, 2422 struct ipmi_recv_msg *supplied_recv, 2423 int priority) 2424 { 2425 unsigned char saddr = 0, lun = 0; 2426 int rv; 2427 2428 if (!user) 2429 return -EINVAL; 2430 2431 user = acquire_ipmi_user(user); 2432 if (!user) 2433 return -ENODEV; 2434 2435 rv = check_addr(user->intf, addr, &saddr, &lun); 2436 if (!rv) 2437 rv = i_ipmi_request(user, 2438 user->intf, 2439 addr, 2440 msgid, 2441 msg, 2442 user_msg_data, 2443 supplied_smi, 2444 supplied_recv, 2445 priority, 2446 saddr, 2447 lun, 2448 -1, 0); 2449 2450 release_ipmi_user(user); 2451 return rv; 2452 } 2453 EXPORT_SYMBOL(ipmi_request_supply_msgs); 2454 2455 static void bmc_device_id_handler(struct ipmi_smi *intf, 2456 struct ipmi_recv_msg *msg) 2457 { 2458 int rv; 2459 2460 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 2461 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 2462 || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) { 2463 dev_warn(intf->si_dev, 2464 "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n", 2465 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd); 2466 return; 2467 } 2468 2469 if (msg->msg.data[0]) { 2470 dev_warn(intf->si_dev, "device id fetch failed: 0x%2.2x\n", 2471 msg->msg.data[0]); 2472 intf->bmc->dyn_id_set = 0; 2473 goto out; 2474 } 2475 2476 rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd, 2477 msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id); 2478 if (rv) { 2479 dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv); 2480 /* record completion code when error */ 2481 intf->bmc->cc = msg->msg.data[0]; 2482 intf->bmc->dyn_id_set = 0; 2483 } else { 2484 /* 2485 * Make sure the id data is available before setting 2486 * dyn_id_set. 
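 * (This write barrier pairs with the smp_rmb() in
 * __get_device_id(); a reader that sees dyn_id_set == 1 is
 * guaranteed to also see the fetched id.)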
*/
2488 smp_wmb();
2489 intf->bmc->dyn_id_set = 1;
2490 }
2491 out:
2492 wake_up(&intf->waitq);
2493 }
2494
2495 static int
2496 send_get_device_id_cmd(struct ipmi_smi *intf)
2497 {
2498 struct ipmi_system_interface_addr si;
2499 struct kernel_ipmi_msg msg;
2500
2501 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2502 si.channel = IPMI_BMC_CHANNEL;
2503 si.lun = 0;
2504
2505 msg.netfn = IPMI_NETFN_APP_REQUEST;
2506 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
2507 msg.data = NULL;
2508 msg.data_len = 0;
2509
2510 return i_ipmi_request(NULL,
2511 intf,
2512 (struct ipmi_addr *) &si,
2513 0,
2514 &msg,
2515 intf,
2516 NULL,
2517 NULL,
2518 0,
2519 intf->addrinfo[0].address,
2520 intf->addrinfo[0].lun,
2521 -1, 0);
2522 }
2523
2524 static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
2525 {
2526 int rv;
2527 unsigned int retry_count = 0;
2528
2529 intf->null_user_handler = bmc_device_id_handler;
2530
2531 retry:
2532 bmc->cc = 0;
2533 bmc->dyn_id_set = 2;
2534
2535 rv = send_get_device_id_cmd(intf);
2536 if (rv)
2537 goto out_reset_handler;
2538
2539 wait_event(intf->waitq, bmc->dyn_id_set != 2);
2540
2541 if (!bmc->dyn_id_set) {
2542 if (bmc->cc != IPMI_CC_NO_ERROR &&
2543 ++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
2544 msleep(500);
2545 dev_warn(intf->si_dev,
2546 "BMC returned 0x%2.2x, retry get bmc device id\n",
2547 bmc->cc);
2548 goto retry;
2549 }
2550
2551 rv = -EIO; /* Something went wrong in the fetch. */
2552 }
2553
2554 /* dyn_id_set makes the id data available. */
2555 smp_rmb();
2556
2557 out_reset_handler:
2558 intf->null_user_handler = NULL;
2559
2560 return rv;
2561 }
2562
2563 /*
2564 * Fetch the device id for the bmc/interface. You must pass in either
2565 * bmc or intf; this code will get the other one. If the data has
2566 * been recently fetched, this will just use the cached data. Otherwise
2567 * it will run a new fetch.
2568 *
2569 * Except for the first time this is called (in ipmi_add_smi()),
2570 * this will always return good data.
2571 */
2572 static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2573 struct ipmi_device_id *id,
2574 bool *guid_set, guid_t *guid, int intf_num)
2575 {
2576 int rv = 0;
2577 int prev_dyn_id_set, prev_guid_set;
2578 bool intf_set = intf != NULL;
2579
2580 if (!intf) {
2581 mutex_lock(&bmc->dyn_mutex);
2582 retry_bmc_lock:
2583 if (list_empty(&bmc->intfs)) {
2584 mutex_unlock(&bmc->dyn_mutex);
2585 return -ENOENT;
2586 }
2587 intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
2588 bmc_link);
2589 kref_get(&intf->refcount);
2590 mutex_unlock(&bmc->dyn_mutex);
2591 mutex_lock(&intf->bmc_reg_mutex);
2592 mutex_lock(&bmc->dyn_mutex);
2593 if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
2594 bmc_link)) {
2595 mutex_unlock(&intf->bmc_reg_mutex);
2596 kref_put(&intf->refcount, intf_free);
2597 goto retry_bmc_lock;
2598 }
2599 } else {
2600 mutex_lock(&intf->bmc_reg_mutex);
2601 bmc = intf->bmc;
2602 mutex_lock(&bmc->dyn_mutex);
2603 kref_get(&intf->refcount);
2604 }
2605
2606 /* If we have a valid and current ID, just return that. */
2607 if (intf->in_bmc_register ||
2608 (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
2609 goto out_noprocessing;
2610
2611 prev_guid_set = bmc->dyn_guid_set;
2612 __get_guid(intf);
2613
2614 prev_dyn_id_set = bmc->dyn_id_set;
2615 rv = __get_device_id(intf, bmc);
2616 if (rv)
2617 goto out;
2618
2619 /*
2620 * The guid, device id, manufacturer id, and product id should
2621 * not change on a BMC. If any of them do, we have to do some dancing.
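 * ("Dancing" here means unregistering the stale BMC device
 * and registering a fresh one with the newly fetched
 * identity, as done just below.)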
2622 */
2623 if (!intf->bmc_registered
2624 || (!prev_guid_set && bmc->dyn_guid_set)
2625 || (!prev_dyn_id_set && bmc->dyn_id_set)
2626 || (prev_guid_set && bmc->dyn_guid_set
2627 && !guid_equal(&bmc->guid, &bmc->fetch_guid))
2628 || bmc->id.device_id != bmc->fetch_id.device_id
2629 || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
2630 || bmc->id.product_id != bmc->fetch_id.product_id) {
2631 struct ipmi_device_id id = bmc->fetch_id;
2632 int guid_set = bmc->dyn_guid_set;
2633 guid_t guid;
2634
2635 guid = bmc->fetch_guid;
2636 mutex_unlock(&bmc->dyn_mutex);
2637
2638 __ipmi_bmc_unregister(intf);
2639 /* Fill in the temporary BMC for good measure. */
2640 intf->bmc->id = id;
2641 intf->bmc->dyn_guid_set = guid_set;
2642 intf->bmc->guid = guid;
2643 if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
2644 need_waiter(intf); /* Retry later on an error. */
2645 else
2646 __scan_channels(intf, &id);
2647
2648
2649 if (!intf_set) {
2650 /*
2651 * We weren't given an interface by the
2652 * caller, so restart the operation on the
2653 * next interface for the BMC.
2654 */
2655 mutex_unlock(&intf->bmc_reg_mutex);
2656 mutex_lock(&bmc->dyn_mutex);
2657 goto retry_bmc_lock;
2658 }
2659
2660 /* We have a new BMC, set it up. */
2661 bmc = intf->bmc;
2662 mutex_lock(&bmc->dyn_mutex);
2663 goto out_noprocessing;
2664 } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
2665 /* The version info changed, so scan the channels again. */
2666 __scan_channels(intf, &bmc->fetch_id);
2667
2668 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
2669
2670 out:
2671 if (rv && prev_dyn_id_set) {
2672 rv = 0; /* Ignore failures if we have previous data. */
2673 bmc->dyn_id_set = prev_dyn_id_set;
2674 }
2675 if (!rv) {
2676 bmc->id = bmc->fetch_id;
2677 if (bmc->dyn_guid_set)
2678 bmc->guid = bmc->fetch_guid;
2679 else if (prev_guid_set)
2680 /*
2681 * The guid used to be valid but failed to fetch,
2682 * so just use the cached value.
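 * (A transient GUID fetch failure must not throw away a
 * previously good cached GUID.)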
2683 */ 2684 bmc->dyn_guid_set = prev_guid_set; 2685 } 2686 out_noprocessing: 2687 if (!rv) { 2688 if (id) 2689 *id = bmc->id; 2690 2691 if (guid_set) 2692 *guid_set = bmc->dyn_guid_set; 2693 2694 if (guid && bmc->dyn_guid_set) 2695 *guid = bmc->guid; 2696 } 2697 2698 mutex_unlock(&bmc->dyn_mutex); 2699 mutex_unlock(&intf->bmc_reg_mutex); 2700 2701 kref_put(&intf->refcount, intf_free); 2702 return rv; 2703 } 2704 2705 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, 2706 struct ipmi_device_id *id, 2707 bool *guid_set, guid_t *guid) 2708 { 2709 return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1); 2710 } 2711 2712 static ssize_t device_id_show(struct device *dev, 2713 struct device_attribute *attr, 2714 char *buf) 2715 { 2716 struct bmc_device *bmc = to_bmc_device(dev); 2717 struct ipmi_device_id id; 2718 int rv; 2719 2720 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2721 if (rv) 2722 return rv; 2723 2724 return sysfs_emit(buf, "%u\n", id.device_id); 2725 } 2726 static DEVICE_ATTR_RO(device_id); 2727 2728 static ssize_t provides_device_sdrs_show(struct device *dev, 2729 struct device_attribute *attr, 2730 char *buf) 2731 { 2732 struct bmc_device *bmc = to_bmc_device(dev); 2733 struct ipmi_device_id id; 2734 int rv; 2735 2736 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2737 if (rv) 2738 return rv; 2739 2740 return sysfs_emit(buf, "%u\n", (id.device_revision & 0x80) >> 7); 2741 } 2742 static DEVICE_ATTR_RO(provides_device_sdrs); 2743 2744 static ssize_t revision_show(struct device *dev, struct device_attribute *attr, 2745 char *buf) 2746 { 2747 struct bmc_device *bmc = to_bmc_device(dev); 2748 struct ipmi_device_id id; 2749 int rv; 2750 2751 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2752 if (rv) 2753 return rv; 2754 2755 return sysfs_emit(buf, "%u\n", id.device_revision & 0x0F); 2756 } 2757 static DEVICE_ATTR_RO(revision); 2758 2759 static ssize_t firmware_revision_show(struct device *dev, 2760 struct device_attribute *attr, 2761 char *buf) 2762 { 2763 struct bmc_device *bmc = to_bmc_device(dev); 2764 struct ipmi_device_id id; 2765 int rv; 2766 2767 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2768 if (rv) 2769 return rv; 2770 2771 return sysfs_emit(buf, "%u.%x\n", id.firmware_revision_1, 2772 id.firmware_revision_2); 2773 } 2774 static DEVICE_ATTR_RO(firmware_revision); 2775 2776 static ssize_t ipmi_version_show(struct device *dev, 2777 struct device_attribute *attr, 2778 char *buf) 2779 { 2780 struct bmc_device *bmc = to_bmc_device(dev); 2781 struct ipmi_device_id id; 2782 int rv; 2783 2784 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2785 if (rv) 2786 return rv; 2787 2788 return sysfs_emit(buf, "%u.%u\n", 2789 ipmi_version_major(&id), 2790 ipmi_version_minor(&id)); 2791 } 2792 static DEVICE_ATTR_RO(ipmi_version); 2793 2794 static ssize_t add_dev_support_show(struct device *dev, 2795 struct device_attribute *attr, 2796 char *buf) 2797 { 2798 struct bmc_device *bmc = to_bmc_device(dev); 2799 struct ipmi_device_id id; 2800 int rv; 2801 2802 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2803 if (rv) 2804 return rv; 2805 2806 return sysfs_emit(buf, "0x%02x\n", id.additional_device_support); 2807 } 2808 static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, 2809 NULL); 2810 2811 static ssize_t manufacturer_id_show(struct device *dev, 2812 struct device_attribute *attr, 2813 char *buf) 2814 { 2815 struct bmc_device *bmc = to_bmc_device(dev); 2816 struct ipmi_device_id id; 2817 int rv; 2818 2819 
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2820 if (rv) 2821 return rv; 2822 2823 return sysfs_emit(buf, "0x%6.6x\n", id.manufacturer_id); 2824 } 2825 static DEVICE_ATTR_RO(manufacturer_id); 2826 2827 static ssize_t product_id_show(struct device *dev, 2828 struct device_attribute *attr, 2829 char *buf) 2830 { 2831 struct bmc_device *bmc = to_bmc_device(dev); 2832 struct ipmi_device_id id; 2833 int rv; 2834 2835 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2836 if (rv) 2837 return rv; 2838 2839 return sysfs_emit(buf, "0x%4.4x\n", id.product_id); 2840 } 2841 static DEVICE_ATTR_RO(product_id); 2842 2843 static ssize_t aux_firmware_rev_show(struct device *dev, 2844 struct device_attribute *attr, 2845 char *buf) 2846 { 2847 struct bmc_device *bmc = to_bmc_device(dev); 2848 struct ipmi_device_id id; 2849 int rv; 2850 2851 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2852 if (rv) 2853 return rv; 2854 2855 return sysfs_emit(buf, "0x%02x 0x%02x 0x%02x 0x%02x\n", 2856 id.aux_firmware_revision[3], 2857 id.aux_firmware_revision[2], 2858 id.aux_firmware_revision[1], 2859 id.aux_firmware_revision[0]); 2860 } 2861 static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL); 2862 2863 static ssize_t guid_show(struct device *dev, struct device_attribute *attr, 2864 char *buf) 2865 { 2866 struct bmc_device *bmc = to_bmc_device(dev); 2867 bool guid_set; 2868 guid_t guid; 2869 int rv; 2870 2871 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid); 2872 if (rv) 2873 return rv; 2874 if (!guid_set) 2875 return -ENOENT; 2876 2877 return sysfs_emit(buf, "%pUl\n", &guid); 2878 } 2879 static DEVICE_ATTR_RO(guid); 2880 2881 static struct attribute *bmc_dev_attrs[] = { 2882 &dev_attr_device_id.attr, 2883 &dev_attr_provides_device_sdrs.attr, 2884 &dev_attr_revision.attr, 2885 &dev_attr_firmware_revision.attr, 2886 &dev_attr_ipmi_version.attr, 2887 &dev_attr_additional_device_support.attr, 2888 &dev_attr_manufacturer_id.attr, 2889 &dev_attr_product_id.attr, 2890 &dev_attr_aux_firmware_revision.attr, 2891 &dev_attr_guid.attr, 2892 NULL 2893 }; 2894 2895 static umode_t bmc_dev_attr_is_visible(struct kobject *kobj, 2896 struct attribute *attr, int idx) 2897 { 2898 struct device *dev = kobj_to_dev(kobj); 2899 struct bmc_device *bmc = to_bmc_device(dev); 2900 umode_t mode = attr->mode; 2901 int rv; 2902 2903 if (attr == &dev_attr_aux_firmware_revision.attr) { 2904 struct ipmi_device_id id; 2905 2906 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2907 return (!rv && id.aux_firmware_revision_set) ? mode : 0; 2908 } 2909 if (attr == &dev_attr_guid.attr) { 2910 bool guid_set; 2911 2912 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL); 2913 return (!rv && guid_set) ? 
mode : 0; 2914 } 2915 return mode; 2916 } 2917 2918 static const struct attribute_group bmc_dev_attr_group = { 2919 .attrs = bmc_dev_attrs, 2920 .is_visible = bmc_dev_attr_is_visible, 2921 }; 2922 2923 static const struct attribute_group *bmc_dev_attr_groups[] = { 2924 &bmc_dev_attr_group, 2925 NULL 2926 }; 2927 2928 static const struct device_type bmc_device_type = { 2929 .groups = bmc_dev_attr_groups, 2930 }; 2931 2932 static int __find_bmc_guid(struct device *dev, const void *data) 2933 { 2934 const guid_t *guid = data; 2935 struct bmc_device *bmc; 2936 int rv; 2937 2938 if (dev->type != &bmc_device_type) 2939 return 0; 2940 2941 bmc = to_bmc_device(dev); 2942 rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid); 2943 if (rv) 2944 rv = kref_get_unless_zero(&bmc->usecount); 2945 return rv; 2946 } 2947 2948 /* 2949 * Returns with the bmc's usecount incremented, if it is non-NULL. 2950 */ 2951 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv, 2952 guid_t *guid) 2953 { 2954 struct device *dev; 2955 struct bmc_device *bmc = NULL; 2956 2957 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid); 2958 if (dev) { 2959 bmc = to_bmc_device(dev); 2960 put_device(dev); 2961 } 2962 return bmc; 2963 } 2964 2965 struct prod_dev_id { 2966 unsigned int product_id; 2967 unsigned char device_id; 2968 }; 2969 2970 static int __find_bmc_prod_dev_id(struct device *dev, const void *data) 2971 { 2972 const struct prod_dev_id *cid = data; 2973 struct bmc_device *bmc; 2974 int rv; 2975 2976 if (dev->type != &bmc_device_type) 2977 return 0; 2978 2979 bmc = to_bmc_device(dev); 2980 rv = (bmc->id.product_id == cid->product_id 2981 && bmc->id.device_id == cid->device_id); 2982 if (rv) 2983 rv = kref_get_unless_zero(&bmc->usecount); 2984 return rv; 2985 } 2986 2987 /* 2988 * Returns with the bmc's usecount incremented, if it is non-NULL. 2989 */ 2990 static struct bmc_device *ipmi_find_bmc_prod_dev_id( 2991 struct device_driver *drv, 2992 unsigned int product_id, unsigned char device_id) 2993 { 2994 struct prod_dev_id id = { 2995 .product_id = product_id, 2996 .device_id = device_id, 2997 }; 2998 struct device *dev; 2999 struct bmc_device *bmc = NULL; 3000 3001 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id); 3002 if (dev) { 3003 bmc = to_bmc_device(dev); 3004 put_device(dev); 3005 } 3006 return bmc; 3007 } 3008 3009 static DEFINE_IDA(ipmi_bmc_ida); 3010 3011 static void 3012 release_bmc_device(struct device *dev) 3013 { 3014 kfree(to_bmc_device(dev)); 3015 } 3016 3017 static void cleanup_bmc_work(struct work_struct *work) 3018 { 3019 struct bmc_device *bmc = container_of(work, struct bmc_device, 3020 remove_work); 3021 int id = bmc->pdev.id; /* Unregister overwrites id */ 3022 3023 platform_device_unregister(&bmc->pdev); 3024 ida_free(&ipmi_bmc_ida, id); 3025 } 3026 3027 static void 3028 cleanup_bmc_device(struct kref *ref) 3029 { 3030 struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount); 3031 3032 /* 3033 * Remove the platform device in a work queue to avoid issues 3034 * with removing the device attributes while reading a device 3035 * attribute. 3036 */ 3037 queue_work(bmc_remove_work_wq, &bmc->remove_work); 3038 } 3039 3040 /* 3041 * Must be called with intf->bmc_reg_mutex held. 
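 * Detaches the interface from its bmc_device and points it back
 * at the embedded tmp_bmc, so intf->bmc never becomes NULL.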
3042 */
3043 static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
3044 {
3045 struct bmc_device *bmc = intf->bmc;
3046
3047 if (!intf->bmc_registered)
3048 return;
3049
3050 sysfs_remove_link(&intf->si_dev->kobj, "bmc");
3051 sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
3052 kfree(intf->my_dev_name);
3053 intf->my_dev_name = NULL;
3054
3055 mutex_lock(&bmc->dyn_mutex);
3056 list_del(&intf->bmc_link);
3057 mutex_unlock(&bmc->dyn_mutex);
3058 intf->bmc = &intf->tmp_bmc;
3059 kref_put(&bmc->usecount, cleanup_bmc_device);
3060 intf->bmc_registered = false;
3061 }
3062
3063 static void ipmi_bmc_unregister(struct ipmi_smi *intf)
3064 {
3065 mutex_lock(&intf->bmc_reg_mutex);
3066 __ipmi_bmc_unregister(intf);
3067 mutex_unlock(&intf->bmc_reg_mutex);
3068 }
3069
3070 /*
3071 * Must be called with intf->bmc_reg_mutex held.
3072 */
3073 static int __ipmi_bmc_register(struct ipmi_smi *intf,
3074 struct ipmi_device_id *id,
3075 bool guid_set, guid_t *guid, int intf_num)
3076 {
3077 int rv;
3078 struct bmc_device *bmc;
3079 struct bmc_device *old_bmc;
3080
3081 /*
3082 * platform_device_register() can cause bmc_reg_mutex to
3083 * be claimed because of the is_visible functions of
3084 * the attributes. Eliminate possible recursion and
3085 * release the lock.
3086 */
3087 intf->in_bmc_register = true;
3088 mutex_unlock(&intf->bmc_reg_mutex);
3089
3090 /*
3091 * Try to find a bmc_device struct that already
3092 * represents this BMC.
3093 */
3094 mutex_lock(&ipmidriver_mutex);
3095 if (guid_set)
3096 old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
3097 else
3098 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
3099 id->product_id,
3100 id->device_id);
3101
3102 /*
3103 * If there is already a bmc_device, use it; otherwise
3104 * allocate and register a new BMC device.
3105 */
3106 if (old_bmc) {
3107 bmc = old_bmc;
3108 /*
3109 * Note: old_bmc already has usecount incremented by
3110 * the BMC find functions.
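 * No extra kref_get() is needed here; that reference is
 * simply handed over to intf->bmc.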
3111 */ 3112 intf->bmc = old_bmc; 3113 mutex_lock(&bmc->dyn_mutex); 3114 list_add_tail(&intf->bmc_link, &bmc->intfs); 3115 mutex_unlock(&bmc->dyn_mutex); 3116 3117 dev_info(intf->si_dev, 3118 "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 3119 bmc->id.manufacturer_id, 3120 bmc->id.product_id, 3121 bmc->id.device_id); 3122 } else { 3123 bmc = kzalloc(sizeof(*bmc), GFP_KERNEL); 3124 if (!bmc) { 3125 rv = -ENOMEM; 3126 goto out; 3127 } 3128 INIT_LIST_HEAD(&bmc->intfs); 3129 mutex_init(&bmc->dyn_mutex); 3130 INIT_WORK(&bmc->remove_work, cleanup_bmc_work); 3131 3132 bmc->id = *id; 3133 bmc->dyn_id_set = 1; 3134 bmc->dyn_guid_set = guid_set; 3135 bmc->guid = *guid; 3136 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY; 3137 3138 bmc->pdev.name = "ipmi_bmc"; 3139 3140 rv = ida_alloc(&ipmi_bmc_ida, GFP_KERNEL); 3141 if (rv < 0) { 3142 kfree(bmc); 3143 goto out; 3144 } 3145 3146 bmc->pdev.dev.driver = &ipmidriver.driver; 3147 bmc->pdev.id = rv; 3148 bmc->pdev.dev.release = release_bmc_device; 3149 bmc->pdev.dev.type = &bmc_device_type; 3150 kref_init(&bmc->usecount); 3151 3152 intf->bmc = bmc; 3153 mutex_lock(&bmc->dyn_mutex); 3154 list_add_tail(&intf->bmc_link, &bmc->intfs); 3155 mutex_unlock(&bmc->dyn_mutex); 3156 3157 rv = platform_device_register(&bmc->pdev); 3158 if (rv) { 3159 dev_err(intf->si_dev, 3160 "Unable to register bmc device: %d\n", 3161 rv); 3162 goto out_list_del; 3163 } 3164 3165 dev_info(intf->si_dev, 3166 "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 3167 bmc->id.manufacturer_id, 3168 bmc->id.product_id, 3169 bmc->id.device_id); 3170 } 3171 3172 /* 3173 * create symlink from system interface device to bmc device 3174 * and back. 3175 */ 3176 rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc"); 3177 if (rv) { 3178 dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv); 3179 goto out_put_bmc; 3180 } 3181 3182 if (intf_num == -1) 3183 intf_num = intf->intf_num; 3184 intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num); 3185 if (!intf->my_dev_name) { 3186 rv = -ENOMEM; 3187 dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n", 3188 rv); 3189 goto out_unlink1; 3190 } 3191 3192 rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj, 3193 intf->my_dev_name); 3194 if (rv) { 3195 dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n", 3196 rv); 3197 goto out_free_my_dev_name; 3198 } 3199 3200 intf->bmc_registered = true; 3201 3202 out: 3203 mutex_unlock(&ipmidriver_mutex); 3204 mutex_lock(&intf->bmc_reg_mutex); 3205 intf->in_bmc_register = false; 3206 return rv; 3207 3208 3209 out_free_my_dev_name: 3210 kfree(intf->my_dev_name); 3211 intf->my_dev_name = NULL; 3212 3213 out_unlink1: 3214 sysfs_remove_link(&intf->si_dev->kobj, "bmc"); 3215 3216 out_put_bmc: 3217 mutex_lock(&bmc->dyn_mutex); 3218 list_del(&intf->bmc_link); 3219 mutex_unlock(&bmc->dyn_mutex); 3220 intf->bmc = &intf->tmp_bmc; 3221 kref_put(&bmc->usecount, cleanup_bmc_device); 3222 goto out; 3223 3224 out_list_del: 3225 mutex_lock(&bmc->dyn_mutex); 3226 list_del(&intf->bmc_link); 3227 mutex_unlock(&bmc->dyn_mutex); 3228 intf->bmc = &intf->tmp_bmc; 3229 put_device(&bmc->pdev.dev); 3230 goto out; 3231 } 3232 3233 static int 3234 send_guid_cmd(struct ipmi_smi *intf, int chan) 3235 { 3236 struct kernel_ipmi_msg msg; 3237 struct ipmi_system_interface_addr si; 3238 3239 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3240 si.channel = IPMI_BMC_CHANNEL; 3241 si.lun = 0; 3242 3243 msg.netfn = 
IPMI_NETFN_APP_REQUEST; 3244 msg.cmd = IPMI_GET_DEVICE_GUID_CMD; 3245 msg.data = NULL; 3246 msg.data_len = 0; 3247 return i_ipmi_request(NULL, 3248 intf, 3249 (struct ipmi_addr *) &si, 3250 0, 3251 &msg, 3252 intf, 3253 NULL, 3254 NULL, 3255 0, 3256 intf->addrinfo[0].address, 3257 intf->addrinfo[0].lun, 3258 -1, 0); 3259 } 3260 3261 static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 3262 { 3263 struct bmc_device *bmc = intf->bmc; 3264 3265 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3266 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 3267 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD)) 3268 /* Not for me */ 3269 return; 3270 3271 if (msg->msg.data[0] != 0) { 3272 /* Error from getting the GUID, the BMC doesn't have one. */ 3273 bmc->dyn_guid_set = 0; 3274 goto out; 3275 } 3276 3277 if (msg->msg.data_len < UUID_SIZE + 1) { 3278 bmc->dyn_guid_set = 0; 3279 dev_warn(intf->si_dev, 3280 "The GUID response from the BMC was too short, it was %d but should have been %d. Assuming GUID is not available.\n", 3281 msg->msg.data_len, UUID_SIZE + 1); 3282 goto out; 3283 } 3284 3285 import_guid(&bmc->fetch_guid, msg->msg.data + 1); 3286 /* 3287 * Make sure the guid data is available before setting 3288 * dyn_guid_set. 3289 */ 3290 smp_wmb(); 3291 bmc->dyn_guid_set = 1; 3292 out: 3293 wake_up(&intf->waitq); 3294 } 3295 3296 static void __get_guid(struct ipmi_smi *intf) 3297 { 3298 int rv; 3299 struct bmc_device *bmc = intf->bmc; 3300 3301 bmc->dyn_guid_set = 2; 3302 intf->null_user_handler = guid_handler; 3303 rv = send_guid_cmd(intf, 0); 3304 if (rv) 3305 /* Send failed, no GUID available. */ 3306 bmc->dyn_guid_set = 0; 3307 else 3308 wait_event(intf->waitq, bmc->dyn_guid_set != 2); 3309 3310 /* dyn_guid_set makes the guid data available. */ 3311 smp_rmb(); 3312 3313 intf->null_user_handler = NULL; 3314 } 3315 3316 static int 3317 send_channel_info_cmd(struct ipmi_smi *intf, int chan) 3318 { 3319 struct kernel_ipmi_msg msg; 3320 unsigned char data[1]; 3321 struct ipmi_system_interface_addr si; 3322 3323 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3324 si.channel = IPMI_BMC_CHANNEL; 3325 si.lun = 0; 3326 3327 msg.netfn = IPMI_NETFN_APP_REQUEST; 3328 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD; 3329 msg.data = data; 3330 msg.data_len = 1; 3331 data[0] = chan; 3332 return i_ipmi_request(NULL, 3333 intf, 3334 (struct ipmi_addr *) &si, 3335 0, 3336 &msg, 3337 intf, 3338 NULL, 3339 NULL, 3340 0, 3341 intf->addrinfo[0].address, 3342 intf->addrinfo[0].lun, 3343 -1, 0); 3344 } 3345 3346 static void 3347 channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 3348 { 3349 int rv = 0; 3350 int ch; 3351 unsigned int set = intf->curr_working_cset; 3352 struct ipmi_channel *chans; 3353 3354 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3355 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) 3356 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) { 3357 /* It's the one we want */ 3358 if (msg->msg.data[0] != 0) { 3359 /* Got an error from the channel, just go on. */ 3360 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) { 3361 /* 3362 * If the MC does not support this 3363 * command, that is legal. We just 3364 * assume it has one IPMB at channel 3365 * zero. 
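 * (Get Channel Info is optional for older MCs, so a single
 * IPMB channel at 0 is the safe minimal assumption.)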
3366 */ 3367 intf->wchannels[set].c[0].medium 3368 = IPMI_CHANNEL_MEDIUM_IPMB; 3369 intf->wchannels[set].c[0].protocol 3370 = IPMI_CHANNEL_PROTOCOL_IPMB; 3371 3372 intf->channel_list = intf->wchannels + set; 3373 intf->channels_ready = true; 3374 wake_up(&intf->waitq); 3375 goto out; 3376 } 3377 goto next_channel; 3378 } 3379 if (msg->msg.data_len < 4) { 3380 /* Message not big enough, just go on. */ 3381 goto next_channel; 3382 } 3383 ch = intf->curr_channel; 3384 chans = intf->wchannels[set].c; 3385 chans[ch].medium = msg->msg.data[2] & 0x7f; 3386 chans[ch].protocol = msg->msg.data[3] & 0x1f; 3387 3388 next_channel: 3389 intf->curr_channel++; 3390 if (intf->curr_channel >= IPMI_MAX_CHANNELS) { 3391 intf->channel_list = intf->wchannels + set; 3392 intf->channels_ready = true; 3393 wake_up(&intf->waitq); 3394 } else { 3395 intf->channel_list = intf->wchannels + set; 3396 intf->channels_ready = true; 3397 rv = send_channel_info_cmd(intf, intf->curr_channel); 3398 } 3399 3400 if (rv) { 3401 /* Got an error somehow, just give up. */ 3402 dev_warn(intf->si_dev, 3403 "Error sending channel information for channel %d: %d\n", 3404 intf->curr_channel, rv); 3405 3406 intf->channel_list = intf->wchannels + set; 3407 intf->channels_ready = true; 3408 wake_up(&intf->waitq); 3409 } 3410 } 3411 out: 3412 return; 3413 } 3414 3415 /* 3416 * Must be holding intf->bmc_reg_mutex to call this. 3417 */ 3418 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id) 3419 { 3420 int rv; 3421 3422 if (ipmi_version_major(id) > 1 3423 || (ipmi_version_major(id) == 1 3424 && ipmi_version_minor(id) >= 5)) { 3425 unsigned int set; 3426 3427 /* 3428 * Start scanning the channels to see what is 3429 * available. 3430 */ 3431 set = !intf->curr_working_cset; 3432 intf->curr_working_cset = set; 3433 memset(&intf->wchannels[set], 0, 3434 sizeof(struct ipmi_channel_set)); 3435 3436 intf->null_user_handler = channel_handler; 3437 intf->curr_channel = 0; 3438 rv = send_channel_info_cmd(intf, 0); 3439 if (rv) { 3440 dev_warn(intf->si_dev, 3441 "Error sending channel information for channel 0, %d\n", 3442 rv); 3443 intf->null_user_handler = NULL; 3444 return -EIO; 3445 } 3446 3447 /* Wait for the channel info to be read. */ 3448 wait_event(intf->waitq, intf->channels_ready); 3449 intf->null_user_handler = NULL; 3450 } else { 3451 unsigned int set = intf->curr_working_cset; 3452 3453 /* Assume a single IPMB channel at zero. 
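 * (This is the same fallback channel_handler() applies when the
 * MC rejects Get Channel Info with IPMI_INVALID_COMMAND_ERR.)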
*/ 3454 intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; 3455 intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB; 3456 intf->channel_list = intf->wchannels + set; 3457 intf->channels_ready = true; 3458 } 3459 3460 return 0; 3461 } 3462 3463 static void ipmi_poll(struct ipmi_smi *intf) 3464 { 3465 if (intf->handlers->poll) 3466 intf->handlers->poll(intf->send_info); 3467 /* In case something came in */ 3468 handle_new_recv_msgs(intf); 3469 } 3470 3471 void ipmi_poll_interface(struct ipmi_user *user) 3472 { 3473 ipmi_poll(user->intf); 3474 } 3475 EXPORT_SYMBOL(ipmi_poll_interface); 3476 3477 static ssize_t nr_users_show(struct device *dev, 3478 struct device_attribute *attr, 3479 char *buf) 3480 { 3481 struct ipmi_smi *intf = container_of(attr, 3482 struct ipmi_smi, nr_users_devattr); 3483 3484 return sysfs_emit(buf, "%d\n", atomic_read(&intf->nr_users)); 3485 } 3486 static DEVICE_ATTR_RO(nr_users); 3487 3488 static ssize_t nr_msgs_show(struct device *dev, 3489 struct device_attribute *attr, 3490 char *buf) 3491 { 3492 struct ipmi_smi *intf = container_of(attr, 3493 struct ipmi_smi, nr_msgs_devattr); 3494 struct ipmi_user *user; 3495 unsigned int count = 0; 3496 3497 mutex_lock(&intf->users_mutex); 3498 list_for_each_entry(user, &intf->users, link) 3499 count += atomic_read(&user->nr_msgs); 3500 mutex_unlock(&intf->users_mutex); 3501 3502 return sysfs_emit(buf, "%u\n", count); 3503 } 3504 static DEVICE_ATTR_RO(nr_msgs); 3505 3506 static void redo_bmc_reg(struct work_struct *work) 3507 { 3508 struct ipmi_smi *intf = container_of(work, struct ipmi_smi, 3509 bmc_reg_work); 3510 3511 if (!intf->in_shutdown) 3512 bmc_get_device_id(intf, NULL, NULL, NULL, NULL); 3513 3514 kref_put(&intf->refcount, intf_free); 3515 } 3516 3517 int ipmi_add_smi(struct module *owner, 3518 const struct ipmi_smi_handlers *handlers, 3519 void *send_info, 3520 struct device *si_dev, 3521 unsigned char slave_addr) 3522 { 3523 int i, j; 3524 int rv; 3525 struct ipmi_smi *intf, *tintf; 3526 struct list_head *link; 3527 struct ipmi_device_id id; 3528 3529 /* 3530 * Make sure the driver is actually initialized, this handles 3531 * problems with initialization order. 3532 */ 3533 rv = ipmi_init_msghandler(); 3534 if (rv) 3535 return rv; 3536 3537 intf = kzalloc(sizeof(*intf), GFP_KERNEL); 3538 if (!intf) 3539 return -ENOMEM; 3540 3541 intf->owner = owner; 3542 intf->bmc = &intf->tmp_bmc; 3543 INIT_LIST_HEAD(&intf->bmc->intfs); 3544 mutex_init(&intf->bmc->dyn_mutex); 3545 INIT_LIST_HEAD(&intf->bmc_link); 3546 mutex_init(&intf->bmc_reg_mutex); 3547 intf->intf_num = -1; /* Mark it invalid for now. 
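 * The real number is assigned near the end of registration,
 * once a free slot in the interface list has been found.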
*/ 3548 kref_init(&intf->refcount); 3549 INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg); 3550 intf->si_dev = si_dev; 3551 for (j = 0; j < IPMI_MAX_CHANNELS; j++) { 3552 intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR; 3553 intf->addrinfo[j].lun = 2; 3554 } 3555 if (slave_addr != 0) 3556 intf->addrinfo[0].address = slave_addr; 3557 INIT_LIST_HEAD(&intf->user_msgs); 3558 mutex_init(&intf->user_msgs_mutex); 3559 INIT_LIST_HEAD(&intf->users); 3560 mutex_init(&intf->users_mutex); 3561 atomic_set(&intf->nr_users, 0); 3562 intf->handlers = handlers; 3563 intf->send_info = send_info; 3564 spin_lock_init(&intf->seq_lock); 3565 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { 3566 intf->seq_table[j].inuse = 0; 3567 intf->seq_table[j].seqid = 0; 3568 } 3569 intf->curr_seq = 0; 3570 spin_lock_init(&intf->waiting_rcv_msgs_lock); 3571 INIT_LIST_HEAD(&intf->waiting_rcv_msgs); 3572 INIT_WORK(&intf->smi_work, smi_work); 3573 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0); 3574 spin_lock_init(&intf->xmit_msgs_lock); 3575 INIT_LIST_HEAD(&intf->xmit_msgs); 3576 INIT_LIST_HEAD(&intf->hp_xmit_msgs); 3577 mutex_init(&intf->events_mutex); 3578 spin_lock_init(&intf->watch_lock); 3579 atomic_set(&intf->event_waiters, 0); 3580 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 3581 INIT_LIST_HEAD(&intf->waiting_events); 3582 intf->waiting_events_count = 0; 3583 mutex_init(&intf->cmd_rcvrs_mutex); 3584 spin_lock_init(&intf->maintenance_mode_lock); 3585 INIT_LIST_HEAD(&intf->cmd_rcvrs); 3586 init_waitqueue_head(&intf->waitq); 3587 for (i = 0; i < IPMI_NUM_STATS; i++) 3588 atomic_set(&intf->stats[i], 0); 3589 3590 /* 3591 * Grab the watchers mutex so we can deliver the new interface 3592 * without races. 3593 */ 3594 mutex_lock(&smi_watchers_mutex); 3595 mutex_lock(&ipmi_interfaces_mutex); 3596 /* Look for a hole in the numbers. */ 3597 i = 0; 3598 link = &ipmi_interfaces; 3599 list_for_each_entry(tintf, &ipmi_interfaces, link) { 3600 if (tintf->intf_num != i) { 3601 link = &tintf->link; 3602 break; 3603 } 3604 i++; 3605 } 3606 /* Add the new interface in numeric order. */ 3607 if (i == 0) 3608 list_add(&intf->link, &ipmi_interfaces); 3609 else 3610 list_add_tail(&intf->link, link); 3611 3612 rv = handlers->start_processing(send_info, intf); 3613 if (rv) 3614 goto out_err; 3615 3616 rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i); 3617 if (rv) { 3618 dev_err(si_dev, "Unable to get the device id: %d\n", rv); 3619 goto out_err_started; 3620 } 3621 3622 mutex_lock(&intf->bmc_reg_mutex); 3623 rv = __scan_channels(intf, &id); 3624 mutex_unlock(&intf->bmc_reg_mutex); 3625 if (rv) 3626 goto out_err_bmc_reg; 3627 3628 intf->nr_users_devattr = dev_attr_nr_users; 3629 sysfs_attr_init(&intf->nr_users_devattr.attr); 3630 rv = device_create_file(intf->si_dev, &intf->nr_users_devattr); 3631 if (rv) 3632 goto out_err_bmc_reg; 3633 3634 intf->nr_msgs_devattr = dev_attr_nr_msgs; 3635 sysfs_attr_init(&intf->nr_msgs_devattr.attr); 3636 rv = device_create_file(intf->si_dev, &intf->nr_msgs_devattr); 3637 if (rv) { 3638 device_remove_file(intf->si_dev, &intf->nr_users_devattr); 3639 goto out_err_bmc_reg; 3640 } 3641 3642 intf->intf_num = i; 3643 mutex_unlock(&ipmi_interfaces_mutex); 3644 3645 /* After this point the interface is legal to use. 
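 * Watchers are told about it while smi_watchers_mutex is still
 * held, so a watcher cannot miss or double-see this interface.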
*/ 3646 call_smi_watchers(i, intf->si_dev); 3647 3648 mutex_unlock(&smi_watchers_mutex); 3649 3650 return 0; 3651 3652 out_err_bmc_reg: 3653 ipmi_bmc_unregister(intf); 3654 out_err_started: 3655 if (intf->handlers->shutdown) 3656 intf->handlers->shutdown(intf->send_info); 3657 out_err: 3658 list_del(&intf->link); 3659 mutex_unlock(&ipmi_interfaces_mutex); 3660 mutex_unlock(&smi_watchers_mutex); 3661 kref_put(&intf->refcount, intf_free); 3662 3663 return rv; 3664 } 3665 EXPORT_SYMBOL(ipmi_add_smi); 3666 3667 static void deliver_smi_err_response(struct ipmi_smi *intf, 3668 struct ipmi_smi_msg *msg, 3669 unsigned char err) 3670 { 3671 int rv; 3672 msg->rsp[0] = msg->data[0] | 4; 3673 msg->rsp[1] = msg->data[1]; 3674 msg->rsp[2] = err; 3675 msg->rsp_size = 3; 3676 3677 /* This will never requeue, but it may ask us to free the message. */ 3678 rv = handle_one_recv_msg(intf, msg); 3679 if (rv == 0) 3680 ipmi_free_smi_msg(msg); 3681 } 3682 3683 static void cleanup_smi_msgs(struct ipmi_smi *intf) 3684 { 3685 int i; 3686 struct seq_table *ent; 3687 struct ipmi_smi_msg *msg; 3688 struct list_head *entry; 3689 struct list_head tmplist; 3690 3691 /* Clear out our transmit queues and hold the messages. */ 3692 INIT_LIST_HEAD(&tmplist); 3693 list_splice_tail(&intf->hp_xmit_msgs, &tmplist); 3694 list_splice_tail(&intf->xmit_msgs, &tmplist); 3695 3696 /* Current message first, to preserve order */ 3697 while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) { 3698 /* Wait for the message to clear out. */ 3699 schedule_timeout(1); 3700 } 3701 3702 /* No need for locks, the interface is down. */ 3703 3704 /* 3705 * Return errors for all pending messages in queue and in the 3706 * tables waiting for remote responses. 3707 */ 3708 while (!list_empty(&tmplist)) { 3709 entry = tmplist.next; 3710 list_del(entry); 3711 msg = list_entry(entry, struct ipmi_smi_msg, link); 3712 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED); 3713 } 3714 3715 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { 3716 ent = &intf->seq_table[i]; 3717 if (!ent->inuse) 3718 continue; 3719 deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED); 3720 } 3721 } 3722 3723 void ipmi_unregister_smi(struct ipmi_smi *intf) 3724 { 3725 struct ipmi_smi_watcher *w; 3726 int intf_num; 3727 3728 if (!intf) 3729 return; 3730 3731 intf_num = intf->intf_num; 3732 mutex_lock(&ipmi_interfaces_mutex); 3733 cancel_work_sync(&intf->smi_work); 3734 /* smi_work() can no longer be in progress after this. */ 3735 3736 intf->intf_num = -1; 3737 intf->in_shutdown = true; 3738 list_del(&intf->link); 3739 mutex_unlock(&ipmi_interfaces_mutex); 3740 3741 /* 3742 * At this point no users can be added to the interface and no 3743 * new messages can be sent. 3744 */ 3745 3746 if (intf->handlers->shutdown) 3747 intf->handlers->shutdown(intf->send_info); 3748 3749 device_remove_file(intf->si_dev, &intf->nr_msgs_devattr); 3750 device_remove_file(intf->si_dev, &intf->nr_users_devattr); 3751 3752 /* 3753 * Call all the watcher interfaces to tell them that 3754 * an interface is going away. 
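 * (Each watcher gets smi_gone() with the interface number it
 * was originally given in new_smi().)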
3755 */ 3756 mutex_lock(&smi_watchers_mutex); 3757 list_for_each_entry(w, &smi_watchers, link) 3758 w->smi_gone(intf_num); 3759 mutex_unlock(&smi_watchers_mutex); 3760 3761 mutex_lock(&intf->users_mutex); 3762 while (!list_empty(&intf->users)) { 3763 struct ipmi_user *user = list_first_entry(&intf->users, 3764 struct ipmi_user, link); 3765 3766 _ipmi_destroy_user(user); 3767 } 3768 mutex_unlock(&intf->users_mutex); 3769 3770 cleanup_smi_msgs(intf); 3771 3772 ipmi_bmc_unregister(intf); 3773 3774 kref_put(&intf->refcount, intf_free); 3775 } 3776 EXPORT_SYMBOL(ipmi_unregister_smi); 3777 3778 static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf, 3779 struct ipmi_smi_msg *msg) 3780 { 3781 struct ipmi_ipmb_addr ipmb_addr; 3782 struct ipmi_recv_msg *recv_msg; 3783 3784 /* 3785 * This is 11, not 10, because the response must contain a 3786 * completion code. 3787 */ 3788 if (msg->rsp_size < 11) { 3789 /* Message not big enough, just ignore it. */ 3790 ipmi_inc_stat(intf, invalid_ipmb_responses); 3791 return 0; 3792 } 3793 3794 if (msg->rsp[2] != 0) { 3795 /* An error getting the response, just ignore it. */ 3796 return 0; 3797 } 3798 3799 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE; 3800 ipmb_addr.slave_addr = msg->rsp[6]; 3801 ipmb_addr.channel = msg->rsp[3] & 0x0f; 3802 ipmb_addr.lun = msg->rsp[7] & 3; 3803 3804 /* 3805 * It's a response from a remote entity. Look up the sequence 3806 * number and handle the response. 3807 */ 3808 if (intf_find_seq(intf, 3809 msg->rsp[7] >> 2, 3810 msg->rsp[3] & 0x0f, 3811 msg->rsp[8], 3812 (msg->rsp[4] >> 2) & (~1), 3813 (struct ipmi_addr *) &ipmb_addr, 3814 &recv_msg)) { 3815 /* 3816 * We were unable to find the sequence number, 3817 * so just nuke the message. 3818 */ 3819 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3820 return 0; 3821 } 3822 3823 memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9); 3824 /* 3825 * The other fields matched, so no need to set them, except 3826 * for netfn, which needs to be the response that was 3827 * returned, not the request value. 3828 */ 3829 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3830 recv_msg->msg.data = recv_msg->msg_data; 3831 recv_msg->msg.data_len = msg->rsp_size - 10; 3832 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 3833 if (deliver_response(intf, recv_msg)) 3834 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3835 else 3836 ipmi_inc_stat(intf, handled_ipmb_responses); 3837 3838 return 0; 3839 } 3840 3841 static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, 3842 struct ipmi_smi_msg *msg) 3843 { 3844 struct cmd_rcvr *rcvr; 3845 int rv = 0; 3846 unsigned char netfn; 3847 unsigned char cmd; 3848 unsigned char chan; 3849 struct ipmi_user *user = NULL; 3850 struct ipmi_ipmb_addr *ipmb_addr; 3851 struct ipmi_recv_msg *recv_msg; 3852 3853 if (msg->rsp_size < 10) { 3854 /* Message not big enough, just ignore it. */ 3855 ipmi_inc_stat(intf, invalid_commands); 3856 return 0; 3857 } 3858 3859 if (msg->rsp[2] != 0) { 3860 /* An error getting the response, just ignore it. */ 3861 return 0; 3862 } 3863 3864 netfn = msg->rsp[4] >> 2; 3865 cmd = msg->rsp[8]; 3866 chan = msg->rsp[3] & 0xf; 3867 3868 rcu_read_lock(); 3869 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 3870 if (rcvr) { 3871 user = rcvr->user; 3872 kref_get(&user->refcount); 3873 } else 3874 user = NULL; 3875 rcu_read_unlock(); 3876 3877 if (user == NULL) { 3878 /* We didn't find a user, deliver an error response. 
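 * An "invalid command" completion code is filled into
 * msg->data below and bounced back to the requester via
 * Send Message.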
*/ 3879 ipmi_inc_stat(intf, unhandled_commands); 3880 3881 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 3882 msg->data[1] = IPMI_SEND_MSG_CMD; 3883 msg->data[2] = msg->rsp[3]; 3884 msg->data[3] = msg->rsp[6]; 3885 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); 3886 msg->data[5] = ipmb_checksum(&msg->data[3], 2); 3887 msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address; 3888 /* rqseq/lun */ 3889 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); 3890 msg->data[8] = msg->rsp[8]; /* cmd */ 3891 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE; 3892 msg->data[10] = ipmb_checksum(&msg->data[6], 4); 3893 msg->data_size = 11; 3894 3895 dev_dbg(intf->si_dev, "Invalid command: %*ph\n", 3896 msg->data_size, msg->data); 3897 3898 smi_send(intf, intf->handlers, msg, 0); 3899 /* 3900 * We used the message, so return the value that 3901 * causes it to not be freed or queued. 3902 */ 3903 rv = -1; 3904 } else { 3905 recv_msg = ipmi_alloc_recv_msg(); 3906 if (!recv_msg) { 3907 /* 3908 * We couldn't allocate memory for the 3909 * message, so requeue it for handling 3910 * later. 3911 */ 3912 rv = 1; 3913 kref_put(&user->refcount, free_ipmi_user); 3914 } else { 3915 /* Extract the source address from the data. */ 3916 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; 3917 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; 3918 ipmb_addr->slave_addr = msg->rsp[6]; 3919 ipmb_addr->lun = msg->rsp[7] & 3; 3920 ipmb_addr->channel = msg->rsp[3] & 0xf; 3921 3922 /* 3923 * Extract the rest of the message information 3924 * from the IPMB header. 3925 */ 3926 recv_msg->user = user; 3927 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 3928 recv_msg->msgid = msg->rsp[7] >> 2; 3929 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3930 recv_msg->msg.cmd = msg->rsp[8]; 3931 recv_msg->msg.data = recv_msg->msg_data; 3932 3933 /* 3934 * We chop off 10, not 9 bytes because the checksum 3935 * at the end also needs to be removed. 3936 */ 3937 recv_msg->msg.data_len = msg->rsp_size - 10; 3938 memcpy(recv_msg->msg_data, &msg->rsp[9], 3939 msg->rsp_size - 10); 3940 if (deliver_response(intf, recv_msg)) 3941 ipmi_inc_stat(intf, unhandled_commands); 3942 else 3943 ipmi_inc_stat(intf, handled_commands); 3944 } 3945 } 3946 3947 return rv; 3948 } 3949 3950 static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, 3951 struct ipmi_smi_msg *msg) 3952 { 3953 struct cmd_rcvr *rcvr; 3954 int rv = 0; 3955 struct ipmi_user *user = NULL; 3956 struct ipmi_ipmb_direct_addr *daddr; 3957 struct ipmi_recv_msg *recv_msg; 3958 unsigned char netfn = msg->rsp[0] >> 2; 3959 unsigned char cmd = msg->rsp[3]; 3960 3961 rcu_read_lock(); 3962 /* We always use channel 0 for direct messages. */ 3963 rcvr = find_cmd_rcvr(intf, netfn, cmd, 0); 3964 if (rcvr) { 3965 user = rcvr->user; 3966 kref_get(&user->refcount); 3967 } else 3968 user = NULL; 3969 rcu_read_unlock(); 3970 3971 if (user == NULL) { 3972 /* We didn't find a user, deliver an error response. */ 3973 ipmi_inc_stat(intf, unhandled_commands); 3974 3975 msg->data[0] = (netfn + 1) << 2; 3976 msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */ 3977 msg->data[1] = msg->rsp[1]; /* Addr */ 3978 msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */ 3979 msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */ 3980 msg->data[3] = cmd; 3981 msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE; 3982 msg->data_size = 5; 3983 3984 smi_send(intf, intf->handlers, msg, 0); 3985 /* 3986 * We used the message, so return the value that 3987 * causes it to not be freed or queued. 
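 * (Return convention for these handlers: -1 means the message
 * was consumed, 0 means the caller should free it, and 1 means
 * requeue it and try again later.)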
3988 */ 3989 rv = -1; 3990 } else { 3991 recv_msg = ipmi_alloc_recv_msg(); 3992 if (!recv_msg) { 3993 /* 3994 * We couldn't allocate memory for the 3995 * message, so requeue it for handling 3996 * later. 3997 */ 3998 rv = 1; 3999 kref_put(&user->refcount, free_ipmi_user); 4000 } else { 4001 /* Extract the source address from the data. */ 4002 daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr; 4003 daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; 4004 daddr->channel = 0; 4005 daddr->slave_addr = msg->rsp[1]; 4006 daddr->rs_lun = msg->rsp[0] & 3; 4007 daddr->rq_lun = msg->rsp[2] & 3; 4008 4009 /* 4010 * Extract the rest of the message information 4011 * from the IPMB header. 4012 */ 4013 recv_msg->user = user; 4014 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 4015 recv_msg->msgid = (msg->rsp[2] >> 2); 4016 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4017 recv_msg->msg.cmd = msg->rsp[3]; 4018 recv_msg->msg.data = recv_msg->msg_data; 4019 4020 recv_msg->msg.data_len = msg->rsp_size - 4; 4021 memcpy(recv_msg->msg_data, msg->rsp + 4, 4022 msg->rsp_size - 4); 4023 if (deliver_response(intf, recv_msg)) 4024 ipmi_inc_stat(intf, unhandled_commands); 4025 else 4026 ipmi_inc_stat(intf, handled_commands); 4027 } 4028 } 4029 4030 return rv; 4031 } 4032 4033 static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf, 4034 struct ipmi_smi_msg *msg) 4035 { 4036 struct ipmi_recv_msg *recv_msg; 4037 struct ipmi_ipmb_direct_addr *daddr; 4038 4039 recv_msg = msg->user_data; 4040 if (recv_msg == NULL) { 4041 dev_warn(intf->si_dev, 4042 "IPMI direct message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n"); 4043 return 0; 4044 } 4045 4046 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 4047 recv_msg->msgid = msg->msgid; 4048 daddr = (struct ipmi_ipmb_direct_addr *) &recv_msg->addr; 4049 daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; 4050 daddr->channel = 0; 4051 daddr->slave_addr = msg->rsp[1]; 4052 daddr->rq_lun = msg->rsp[0] & 3; 4053 daddr->rs_lun = msg->rsp[2] & 3; 4054 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4055 recv_msg->msg.cmd = msg->rsp[3]; 4056 memcpy(recv_msg->msg_data, &msg->rsp[4], msg->rsp_size - 4); 4057 recv_msg->msg.data = recv_msg->msg_data; 4058 recv_msg->msg.data_len = msg->rsp_size - 4; 4059 deliver_local_response(intf, recv_msg); 4060 4061 return 0; 4062 } 4063 4064 static int handle_lan_get_msg_rsp(struct ipmi_smi *intf, 4065 struct ipmi_smi_msg *msg) 4066 { 4067 struct ipmi_lan_addr lan_addr; 4068 struct ipmi_recv_msg *recv_msg; 4069 4070 4071 /* 4072 * This is 13, not 12, because the response must contain a 4073 * completion code. 4074 */ 4075 if (msg->rsp_size < 13) { 4076 /* Message not big enough, just ignore it. */ 4077 ipmi_inc_stat(intf, invalid_lan_responses); 4078 return 0; 4079 } 4080 4081 if (msg->rsp[2] != 0) { 4082 /* An error getting the response, just ignore it. */ 4083 return 0; 4084 } 4085 4086 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE; 4087 lan_addr.session_handle = msg->rsp[4]; 4088 lan_addr.remote_SWID = msg->rsp[8]; 4089 lan_addr.local_SWID = msg->rsp[5]; 4090 lan_addr.channel = msg->rsp[3] & 0x0f; 4091 lan_addr.privilege = msg->rsp[3] >> 4; 4092 lan_addr.lun = msg->rsp[9] & 3; 4093 4094 /* 4095 * It's a response from a remote entity. Look up the sequence 4096 * number and handle the response. 
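 * (The sequence entry was created in i_ipmi_req_lan() when the
 * command was originally sent.)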
    /*
     * It's a response from a remote entity. Look up the sequence
     * number and handle the response.
     */
    if (intf_find_seq(intf,
                      msg->rsp[9] >> 2,
                      msg->rsp[3] & 0x0f,
                      msg->rsp[10],
                      (msg->rsp[6] >> 2) & (~1),
                      (struct ipmi_addr *) &lan_addr,
                      &recv_msg)) {
        /*
         * We were unable to find the sequence number,
         * so just nuke the message.
         */
        ipmi_inc_stat(intf, unhandled_lan_responses);
        return 0;
    }

    memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11);
    /*
     * The other fields matched, so no need to set them, except
     * for netfn, which needs to be the response that was
     * returned, not the request value.
     */
    recv_msg->msg.netfn = msg->rsp[6] >> 2;
    recv_msg->msg.data = recv_msg->msg_data;
    recv_msg->msg.data_len = msg->rsp_size - 12;
    recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
    if (deliver_response(intf, recv_msg))
        ipmi_inc_stat(intf, unhandled_lan_responses);
    else
        ipmi_inc_stat(intf, handled_lan_responses);

    return 0;
}

static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
                                  struct ipmi_smi_msg *msg)
{
    struct cmd_rcvr *rcvr;
    int rv = 0;
    unsigned char netfn;
    unsigned char cmd;
    unsigned char chan;
    struct ipmi_user *user = NULL;
    struct ipmi_lan_addr *lan_addr;
    struct ipmi_recv_msg *recv_msg;

    if (msg->rsp_size < 12) {
        /* Message not big enough, just ignore it. */
        ipmi_inc_stat(intf, invalid_commands);
        return 0;
    }

    if (msg->rsp[2] != 0) {
        /* An error getting the response, just ignore it. */
        return 0;
    }

    netfn = msg->rsp[6] >> 2;
    cmd = msg->rsp[10];
    chan = msg->rsp[3] & 0xf;

    rcu_read_lock();
    rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
    if (rcvr) {
        user = rcvr->user;
        kref_get(&user->refcount);
    } else
        user = NULL;
    rcu_read_unlock();

    if (user == NULL) {
        /* We didn't find a user, just give up and return an error. */
        ipmi_inc_stat(intf, unhandled_commands);

        msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
        msg->data[1] = IPMI_SEND_MSG_CMD;
        msg->data[2] = chan;
        msg->data[3] = msg->rsp[4]; /* handle */
        msg->data[4] = msg->rsp[8]; /* rsSWID */
        msg->data[5] = ((netfn + 1) << 2) | (msg->rsp[9] & 0x3);
        msg->data[6] = ipmb_checksum(&msg->data[3], 3);
        msg->data[7] = msg->rsp[5]; /* rqSWID */
        /* rqseq/lun */
        msg->data[8] = (msg->rsp[9] & 0xfc) | (msg->rsp[6] & 0x3);
        msg->data[9] = cmd;
        msg->data[10] = IPMI_INVALID_CMD_COMPLETION_CODE;
        msg->data[11] = ipmb_checksum(&msg->data[7], 4);
        msg->data_size = 12;
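        /*
         * For illustration, the Send Message payload built above
         * (a sketch mirroring the assignments):
         *
         *   data[0..2]   Send Message netfn/cmd and channel
         *   data[3..5]   handle, rsSWID, response netfn/LUN
         *   data[6]      checksum over data[3..5]
         *   data[7..9]   rqSWID, rqSeq/LUN, rejected cmd
         *   data[10]     invalid-command completion code
         *   data[11]     checksum over data[7..10]
         */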
        dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
            msg->data_size, msg->data);

        smi_send(intf, intf->handlers, msg, 0);
        /*
         * We used the message, so return the value that
         * causes it to not be freed or queued.
         */
        rv = -1;
    } else {
        recv_msg = ipmi_alloc_recv_msg();
        if (!recv_msg) {
            /*
             * We couldn't allocate memory for the
             * message, so requeue it for handling later.
             */
            rv = 1;
            kref_put(&user->refcount, free_ipmi_user);
        } else {
            /* Extract the source address from the data. */
            lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
            lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
            lan_addr->session_handle = msg->rsp[4];
            lan_addr->remote_SWID = msg->rsp[8];
            lan_addr->local_SWID = msg->rsp[5];
            lan_addr->lun = msg->rsp[9] & 3;
            lan_addr->channel = msg->rsp[3] & 0xf;
            lan_addr->privilege = msg->rsp[3] >> 4;

            /*
             * Extract the rest of the message information
             * from the IPMB header.
             */
            recv_msg->user = user;
            recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
            recv_msg->msgid = msg->rsp[9] >> 2;
            recv_msg->msg.netfn = msg->rsp[6] >> 2;
            recv_msg->msg.cmd = msg->rsp[10];
            recv_msg->msg.data = recv_msg->msg_data;

            /*
             * We chop off 12, not 11 bytes because the checksum
             * at the end also needs to be removed.
             */
            recv_msg->msg.data_len = msg->rsp_size - 12;
            memcpy(recv_msg->msg_data, &msg->rsp[11],
                   msg->rsp_size - 12);
            if (deliver_response(intf, recv_msg))
                ipmi_inc_stat(intf, unhandled_commands);
            else
                ipmi_inc_stat(intf, handled_commands);
        }
    }

    return rv;
}

/*
 * This routine will handle "Get Message" command responses with
 * channels that use an OEM Medium. The message format belongs to
 * the OEM. See IPMI 2.0 specification, Chapter 6 and
 * Chapter 22, sections 22.6 and 22.24 for more details.
 */
static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
                                  struct ipmi_smi_msg *msg)
{
    struct cmd_rcvr *rcvr;
    int rv = 0;
    unsigned char netfn;
    unsigned char cmd;
    unsigned char chan;
    struct ipmi_user *user = NULL;
    struct ipmi_system_interface_addr *smi_addr;
    struct ipmi_recv_msg *recv_msg;

    /*
     * We expect the OEM SW to perform error checking, so we just
     * do some basic sanity checks.
     */
    if (msg->rsp_size < 4) {
        /* Message not big enough, just ignore it. */
        ipmi_inc_stat(intf, invalid_commands);
        return 0;
    }

    if (msg->rsp[2] != 0) {
        /* An error getting the response, just ignore it. */
        return 0;
    }

    /*
     * This is an OEM message, so the OEM needs to know how to
     * handle the message. We do no interpretation.
     */
    netfn = msg->rsp[0] >> 2;
    cmd = msg->rsp[1];
    chan = msg->rsp[3] & 0xf;
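    /*
     * For a client to see these messages, it must have registered
     * for the OEM netfn/cmd on this channel beforehand, e.g.
     * (a sketch, not taken from any particular driver):
     *
     *   rv = ipmi_register_for_cmd(user, netfn, cmd, 1 << chan);
     *
     * Otherwise the command is counted as unhandled and dropped
     * below.
     */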
    rcu_read_lock();
    rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
    if (rcvr) {
        user = rcvr->user;
        kref_get(&user->refcount);
    } else
        user = NULL;
    rcu_read_unlock();

    if (user == NULL) {
        /* We didn't find a user, just give up. */
        ipmi_inc_stat(intf, unhandled_commands);

        /*
         * Don't do anything with these messages, just allow
         * them to be freed.
         */
        rv = 0;
    } else {
        recv_msg = ipmi_alloc_recv_msg();
        if (!recv_msg) {
            /*
             * We couldn't allocate memory for the
             * message, so requeue it for handling
             * later.
             */
            rv = 1;
            kref_put(&user->refcount, free_ipmi_user);
        } else {
            /*
             * OEM messages are expected to be delivered via
             * the system interface to SMS software. We might
             * need to visit this again depending on OEM
             * requirements.
             */
            smi_addr = ((struct ipmi_system_interface_addr *)
                        &recv_msg->addr);
            smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
            smi_addr->channel = IPMI_BMC_CHANNEL;
            smi_addr->lun = msg->rsp[0] & 3;

            recv_msg->user = user;
            recv_msg->user_msg_data = NULL;
            recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
            recv_msg->msg.netfn = msg->rsp[0] >> 2;
            recv_msg->msg.cmd = msg->rsp[1];
            recv_msg->msg.data = recv_msg->msg_data;

            /*
             * The message starts at byte 4, which follows the
             * channel byte in the "Get Message" command.
             */
            recv_msg->msg.data_len = msg->rsp_size - 4;
            memcpy(recv_msg->msg_data, &msg->rsp[4],
                   msg->rsp_size - 4);
            if (deliver_response(intf, recv_msg))
                ipmi_inc_stat(intf, unhandled_commands);
            else
                ipmi_inc_stat(intf, handled_commands);
        }
    }

    return rv;
}

static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
                                     struct ipmi_smi_msg *msg)
{
    struct ipmi_system_interface_addr *smi_addr;

    recv_msg->msgid = 0;
    smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
    smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
    smi_addr->channel = IPMI_BMC_CHANNEL;
    smi_addr->lun = msg->rsp[0] & 3;
    recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
    recv_msg->msg.netfn = msg->rsp[0] >> 2;
    recv_msg->msg.cmd = msg->rsp[1];
    memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
    recv_msg->msg.data = recv_msg->msg_data;
    recv_msg->msg.data_len = msg->rsp_size - 3;
}

static int handle_read_event_rsp(struct ipmi_smi *intf,
                                 struct ipmi_smi_msg *msg)
{
    struct ipmi_recv_msg *recv_msg, *recv_msg2;
    struct list_head msgs;
    struct ipmi_user *user;
    int rv = 0, deliver_count = 0;

    if (msg->rsp_size < 19) {
        /* Message is too small to be an IPMB event. */
        ipmi_inc_stat(intf, invalid_events);
        return 0;
    }

    if (msg->rsp[2] != 0) {
        /* An error getting the event, just ignore it. */
        return 0;
    }

    INIT_LIST_HEAD(&msgs);

    mutex_lock(&intf->events_mutex);

    ipmi_inc_stat(intf, events);

    /*
     * Allocate and fill in one message for every user that is
     * getting events.
     */
    mutex_lock(&intf->users_mutex);
    list_for_each_entry(user, &intf->users, link) {
        if (!user->gets_events)
            continue;

        recv_msg = ipmi_alloc_recv_msg();
        if (!recv_msg) {
            mutex_unlock(&intf->users_mutex);
            list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
                                     link) {
                user = recv_msg->user;
                list_del(&recv_msg->link);
                ipmi_free_recv_msg(recv_msg);
                kref_put(&user->refcount, free_ipmi_user);
            }
            /*
             * We couldn't allocate memory for the
             * message, so requeue it for handling
             * later.
             */
            rv = 1;
            goto out;
        }

        deliver_count++;

        copy_event_into_recv_msg(recv_msg, msg);
        recv_msg->user = user;
        kref_get(&user->refcount);
        list_add_tail(&recv_msg->link, &msgs);
    }
    mutex_unlock(&intf->users_mutex);
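    /*
     * Note the two-phase approach: one copy per interested user was
     * allocated and filled while holding users_mutex, but delivery
     * happens only after it is dropped, since a user's receive
     * handler may itself take locks that must not nest inside it.
     */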
    if (deliver_count) {
        /* Now deliver all the messages. */
        list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
            list_del(&recv_msg->link);
            deliver_local_response(intf, recv_msg);
        }
    } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
        /*
         * No one to receive the message, so put it in the queue
         * if there aren't already too many things in it.
         */
        recv_msg = ipmi_alloc_recv_msg();
        if (!recv_msg) {
            /*
             * We couldn't allocate memory for the
             * message, so requeue it for handling
             * later.
             */
            rv = 1;
            goto out;
        }

        copy_event_into_recv_msg(recv_msg, msg);
        list_add_tail(&recv_msg->link, &intf->waiting_events);
        intf->waiting_events_count++;
    } else if (!intf->event_msg_printed) {
        /*
         * There are too many things in the queue, discard this
         * message.
         */
        dev_warn(intf->si_dev,
                 "Event queue full, discarding incoming events\n");
        intf->event_msg_printed = 1;
    }

out:
    mutex_unlock(&intf->events_mutex);

    return rv;
}
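/*
 * Events queued on waiting_events above are not lost; they are handed
 * to the next user that enables event reception (see
 * ipmi_set_gets_events()), and at most MAX_EVENTS_IN_QUEUE of them are
 * kept.
 */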
static int handle_bmc_rsp(struct ipmi_smi *intf,
                          struct ipmi_smi_msg *msg)
{
    struct ipmi_recv_msg *recv_msg;
    struct ipmi_system_interface_addr *smi_addr;

    recv_msg = msg->user_data;
    if (recv_msg == NULL) {
        dev_warn(intf->si_dev,
                 "IPMI SMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
        return 0;
    }

    recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
    recv_msg->msgid = msg->msgid;
    smi_addr = ((struct ipmi_system_interface_addr *)
                &recv_msg->addr);
    smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
    smi_addr->channel = IPMI_BMC_CHANNEL;
    smi_addr->lun = msg->rsp[0] & 3;
    recv_msg->msg.netfn = msg->rsp[0] >> 2;
    recv_msg->msg.cmd = msg->rsp[1];
    memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
    recv_msg->msg.data = recv_msg->msg_data;
    recv_msg->msg.data_len = msg->rsp_size - 2;
    deliver_local_response(intf, recv_msg);

    return 0;
}

/*
 * Handle a received message. Return 1 if the message should be requeued,
 * 0 if the message should be freed, or -1 if the message should not
 * be freed or requeued.
 */
static int handle_one_recv_msg(struct ipmi_smi *intf,
                               struct ipmi_smi_msg *msg)
{
    int requeue = 0;
    int chan;
    unsigned char cc;
    bool is_cmd = !((msg->rsp[0] >> 2) & 1);

    dev_dbg(intf->si_dev, "Recv: %*ph\n", msg->rsp_size, msg->rsp);

    if (msg->rsp_size < 2) {
        /* Message is too small to be correct. */
        dev_warn(intf->si_dev,
                 "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
                 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);

return_unspecified:
        /* Generate an error response for the message. */
        msg->rsp[0] = msg->data[0] | (1 << 2);
        msg->rsp[1] = msg->data[1];
        msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
        msg->rsp_size = 3;
    } else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
        /* Commands must have at least 4 bytes, responses 5. */
        if (is_cmd && (msg->rsp_size < 4)) {
            ipmi_inc_stat(intf, invalid_commands);
            goto out;
        }
        if (!is_cmd && (msg->rsp_size < 5)) {
            ipmi_inc_stat(intf, invalid_ipmb_responses);
            /* Construct a valid error response. */
            msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */
            msg->rsp[0] |= (1 << 2); /* Make it a response */
            msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */
            msg->rsp[1] = msg->data[1]; /* Addr */
            msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */
            msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */
            msg->rsp[3] = msg->data[3]; /* Cmd */
            msg->rsp[4] = IPMI_ERR_UNSPECIFIED;
            msg->rsp_size = 5;
        }
    } else if ((msg->data_size >= 2)
               && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
               && (msg->data[1] == IPMI_SEND_MSG_CMD)
               && (msg->user_data == NULL)) {

        if (intf->in_shutdown)
            goto out;

        /*
         * This is the local response to a command send, start
         * the timer for these. The user_data will not be
         * NULL if this is a response send, and we will let
         * response sends just go through.
         */

        /*
         * Check for errors, if we get certain errors (ones
         * that mean basically we can try again later), we
         * ignore them and start the timer. Otherwise we
         * report the error immediately.
         */
        if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
            && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
            && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
            && (msg->rsp[2] != IPMI_BUS_ERR)
            && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
            int ch = msg->rsp[3] & 0xf;
            struct ipmi_channel *chans;

            /* Got an error sending the message, handle it. */

            chans = READ_ONCE(intf->channel_list)->c;
            if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
                || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
                ipmi_inc_stat(intf, sent_lan_command_errs);
            else
                ipmi_inc_stat(intf, sent_ipmb_command_errs);
            intf_err_seq(intf, msg->msgid, msg->rsp[2]);
        } else
            /* The message was sent, start the timer. */
            intf_start_seq_timer(intf, msg->msgid);
        requeue = 0;
        goto out;
    } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
               || (msg->rsp[1] != msg->data[1])) {
        /*
         * The NetFN and Command in the response are not even
         * marginally correct.
         */
        dev_warn(intf->si_dev,
                 "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
                 (msg->data[0] >> 2) | 1, msg->data[1],
                 msg->rsp[0] >> 2, msg->rsp[1]);

        goto return_unspecified;
    }
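    /*
     * (In IPMI, a response NetFN is the request NetFN with the low
     * bit set; e.g. an App request, NetFN 0x06, is answered with
     * NetFN 0x07. That is what the "| 1" in the check above encodes.)
     */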
    if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
        if ((msg->data[0] >> 2) & 1) {
            /* It's a response to a sent response. */
            chan = 0;
            cc = msg->rsp[4];
            goto process_response_response;
        }
        if (is_cmd)
            requeue = handle_ipmb_direct_rcv_cmd(intf, msg);
        else
            requeue = handle_ipmb_direct_rcv_rsp(intf, msg);
    } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
               && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
               && (msg->user_data != NULL)) {
        /*
         * It's a response to a response we sent. For this we
         * deliver a send message response to the user.
         */
        struct ipmi_recv_msg *recv_msg;

        chan = msg->data[2] & 0x0f;
        if (chan >= IPMI_MAX_CHANNELS)
            /* Invalid channel number */
            goto out;
        cc = msg->rsp[2];

process_response_response:
        recv_msg = msg->user_data;

        requeue = 0;
        if (!recv_msg)
            goto out;

        recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
        recv_msg->msg.data = recv_msg->msg_data;
        recv_msg->msg_data[0] = cc;
        recv_msg->msg.data_len = 1;
        deliver_local_response(intf, recv_msg);
    } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
               && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
        struct ipmi_channel *chans;

        /* It's from the receive queue. */
        chan = msg->rsp[3] & 0xf;
        if (chan >= IPMI_MAX_CHANNELS) {
            /* Invalid channel number */
            requeue = 0;
            goto out;
        }

        /*
         * We need to make sure the channels have been initialized.
         * The channel_handler routine will set the "curr_channel"
         * equal to or greater than IPMI_MAX_CHANNELS when all the
         * channels for this interface have been initialized.
         */
        if (!intf->channels_ready) {
            requeue = 0; /* Throw the message away */
            goto out;
        }

        chans = READ_ONCE(intf->channel_list)->c;

        switch (chans[chan].medium) {
        case IPMI_CHANNEL_MEDIUM_IPMB:
            if (msg->rsp[4] & 0x04) {
                /*
                 * It's a response, so find the
                 * requesting message and send it up.
                 */
                requeue = handle_ipmb_get_msg_rsp(intf, msg);
            } else {
                /*
                 * It's a command to the SMS from some other
                 * entity. Handle that.
                 */
                requeue = handle_ipmb_get_msg_cmd(intf, msg);
            }
            break;

        case IPMI_CHANNEL_MEDIUM_8023LAN:
        case IPMI_CHANNEL_MEDIUM_ASYNC:
            if (msg->rsp[6] & 0x04) {
                /*
                 * It's a response, so find the
                 * requesting message and send it up.
                 */
                requeue = handle_lan_get_msg_rsp(intf, msg);
            } else {
                /*
                 * It's a command to the SMS from some other
                 * entity. Handle that.
                 */
                requeue = handle_lan_get_msg_cmd(intf, msg);
            }
            break;

        default:
            /*
             * Check for OEM Channels. Clients had better
             * register for these commands.
             */
            if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
                && (chans[chan].medium
                    <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
                requeue = handle_oem_get_msg_cmd(intf, msg);
            } else {
                /*
                 * We don't handle the channel type, so just
                 * free the message.
                 */
                requeue = 0;
            }
        }

    } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
               && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
        /* It's an asynchronous event. */
        requeue = handle_read_event_rsp(intf, msg);
    } else {
        /* It's a response from the local BMC. */
        requeue = handle_bmc_rsp(intf, msg);
    }

out:
    return requeue;
}
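/*
 * Receive flow, for orientation: the lower layer calls
 * ipmi_smi_msg_received(), which queues the message on
 * waiting_rcv_msgs and schedules smi_work(); that in turn calls
 * handle_new_recv_msgs() below, which feeds each message to
 * handle_one_recv_msg() above and acts on its return value.
 */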
/*
 * If there are messages in the queue or pretimeouts, handle them.
 */
static void handle_new_recv_msgs(struct ipmi_smi *intf)
{
    struct ipmi_smi_msg *smi_msg;
    unsigned long flags = 0;
    int rv;
    int run_to_completion = READ_ONCE(intf->run_to_completion);

    /* See if any waiting messages need to be processed. */
    if (!run_to_completion)
        spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
    while (!list_empty(&intf->waiting_rcv_msgs)) {
        smi_msg = list_entry(intf->waiting_rcv_msgs.next,
                             struct ipmi_smi_msg, link);
        list_del(&smi_msg->link);
        if (!run_to_completion)
            spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
                                   flags);
        rv = handle_one_recv_msg(intf, smi_msg);
        if (!run_to_completion)
            spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
        if (rv > 0) {
            /*
             * To preserve message order, quit if we
             * can't handle a message. Add the message
             * back at the head, this is safe because this
             * workqueue is the only thing that pulls the
             * messages.
             */
            list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
            break;
        } else {
            if (rv == 0)
                /* Message handled */
                ipmi_free_smi_msg(smi_msg);
            /* If rv < 0, the message was consumed; don't free it. */
        }
    }
    if (!run_to_completion)
        spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
}

static void smi_work(struct work_struct *t)
{
    unsigned long flags = 0; /* keep us warning-free. */
    struct ipmi_smi *intf = from_work(intf, t, smi_work);
    int run_to_completion = READ_ONCE(intf->run_to_completion);
    struct ipmi_smi_msg *newmsg = NULL;
    struct ipmi_recv_msg *msg, *msg2;

    /*
     * Start the next message if available.
     *
     * Do this here, not in the actual receiver, because the lower
     * layer is allowed to hold locks while calling message
     * delivery, so doing it there could deadlock.
     */

    if (!run_to_completion)
        spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
    if (intf->curr_msg == NULL && !intf->in_shutdown) {
        struct list_head *entry = NULL;

        /* Pick the high priority queue first. */
        if (!list_empty(&intf->hp_xmit_msgs))
            entry = intf->hp_xmit_msgs.next;
        else if (!list_empty(&intf->xmit_msgs))
            entry = intf->xmit_msgs.next;

        if (entry) {
            list_del(entry);
            newmsg = list_entry(entry, struct ipmi_smi_msg, link);
            intf->curr_msg = newmsg;
        }
    }
    if (!run_to_completion)
        spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

    if (newmsg)
        intf->handlers->sender(intf->send_info, newmsg);

    handle_new_recv_msgs(intf);

    /* Nothing below applies during panic time. */
    if (run_to_completion)
        return;

    /*
     * If the pretimeout count is non-zero, decrement one from it and
     * deliver pretimeouts to all the users.
     */
    if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
        struct ipmi_user *user;

        mutex_lock(&intf->users_mutex);
        list_for_each_entry(user, &intf->users, link) {
            if (user->handler->ipmi_watchdog_pretimeout)
                user->handler->ipmi_watchdog_pretimeout(
                    user->handler_data);
        }
        mutex_unlock(&intf->users_mutex);
    }

    /*
     * Freeing the message can cause a user to be released, which
     * can then cause the interface to be freed. Make sure that
     * doesn't happen until we are ready.
     */
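    /*
     * Concretely, the chain being guarded against is: the user's
     * ipmi_recv_hndl() may call ipmi_free_recv_msg(), which drops the
     * user's refcount; releasing the last user reference can in turn
     * release the interface, so we pin intf until delivery is done.
     */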
    kref_get(&intf->refcount);

    mutex_lock(&intf->user_msgs_mutex);
    list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) {
        struct ipmi_user *user = msg->user;

        list_del(&msg->link);
        atomic_dec(&user->nr_msgs);
        user->handler->ipmi_recv_hndl(msg, user->handler_data);
    }
    mutex_unlock(&intf->user_msgs_mutex);

    kref_put(&intf->refcount, intf_free);
}

/* Handle a new message from the lower layer. */
void ipmi_smi_msg_received(struct ipmi_smi *intf,
                           struct ipmi_smi_msg *msg)
{
    unsigned long flags = 0; /* keep us warning-free. */
    int run_to_completion = READ_ONCE(intf->run_to_completion);

    /*
     * To preserve message order, we keep a queue and deliver from
     * a workqueue.
     */
    if (!run_to_completion)
        spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
    list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
    if (!run_to_completion)
        spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
                               flags);

    if (!run_to_completion)
        spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
    /*
     * We can get an asynchronous event or receive message in addition
     * to commands we send.
     */
    if (msg == intf->curr_msg)
        intf->curr_msg = NULL;
    if (!run_to_completion)
        spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

    if (run_to_completion)
        smi_work(&intf->smi_work);
    else
        queue_work(system_wq, &intf->smi_work);
}
EXPORT_SYMBOL(ipmi_smi_msg_received);

void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
{
    if (intf->in_shutdown)
        return;

    atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
    queue_work(system_wq, &intf->smi_work);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);

static struct ipmi_smi_msg *
smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
                  unsigned char seq, long seqid)
{
    struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();

    if (!smi_msg)
        /*
         * If we can't allocate the message, then just return;
         * we get 4 retries, so this should be OK.
         */
        return NULL;

    memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
    smi_msg->data_size = recv_msg->msg.data_len;
    smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);

    dev_dbg(intf->si_dev, "Resend: %*ph\n",
            smi_msg->data_size, smi_msg->data);

    return smi_msg;
}
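/*
 * Rough worst-case arithmetic, using the module-parameter defaults
 * from earlier in this file (default_retry_ms = 2000,
 * default_max_retries = 4): a normal command is resent roughly every
 * 2 seconds and gives up once its retries are exhausted, i.e. on the
 * order of ten seconds, with each wait additionally bounded by
 * MAX_MSG_TIMEOUT (60000 ms).
 */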
static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
                              struct list_head *timeouts,
                              unsigned long timeout_period,
                              int slot, unsigned long *flags,
                              bool *need_timer)
{
    struct ipmi_recv_msg *msg;

    if (intf->in_shutdown)
        return;

    if (!ent->inuse)
        return;

    if (timeout_period < ent->timeout) {
        ent->timeout -= timeout_period;
        *need_timer = true;
        return;
    }

    if (ent->retries_left == 0) {
        /* The message has used all its retries. */
        ent->inuse = 0;
        smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
        msg = ent->recv_msg;
        list_add_tail(&msg->link, timeouts);
        if (ent->broadcast)
            ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
        else if (is_lan_addr(&ent->recv_msg->addr))
            ipmi_inc_stat(intf, timed_out_lan_commands);
        else
            ipmi_inc_stat(intf, timed_out_ipmb_commands);
    } else {
        struct ipmi_smi_msg *smi_msg;
        /* More retries, send again. */

        *need_timer = true;

        /*
         * Start with the max timer, set to normal timer after
         * the message is sent.
         */
        ent->timeout = MAX_MSG_TIMEOUT;
        ent->retries_left--;
        smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
                                    ent->seqid);
        if (!smi_msg) {
            if (is_lan_addr(&ent->recv_msg->addr))
                ipmi_inc_stat(intf,
                              dropped_rexmit_lan_commands);
            else
                ipmi_inc_stat(intf,
                              dropped_rexmit_ipmb_commands);
            return;
        }

        spin_unlock_irqrestore(&intf->seq_lock, *flags);

        /*
         * Send the new message. We send with a zero
         * priority. It timed out, I doubt time is that
         * critical now, and high priority messages are really
         * only for messages to the local MC, which don't get
         * resent.
         */
        if (intf->handlers) {
            if (is_lan_addr(&ent->recv_msg->addr))
                ipmi_inc_stat(intf,
                              retransmitted_lan_commands);
            else
                ipmi_inc_stat(intf,
                              retransmitted_ipmb_commands);

            smi_send(intf, intf->handlers, smi_msg, 0);
        } else
            ipmi_free_smi_msg(smi_msg);

        spin_lock_irqsave(&intf->seq_lock, *flags);
    }
}

static bool ipmi_timeout_handler(struct ipmi_smi *intf,
                                 unsigned long timeout_period)
{
    struct list_head timeouts;
    struct ipmi_recv_msg *msg, *msg2;
    unsigned long flags;
    int i;
    bool need_timer = false;

    if (!intf->bmc_registered) {
        kref_get(&intf->refcount);
        if (!schedule_work(&intf->bmc_reg_work)) {
            kref_put(&intf->refcount, intf_free);
            need_timer = true;
        }
    }

    /*
     * Go through the seq table and find any messages that
     * have timed out, putting them in the timeouts
     * list.
     */
    INIT_LIST_HEAD(&timeouts);
    spin_lock_irqsave(&intf->seq_lock, flags);
    if (intf->ipmb_maintenance_mode_timeout) {
        if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
            intf->ipmb_maintenance_mode_timeout = 0;
        else
            intf->ipmb_maintenance_mode_timeout -= timeout_period;
    }
    for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
        check_msg_timeout(intf, &intf->seq_table[i],
                          &timeouts, timeout_period, i,
                          &flags, &need_timer);
    spin_unlock_irqrestore(&intf->seq_lock, flags);

    list_for_each_entry_safe(msg, msg2, &timeouts, link)
        deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
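    /*
     * Each timed-out request is completed back to its user as an
     * ordinary response whose single data byte is
     * IPMI_TIMEOUT_COMPLETION_CODE, essentially the same one-byte
     * form used for the response-responses in handle_one_recv_msg()
     * above.
     */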
    /*
     * Maintenance mode handling. Check the timeout
     * optimistically before we claim the lock. It may
     * mean a timeout gets missed occasionally, but that
     * only means the timeout gets extended by one period
     * in that case. No big deal, and it avoids the lock
     * most of the time.
     */
    if (intf->auto_maintenance_timeout > 0) {
        spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
        if (intf->auto_maintenance_timeout > 0) {
            intf->auto_maintenance_timeout
                -= timeout_period;
            if (!intf->maintenance_mode
                && (intf->auto_maintenance_timeout <= 0)) {
                intf->maintenance_mode_enable = false;
                maintenance_mode_update(intf);
            }
        }
        spin_unlock_irqrestore(&intf->maintenance_mode_lock,
                               flags);
    }

    queue_work(system_wq, &intf->smi_work);

    return need_timer;
}

static void ipmi_request_event(struct ipmi_smi *intf)
{
    /* No event requests when in maintenance mode. */
    if (intf->maintenance_mode_enable)
        return;

    if (!intf->in_shutdown)
        intf->handlers->request_events(intf->send_info);
}

static struct timer_list ipmi_timer;

static atomic_t stop_operation;

static void ipmi_timeout_work(struct work_struct *work)
{
    struct ipmi_smi *intf;
    bool need_timer = false;

    if (atomic_read(&stop_operation))
        return;

    mutex_lock(&ipmi_interfaces_mutex);
    list_for_each_entry(intf, &ipmi_interfaces, link) {
        if (atomic_read(&intf->event_waiters)) {
            intf->ticks_to_req_ev--;
            if (intf->ticks_to_req_ev == 0) {
                ipmi_request_event(intf);
                intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
            }
            need_timer = true;
        }

        need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
    }
    mutex_unlock(&ipmi_interfaces_mutex);

    if (need_timer)
        mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}

static DECLARE_WORK(ipmi_timer_work, ipmi_timeout_work);

static void ipmi_timeout(struct timer_list *unused)
{
    if (atomic_read(&stop_operation))
        return;

    queue_work(system_wq, &ipmi_timer_work);
}

static void need_waiter(struct ipmi_smi *intf)
{
    /* Racy, but worst case we start the timer twice. */
    if (!timer_pending(&ipmi_timer))
        mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}

static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);

static void free_smi_msg(struct ipmi_smi_msg *msg)
{
    atomic_dec(&smi_msg_inuse_count);
    /* Try to keep as much stuff out of the panic path as possible. */
    if (!oops_in_progress)
        kfree(msg);
}

struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
{
    struct ipmi_smi_msg *rv;

    rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
    if (rv) {
        rv->done = free_smi_msg;
        rv->user_data = NULL;
        rv->type = IPMI_SMI_MSG_TYPE_NORMAL;
        atomic_inc(&smi_msg_inuse_count);
    }
    return rv;
}
EXPORT_SYMBOL(ipmi_alloc_smi_msg);
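/*
 * A minimal usage sketch for a lower layer (illustrative only, with
 * made-up fill values):
 *
 *   struct ipmi_smi_msg *m = ipmi_alloc_smi_msg();
 *
 *   if (m) {
 *       m->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
 *       m->data[1] = IPMI_GET_DEVICE_ID_CMD;
 *       m->data_size = 2;
 *   }
 *
 * When the message is finished with, ipmi_free_smi_msg() (which
 * invokes m->done(), i.e. free_smi_msg() above) releases it.
 */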
static void free_recv_msg(struct ipmi_recv_msg *msg)
{
    atomic_dec(&recv_msg_inuse_count);
    /* Try to keep as much stuff out of the panic path as possible. */
    if (!oops_in_progress)
        kfree(msg);
}

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
{
    struct ipmi_recv_msg *rv;

    rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
    if (rv) {
        rv->user = NULL;
        rv->done = free_recv_msg;
        atomic_inc(&recv_msg_inuse_count);
    }
    return rv;
}

void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
    if (msg->user && !oops_in_progress)
        kref_put(&msg->user->refcount, free_ipmi_user);
    msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);

static atomic_t panic_done_count = ATOMIC_INIT(0);

static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
    atomic_dec(&panic_done_count);
}

static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
    atomic_dec(&panic_done_count);
}

/*
 * Inside a panic, send a message and wait for a response.
 */
static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
                                        struct ipmi_addr *addr,
                                        struct kernel_ipmi_msg *msg)
{
    struct ipmi_smi_msg smi_msg;
    struct ipmi_recv_msg recv_msg;
    int rv;

    smi_msg.done = dummy_smi_done_handler;
    recv_msg.done = dummy_recv_done_handler;
    atomic_add(2, &panic_done_count);
    rv = i_ipmi_request(NULL,
                        intf,
                        addr,
                        0,
                        msg,
                        intf,
                        &smi_msg,
                        &recv_msg,
                        0,
                        intf->addrinfo[0].address,
                        intf->addrinfo[0].lun,
                        0, 1); /* Don't retry, and don't wait. */
    if (rv)
        atomic_sub(2, &panic_done_count);
    else if (intf->handlers->flush_messages)
        intf->handlers->flush_messages(intf->send_info);

    while (atomic_read(&panic_done_count) != 0)
        ipmi_poll(intf);
}

static void event_receiver_fetcher(struct ipmi_smi *intf,
                                   struct ipmi_recv_msg *msg)
{
    if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
        && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
        && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
        && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
        /* A get event receiver command, save it. */
        intf->event_receiver = msg->msg.data[1];
        intf->event_receiver_lun = msg->msg.data[2] & 0x3;
    }
}

static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
    if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
        && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
        && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
        && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
        /*
         * A get device id command, save if we are an event
         * receiver or generator.
         */
        intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
        intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
    }
}

static void send_panic_events(struct ipmi_smi *intf, char *str)
{
    struct kernel_ipmi_msg msg;
    unsigned char data[16];
    struct ipmi_system_interface_addr *si;
    struct ipmi_addr addr;
    char *p = str;
    struct ipmi_ipmb_addr *ipmb;
    int j;

    if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
        return;

    si = (struct ipmi_system_interface_addr *) &addr;
    si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
    si->channel = IPMI_BMC_CHANNEL;
    si->lun = 0;

    /* Fill in an event telling that we have failed. */
    msg.netfn = 0x04; /* Sensor or Event. */
    msg.cmd = 2; /* Platform event command. */
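    /*
     * The 8 data bytes below follow the IPMI event request format
     * (generator ID, EvM rev, sensor type, sensor #, event dir/type,
     * event data 1-3); the sensor # and event data 2/3 slots are
     * borrowed to carry the first three characters of the panic
     * string.
     */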
    msg.data = data;
    msg.data_len = 8;
    data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
    data[1] = 0x03; /* This is for IPMI 1.0. */
    data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
    data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
    data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */

    /*
     * Put a few breadcrumbs in. Hopefully later we can add more things
     * to make the panic events more useful.
     */
    if (str) {
        data[3] = str[0];
        data[6] = str[1];
        data[7] = str[2];
    }

    /* Send the event announcing the panic. */
    ipmi_panic_request_and_wait(intf, &addr, &msg);

    /*
     * On every interface, dump a set of OEM events holding the
     * string.
     */
    if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
        return;

    /*
     * intf_num is used as a marker to tell if the
     * interface is valid. Thus we need a read barrier to
     * make sure data fetched before checking intf_num
     * won't be used.
     */
    smp_rmb();

    /*
     * First job here is to figure out where to send the
     * OEM events. There's no way in IPMI to send OEM
     * events using an event send command, so we have to
     * find the SEL to put them in and stick them in
     * there.
     */

    /* Get capabilities from the get device id. */
    intf->local_sel_device = 0;
    intf->local_event_generator = 0;
    intf->event_receiver = 0;

    /* Request the device info from the local MC. */
    msg.netfn = IPMI_NETFN_APP_REQUEST;
    msg.cmd = IPMI_GET_DEVICE_ID_CMD;
    msg.data = NULL;
    msg.data_len = 0;
    intf->null_user_handler = device_id_fetcher;
    ipmi_panic_request_and_wait(intf, &addr, &msg);

    if (intf->local_event_generator) {
        /* Request the event receiver from the local MC. */
        msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
        msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
        msg.data = NULL;
        msg.data_len = 0;
        intf->null_user_handler = event_receiver_fetcher;
        ipmi_panic_request_and_wait(intf, &addr, &msg);
    }
    intf->null_user_handler = NULL;

    /*
     * Validate the event receiver. The low bit must not
     * be 1 (it must be a valid IPMB address), it cannot
     * be zero, and it must not be my address.
     */
    if (((intf->event_receiver & 1) == 0)
        && (intf->event_receiver != 0)
        && (intf->event_receiver != intf->addrinfo[0].address)) {
        /*
         * The event receiver is valid, send an IPMB
         * message.
         */
        ipmb = (struct ipmi_ipmb_addr *) &addr;
        ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
        ipmb->channel = 0; /* FIXME - is this right? */
        ipmb->lun = intf->event_receiver_lun;
        ipmb->slave_addr = intf->event_receiver;
    } else if (intf->local_sel_device) {
        /*
         * The event receiver was not valid (or was
         * me), but I am an SEL device, just dump it
         * in my SEL.
         */
        si = (struct ipmi_system_interface_addr *) &addr;
        si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
        si->channel = IPMI_BMC_CHANNEL;
        si->lun = 0;
    } else
        return; /* Nowhere to send the event. */

    msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
    msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
    msg.data = data;
    msg.data_len = 16;
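    /*
     * Each Add SEL Entry request below carries one 16-byte OEM
     * record (a sketch of the layout used here):
     *
     *   data[0..1]   record ID (filled in by the BMC)
     *   data[2]      record type 0xf0 (OEM, non-timestamped)
     *   data[3]      our slave address
     *   data[4]      chunk sequence number
     *   data[5..15]  up to 11 bytes of the panic string
     */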
    j = 0;
    while (*p) {
        int size = strnlen(p, 11);

        data[0] = 0;
        data[1] = 0;
        data[2] = 0xf0; /* OEM event without timestamp. */
        data[3] = intf->addrinfo[0].address;
        data[4] = j++; /* sequence # */

        memcpy_and_pad(data+5, 11, p, size, '\0');
        p += size;

        ipmi_panic_request_and_wait(intf, &addr, &msg);
    }
}

static int has_panicked;

static int panic_event(struct notifier_block *this,
                       unsigned long event,
                       void *ptr)
{
    struct ipmi_smi *intf;
    struct ipmi_user *user;

    if (has_panicked)
        return NOTIFY_DONE;
    has_panicked = 1;

    /* For every registered interface, set it to run to completion. */
    list_for_each_entry(intf, &ipmi_interfaces, link) {
        if (!intf->handlers || intf->intf_num == -1)
            /* Interface is not ready. */
            continue;

        if (!intf->handlers->poll)
            continue;

        /*
         * If we were interrupted while locking xmit_msgs_lock or
         * waiting_rcv_msgs_lock, the corresponding list may be
         * corrupted. In this case, drop items on the list for
         * safety.
         */
        if (!spin_trylock(&intf->xmit_msgs_lock)) {
            INIT_LIST_HEAD(&intf->xmit_msgs);
            INIT_LIST_HEAD(&intf->hp_xmit_msgs);
        } else
            spin_unlock(&intf->xmit_msgs_lock);

        if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
            INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
        else
            spin_unlock(&intf->waiting_rcv_msgs_lock);

        intf->run_to_completion = 1;
        if (intf->handlers->set_run_to_completion)
            intf->handlers->set_run_to_completion(intf->send_info,
                                                  1);

        list_for_each_entry(user, &intf->users, link) {
            if (user->handler->ipmi_panic_handler)
                user->handler->ipmi_panic_handler(
                    user->handler_data);
        }

        send_panic_events(intf, ptr);
    }

    return NOTIFY_DONE;
}
/* Must be called with ipmi_interfaces_mutex held. */
static int ipmi_register_driver(void)
{
    int rv;

    if (drvregistered)
        return 0;

    rv = driver_register(&ipmidriver.driver);
    if (rv)
        pr_err("Could not register IPMI driver\n");
    else
        drvregistered = true;
    return rv;
}

static struct notifier_block panic_block = {
    .notifier_call = panic_event,
    .next = NULL,
    .priority = 200 /* priority: INT_MAX >= x >= 0 */
};

static int ipmi_init_msghandler(void)
{
    int rv;

    mutex_lock(&ipmi_interfaces_mutex);
    rv = ipmi_register_driver();
    if (rv)
        goto out;
    if (initialized)
        goto out;

    bmc_remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
    if (!bmc_remove_work_wq) {
        pr_err("unable to create ipmi-msghandler-remove-wq workqueue\n");
        rv = -ENOMEM;
        goto out;
    }

    timer_setup(&ipmi_timer, ipmi_timeout, 0);
    mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);

    atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

    initialized = true;

out:
    mutex_unlock(&ipmi_interfaces_mutex);
    return rv;
}

static int __init ipmi_init_msghandler_mod(void)
{
    int rv;

    pr_info("version " IPMI_DRIVER_VERSION "\n");

    mutex_lock(&ipmi_interfaces_mutex);
    rv = ipmi_register_driver();
    mutex_unlock(&ipmi_interfaces_mutex);

    return rv;
}

static void __exit cleanup_ipmi(void)
{
    int count;

    if (initialized) {
        destroy_workqueue(bmc_remove_work_wq);

        atomic_notifier_chain_unregister(&panic_notifier_list,
                                         &panic_block);

        /*
         * This can't be called if any interfaces exist, so no worry
         * about shutting down the interfaces.
         */

        /*
         * Tell the timer to stop, then wait for it to stop. This
         * avoids problems with race conditions removing the timer
         * here.
         */
        atomic_set(&stop_operation, 1);
        timer_delete_sync(&ipmi_timer);
        cancel_work_sync(&ipmi_timer_work);

        initialized = false;

        /* Check for buffer leaks. */
        count = atomic_read(&smi_msg_inuse_count);
        if (count != 0)
            pr_warn("SMI message count %d at exit\n", count);
        count = atomic_read(&recv_msg_inuse_count);
        if (count != 0)
            pr_warn("recv message count %d at exit\n", count);
    }
    if (drvregistered)
        driver_unregister(&ipmidriver.driver);
}
module_exit(cleanup_ipmi);

module_init(ipmi_init_msghandler_mod);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
MODULE_VERSION(IPMI_DRIVER_VERSION);
MODULE_SOFTDEP("post: ipmi_devintf");