// SPDX-License-Identifier: GPL-2.0+
/*
 * Incoming and outgoing message routing for an IPMI interface.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *
 * Copyright 2002 MontaVista Software Inc.
 */

#define pr_fmt(fmt) "%s" fmt, "IPMI message handler: "
#define dev_fmt pr_fmt
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include <linux/nospec.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>

#define IPMI_DRIVER_VERSION "39.2"
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
static void smi_recv_tasklet(struct tasklet_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg);

static bool initialized;
static bool drvregistered;
/* Numbers in this enumerator should be mapped to ipmi_panic_event_str */
enum ipmi_panic_event_op {
	IPMI_SEND_PANIC_EVENT_NONE,
	IPMI_SEND_PANIC_EVENT,
	IPMI_SEND_PANIC_EVENT_STRING,
	IPMI_SEND_PANIC_EVENT_MAX
};

/* Indices in this array should be mapped to enum ipmi_panic_event_op */
static const char *const ipmi_panic_event_str[] = { "none", "event", "string", NULL };

#ifdef CONFIG_IPMI_PANIC_STRING
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
#elif defined(CONFIG_IPMI_PANIC_EVENT)
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
#else
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
#endif

static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;
static int panic_op_write_handler(const char *val,
				  const struct kernel_param *kp)
{
	char valcp[16];
	int e;

	strscpy(valcp, val, sizeof(valcp));
	e = match_string(ipmi_panic_event_str, -1, strstrip(valcp));
	if (e < 0)
		return e;

	ipmi_send_panic_event = e;
	return 0;
}
static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
{
	const char *event_str;

	if (ipmi_send_panic_event >= IPMI_SEND_PANIC_EVENT_MAX)
		event_str = "???";
	else
		event_str = ipmi_panic_event_str[ipmi_send_panic_event];

	return sprintf(buffer, "%s\n", event_str);
}
static const struct kernel_param_ops panic_op_ops = {
	.set = panic_op_write_handler,
	.get = panic_op_read_handler
};
module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic.  Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
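
/*
 * Illustrative usage note (not part of the driver): because panic_op
 * is registered with module_param_cb() and mode 0600, it can normally
 * be changed at runtime through sysfs, assuming the handler is built
 * as the ipmi_msghandler module:
 *
 *	# echo string > /sys/module/ipmi_msghandler/parameters/panic_op
 *	# cat /sys/module/ipmi_msghandler/parameters/panic_op
 *	string
 */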
#define MAX_EVENTS_IN_QUEUE 25

/* Remain in auto-maintenance mode for this amount of time (in ms). */
static unsigned long maintenance_mode_timeout_ms = 30000;
module_param(maintenance_mode_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(maintenance_mode_timeout_ms,
		 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");

/*
 * Don't let a message sit in a queue forever, always time it with at least
 * the max message timer.  This is in milliseconds.
 */
#define MAX_MSG_TIMEOUT		60000
/*
 * Timeout times below are in milliseconds, and are done off a 1
 * second timer.  So setting the value to 1000 would mean anything
 * between 0 and 1000ms.  So really the only reasonable minimum
 * setting is 2000ms, which is between 1 and 2 seconds.
 */

/* The default timeout for message retries. */
static unsigned long default_retry_ms = 2000;
module_param(default_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_retry_ms,
		 "The time (milliseconds) between retry sends");

/* The default timeout for maintenance mode message retries. */
static unsigned long default_maintenance_retry_ms = 3000;
module_param(default_maintenance_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_maintenance_retry_ms,
		 "The time (milliseconds) between retry sends in maintenance mode");

/* The default maximum number of retries */
static unsigned int default_max_retries = 4;
module_param(default_max_retries, uint, 0644);
MODULE_PARM_DESC(default_max_retries,
		 "The maximum number of times a message send is retried");
/* Call every ~1000 ms. */
#define IPMI_TIMEOUT_TIME	1000

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)

/*
 * Request events from the queue every second (this is the number of
 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
 * future, IPMI will add a way to know immediately if an event is in
 * the queue and this silliness can go away.
 */
#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))
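
/*
 * Worked example (illustrative): with HZ == 250, IPMI_TIMEOUT_JIFFIES
 * is (1000 * 250) / 1000 == 250 jiffies, i.e. about one second.  And
 * since IPMI_REQUEST_EV_TIME is 1000 / 1000 == 1, events are requested
 * on every timeout tick, roughly once a second.
 */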
/* How long should we cache dynamic device IDs? */
#define IPMI_DYN_DEV_ID_EXPIRY	(10 * HZ)
/*
 * The main "user" data structure.
 */
struct ipmi_user {
	struct list_head link;

	/*
	 * Set to NULL when the user is destroyed, a pointer to myself
	 * so srcu_dereference can be used on it.
	 */
	struct ipmi_user *self;
	struct srcu_struct release_barrier;

	struct kref refcount;

	/* The upper layer that handles receive messages. */
	const struct ipmi_user_hndl *handler;
	void *handler_data;

	/* The interface this user is bound to. */
	struct ipmi_smi *intf;

	/* Does this interface receive IPMI events? */
	bool gets_events;

	/* Free must run in process context for RCU cleanup. */
	struct work_struct remove_work;
};
static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
	__acquires(user->release_barrier)
{
	struct ipmi_user *ruser;

	*index = srcu_read_lock(&user->release_barrier);
	ruser = srcu_dereference(user->self, &user->release_barrier);
	if (!ruser)
		srcu_read_unlock(&user->release_barrier, *index);
	return ruser;
}

static void release_ipmi_user(struct ipmi_user *user, int index)
{
	srcu_read_unlock(&user->release_barrier, index);
}
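
/*
 * Typical usage of the pair above, as in the public entry points later
 * in this file (sketch; error handling trimmed):
 *
 *	int index;
 *
 *	user = acquire_ipmi_user(user, &index);
 *	if (!user)
 *		return -ENODEV;
 *	...use user; it cannot go away until it is released...
 *	release_ipmi_user(user, index);
 */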
struct cmd_rcvr {
	struct list_head link;

	struct ipmi_user *user;
	unsigned char netfn;
	unsigned char cmd;
	unsigned int  chans;

	/*
	 * This is used to form a linked list during mass deletion.
	 * Since this is in an RCU list, we cannot use the link above
	 * or change any data until the RCU period completes.  So we
	 * use this next variable during mass deletion so we can have
	 * a list and don't have to wait and restart the search on
	 * every individual deletion of a command.
	 */
	struct cmd_rcvr *next;
};
struct seq_table {
	unsigned int inuse : 1;
	unsigned int broadcast : 1;

	unsigned long timeout;
	unsigned long orig_timeout;
	unsigned int retries_left;

	/*
	 * To verify on an incoming send message response that this is
	 * the message that the response is for, we keep a sequence id
	 * and increment it every time we send a message.
	 */
	long seqid;

	/*
	 * This is held so we can properly respond to the message on a
	 * timeout, and it is used to hold the temporary data for
	 * retransmission, too.
	 */
	struct ipmi_recv_msg *recv_msg;
};
/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
	do {								\
		seq = (((msgid) >> 26) & 0x3f);				\
		seqid = ((msgid) & 0x3ffffff);				\
	} while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
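
/*
 * Worked example (illustrative): STORE_SEQ_IN_MSGID(5, 0x123) packs
 * the sequence number into the top six bits, (5 << 26) | 0x123 ==
 * 0x14000123, and GET_SEQ_FROM_MSGID() on that msgid recovers
 * seq == 5 and seqid == 0x123 from the low 26 bits.
 */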
#define IPMI_MAX_CHANNELS 16
struct ipmi_channel {
	unsigned char medium;
	unsigned char protocol;
};

struct ipmi_channel_set {
	struct ipmi_channel c[IPMI_MAX_CHANNELS];
};

struct ipmi_my_addrinfo {
	/*
	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
	 * but may be changed by the user.
	 */
	unsigned char address;
	/*
	 * My LUN.  This should generally stay the SMS LUN, but just in
	 * case...
	 */
	unsigned char lun;
};

/*
 * Note that the product id, manufacturer id, guid, and device id are
 * immutable in this structure, so dyn_mutex is not required for
 * accessing those.  If those change on a BMC, a new BMC is allocated.
 */
struct bmc_device {
	struct platform_device pdev;
	struct list_head       intfs; /* Interfaces on this BMC. */
	struct ipmi_device_id  id;
	struct ipmi_device_id  fetch_id;
	unsigned long          dyn_id_expiry;
	struct mutex           dyn_mutex; /* Protects id, intfs, & dyn* */
	struct kref            usecount;
	struct work_struct     remove_work;
	unsigned char          cc; /* completion code */
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			     struct ipmi_device_id *id,
			     bool *guid_set, guid_t *guid);
/*
 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 * structure.
 */
enum ipmi_stat_indexes {
	/* Commands we got from the user that were invalid. */
	IPMI_STAT_sent_invalid_commands = 0,

	/* Commands we sent to the MC. */
	IPMI_STAT_sent_local_commands,

	/* Responses from the MC that were delivered to a user. */
	IPMI_STAT_handled_local_responses,

	/* Responses from the MC that were not delivered to a user. */
	IPMI_STAT_unhandled_local_responses,

	/* Commands we sent out to the IPMB bus. */
	IPMI_STAT_sent_ipmb_commands,

	/* Commands sent on the IPMB that had errors on the SEND CMD */
	IPMI_STAT_sent_ipmb_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_ipmb_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_ipmb_commands,

	/*
	 * This is like above, but for broadcasts.  Broadcasts are
	 * *not* included in the above count (they are expected to
	 * time out).
	 */
	IPMI_STAT_timed_out_ipmb_broadcasts,

	/* Responses I have sent to the IPMB bus. */
	IPMI_STAT_sent_ipmb_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_ipmb_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_ipmb_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_ipmb_responses,

	/* Commands we sent out to the LAN interface. */
	IPMI_STAT_sent_lan_commands,

	/* Commands sent on the LAN that had errors on the SEND CMD */
	IPMI_STAT_sent_lan_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_lan_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_lan_commands,

	/* Responses I have sent to the LAN interface. */
	IPMI_STAT_sent_lan_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_lan_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_lan_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_lan_responses,

	/* The command was delivered to the user. */
	IPMI_STAT_handled_commands,

	/* The command had invalid data in it. */
	IPMI_STAT_invalid_commands,

	/* The command didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_commands,

	/* Invalid data in an event. */
	IPMI_STAT_invalid_events,

	/* Events that were received with the proper format. */
	IPMI_STAT_events,

	/* Retransmissions on IPMB that failed. */
	IPMI_STAT_dropped_rexmit_ipmb_commands,

	/* Retransmissions on LAN that failed. */
	IPMI_STAT_dropped_rexmit_lan_commands,

	/* This *must* remain last, add new values above this. */
	IPMI_NUM_STATS
};
#define IPMI_IPMB_NUM_SEQ	64
struct ipmi_smi {
	struct module *owner;

	/* What interface number are we? */
	int intf_num;

	struct kref refcount;

	/* Set when the interface is being unregistered. */
	bool in_shutdown;

	/* Used for a list of interfaces. */
	struct list_head link;

	/*
	 * The list of upper layers that are using me.  seq_lock write
	 * protects this.  Read protection is with srcu.
	 */
	struct list_head users;
	struct srcu_struct users_srcu;

	/* Used for wake ups at startup. */
	wait_queue_head_t waitq;
	/*
	 * Prevents the interface from being unregistered when the
	 * interface is used by being looked up through the BMC
	 * structure.
	 */
	struct mutex bmc_reg_mutex;

	struct bmc_device tmp_bmc;
	struct bmc_device *bmc;
	bool bmc_registered;
	struct list_head bmc_link;
	bool in_bmc_register; /* Handle recursive situations.  Yuck. */
	struct work_struct bmc_reg_work;

	const struct ipmi_smi_handlers *handlers;
	void *send_info;

	/* Driver-model device for the system interface. */
	struct device *si_dev;

	/*
	 * A table of sequence numbers for this interface.  We use the
	 * sequence numbers for IPMB messages that go out of the
	 * interface to match them up with their responses.  A routine
	 * is called periodically to time the items in this list.
	 */
	spinlock_t       seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;
	/*
	 * Messages queued for delivery.  If delivery fails (out of memory
	 * for instance), they will stay in here to be processed later in a
	 * periodic timer interrupt.  The tasklet is for handling received
	 * messages directly from the handler.
	 */
	spinlock_t       waiting_rcv_msgs_lock;
	struct list_head waiting_rcv_msgs;
	atomic_t         watchdog_pretimeouts_to_deliver;
	struct tasklet_struct recv_tasklet;

	spinlock_t          xmit_msgs_lock;
	struct list_head    xmit_msgs;
	struct ipmi_smi_msg *curr_msg;
	struct list_head    hp_xmit_msgs;
	/*
	 * The list of command receivers that are registered for commands
	 * on this interface.
	 */
	struct mutex     cmd_rcvrs_mutex;
	struct list_head cmd_rcvrs;

	/*
	 * Events that were queued because no one was there to receive
	 * them.
	 */
	spinlock_t       events_lock; /* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int     waiting_events_count; /* How many events in queue? */
	char             delivering_events;
	char             event_msg_printed;

	/* How many users are waiting for events? */
	atomic_t event_waiters;
	unsigned int ticks_to_req_ev;
	spinlock_t watch_lock; /* For dealing with watch stuff below. */

	/* How many users are waiting for commands? */
	unsigned int command_waiters;

	/* How many users are waiting for watchdogs? */
	unsigned int watchdog_waiters;

	/* How many users are waiting for message responses? */
	unsigned int response_waiters;

	/*
	 * Tells what the lower layer has last been asked to watch for,
	 * messages and/or watchdogs.  Protected by watch_lock.
	 */
	unsigned int last_watch_mask;

	/*
	 * The event receiver for my BMC, only really used at panic
	 * shutdown as a place to store this.
	 */
	unsigned char event_receiver;
	unsigned char event_receiver_lun;
	unsigned char local_sel_device;
	unsigned char local_event_generator;
	/* For handling of maintenance mode. */
	int maintenance_mode;
	bool maintenance_mode_enable;
	int auto_maintenance_timeout;
	spinlock_t maintenance_mode_lock; /* Used in a timer... */

	/*
	 * If we are doing maintenance on something on IPMB, extend
	 * the timeout time to avoid timeouts writing firmware and
	 * such.
	 */
	int ipmb_maintenance_mode_timeout;

	/*
	 * A cheap hack, if this is non-null and a message to an
	 * interface comes in with a NULL user, call this routine with
	 * it.  Note that the message will still be freed by the
	 * caller.  This only works on the system interface.
	 *
	 * Protected by bmc_reg_mutex.
	 */
	void (*null_user_handler)(struct ipmi_smi *intf,
				  struct ipmi_recv_msg *msg);
	/*
	 * When we are scanning the channels for an SMI, this will
	 * tell which channel we are scanning.
	 */
	int curr_channel;

	/* Channel information */
	struct ipmi_channel_set *channel_list;
	unsigned int curr_working_cset; /* First index into the following. */
	struct ipmi_channel_set wchannels[2];
	struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];

	atomic_t stats[IPMI_NUM_STATS];

	/*
	 * run_to_completion duplicate of smb_info, smi_info
	 * and ipmi_serial_info structures.  Used to decrease the number of
	 * parameters passed by "low" level IPMI code.
	 */
	int run_to_completion;
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
static void __get_guid(struct ipmi_smi *intf);
static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
static int __ipmi_bmc_register(struct ipmi_smi *intf,
			       struct ipmi_device_id *id,
			       bool guid_set, guid_t *guid, int intf_num);
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
/*
 * The driver model view of the IPMI messaging driver.
 */
static struct platform_driver ipmidriver = {
	.driver = {
		.name = "ipmi",
		.bus = &platform_bus_type
	}
};

/*
 * This mutex keeps us from adding the same BMC twice.
 */
static DEFINE_MUTEX(ipmidriver_mutex);

static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
#define ipmi_interfaces_mutex_held() \
	lockdep_is_held(&ipmi_interfaces_mutex)
static struct srcu_struct ipmi_interfaces_srcu;
/*
 * List of watchers that want to know when smi's are added and deleted.
 */
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);

#define ipmi_inc_stat(intf, stat) \
	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
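
/*
 * Expansion example (illustrative): ipmi_inc_stat(intf, sent_ipmb_commands)
 * becomes atomic_inc(&intf->stats[IPMI_STAT_sent_ipmb_commands]), so the
 * name passed to these macros must match an IPMI_STAT_* enumerator above.
 */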
static const char * const addr_src_to_str[] = {
	"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
	"device-tree", "platform"
};

const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
{
	if (src >= SI_LAST)
		src = 0; /* Invalid */
	return addr_src_to_str[src];
}
EXPORT_SYMBOL(ipmi_addr_src_to_str);
static int is_lan_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
}

static int is_ipmb_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
}

static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}
static void free_recv_msg_list(struct list_head *q)
{
	struct ipmi_recv_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_recv_msg(msg);
	}
}

static void free_smi_msg_list(struct list_head *q)
{
	struct ipmi_smi_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_smi_msg(msg);
	}
}
static void clean_up_interface_data(struct ipmi_smi *intf)
{
	int i;
	struct cmd_rcvr *rcvr, *rcvr2;
	struct list_head list;

	tasklet_kill(&intf->recv_tasklet);

	free_smi_msg_list(&intf->waiting_rcv_msgs);
	free_recv_msg_list(&intf->waiting_events);

	/*
	 * Wholesale remove all the entries from the list in the
	 * interface and wait for RCU to know that none are in use.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	INIT_LIST_HEAD(&list);
	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
	mutex_unlock(&intf->cmd_rcvrs_mutex);

	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
		kfree(rcvr);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if ((intf->seq_table[i].inuse)
		    && (intf->seq_table[i].recv_msg))
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
	}
}

static void intf_free(struct kref *ref)
{
	struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);

	clean_up_interface_data(intf);
	kfree(intf);
}
struct watcher_entry {
	struct ipmi_smi  *intf;
	struct list_head link;
};

int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
	struct ipmi_smi *intf;
	int index, rv;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	mutex_lock(&smi_watchers_mutex);

	list_add(&watcher->link, &smi_watchers);

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link,
			lockdep_is_held(&smi_watchers_mutex)) {
		int intf_num = READ_ONCE(intf->intf_num);

		if (intf_num == -1)
			continue;
		watcher->new_smi(intf_num, intf->si_dev);
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	mutex_unlock(&smi_watchers_mutex);

	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);
int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
{
	mutex_lock(&smi_watchers_mutex);
	list_del(&watcher->link);
	mutex_unlock(&smi_watchers_mutex);
	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
/*
 * Takes and releases smi_watchers_mutex itself, so callers must not
 * hold it.
 */
static void
call_smi_watchers(int i, struct device *dev)
{
	struct ipmi_smi_watcher *w;

	mutex_lock(&smi_watchers_mutex);
	list_for_each_entry(w, &smi_watchers, link) {
		if (try_module_get(w->owner)) {
			w->new_smi(i, dev);
			module_put(w->owner);
		}
	}
	mutex_unlock(&smi_watchers_mutex);
}
static int
ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
{
	if (addr1->addr_type != addr2->addr_type)
		return 0;

	if (addr1->channel != addr2->channel)
		return 0;

	if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		struct ipmi_system_interface_addr *smi_addr1
		    = (struct ipmi_system_interface_addr *) addr1;
		struct ipmi_system_interface_addr *smi_addr2
		    = (struct ipmi_system_interface_addr *) addr2;
		return (smi_addr1->lun == smi_addr2->lun);
	}

	if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
		struct ipmi_ipmb_addr *ipmb_addr1
		    = (struct ipmi_ipmb_addr *) addr1;
		struct ipmi_ipmb_addr *ipmb_addr2
		    = (struct ipmi_ipmb_addr *) addr2;

		return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
			&& (ipmb_addr1->lun == ipmb_addr2->lun));
	}

	if (is_lan_addr(addr1)) {
		struct ipmi_lan_addr *lan_addr1
			= (struct ipmi_lan_addr *) addr1;
		struct ipmi_lan_addr *lan_addr2
			= (struct ipmi_lan_addr *) addr2;

		return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
			&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
			&& (lan_addr1->session_handle
			    == lan_addr2->session_handle)
			&& (lan_addr1->lun == lan_addr2->lun));
	}

	return 1;
}
int ipmi_validate_addr(struct ipmi_addr *addr, int len)
{
	if (len < sizeof(struct ipmi_system_interface_addr))
		return -EINVAL;
	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		if (addr->channel != IPMI_BMC_CHANNEL)
			return -EINVAL;
		return 0;
	}
	if ((addr->channel == IPMI_BMC_CHANNEL)
	    || (addr->channel >= IPMI_MAX_CHANNELS)
	    || (addr->channel < 0))
		return -EINVAL;
	if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
		if (len < sizeof(struct ipmi_ipmb_addr))
			return -EINVAL;
		return 0;
	}
	if (is_lan_addr(addr)) {
		if (len < sizeof(struct ipmi_lan_addr))
			return -EINVAL;
		return 0;
	}
	return -EINVAL;
}
EXPORT_SYMBOL(ipmi_validate_addr);
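
/*
 * Usage sketch (illustrative): callers fill in a concrete address type
 * and validate it before use, e.g. for the system interface:
 *
 *	struct ipmi_system_interface_addr si;
 *
 *	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
 *	si.channel = IPMI_BMC_CHANNEL;
 *	si.lun = 0;
 *	if (ipmi_validate_addr((struct ipmi_addr *) &si, sizeof(si)))
 *		return -EINVAL;
 */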
unsigned int ipmi_addr_length(int addr_type)
{
	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
		return sizeof(struct ipmi_system_interface_addr);
	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
	    || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
		return sizeof(struct ipmi_ipmb_addr);
	if (addr_type == IPMI_LAN_ADDR_TYPE)
		return sizeof(struct ipmi_lan_addr);
	return 0;
}
EXPORT_SYMBOL(ipmi_addr_length);
static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	int rv = 0;

	if (!msg->user) {
		/* Special handling for NULL users. */
		if (intf->null_user_handler) {
			intf->null_user_handler(intf, msg);
		} else {
			/* No handler, so give up. */
			rv = -EINVAL;
		}
		ipmi_free_recv_msg(msg);
	} else if (oops_in_progress) {
		/*
		 * If we are running in the panic context, calling the
		 * receive handler doesn't have much meaning and has a
		 * deadlock risk.  At this moment, simply skip it in that case.
		 */
		ipmi_free_recv_msg(msg);
	} else {
		int index;
		struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);

		if (user) {
			user->handler->ipmi_recv_hndl(msg, user->handler_data);
			release_ipmi_user(user, index);
		} else {
			/* User went away, give up. */
			ipmi_free_recv_msg(msg);
			rv = -EINVAL;
		}
	}

	return rv;
}
static void deliver_local_response(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if (deliver_response(intf, msg))
		ipmi_inc_stat(intf, unhandled_local_responses);
	else
		ipmi_inc_stat(intf, handled_local_responses);
}

static void deliver_err_response(struct ipmi_smi *intf,
				 struct ipmi_recv_msg *msg, int err)
{
	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	msg->msg_data[0] = err;
	msg->msg.netfn |= 1; /* Convert to a response. */
	msg->msg.data_len = 1;
	msg->msg.data = msg->msg_data;
	deliver_local_response(intf, msg);
}
static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters++;

	if ((intf->last_watch_mask & flags) != flags) {
		intf->last_watch_mask |= flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}

static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters--;

	flags = 0;
	if (intf->response_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
	if (intf->watchdog_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
	if (intf->command_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;

	if (intf->last_watch_mask != flags) {
		intf->last_watch_mask = flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}
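
/*
 * Usage note (illustrative): the two functions above are
 * reference-counted per flag and are meant to be used in pairs, e.g.
 * a user that asked for watchdog pretimeouts does
 *
 *	smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
 *	...
 *	smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
 *
 * and the lower layer's set_need_watch() is only called when the
 * effective mask actually changes.
 */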
/*
 * Find the next sequence number not being used and add the given
 * message with the given timeout to the sequence table.  This must be
 * called with the interface's seq_lock held.
 */
static int intf_next_seq(struct ipmi_smi      *intf,
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long        timeout,
			 int                  retries,
			 int                  broadcast,
			 unsigned char        *seq,
			 long                 *seqid)
{
	int rv = 0;
	unsigned int i;

	if (timeout == 0)
		timeout = default_retry_ms;
	if (retries < 0)
		retries = default_max_retries;

	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
	     i = (i+1)%IPMI_IPMB_NUM_SEQ) {
		if (!intf->seq_table[i].inuse)
			break;
	}

	if (!intf->seq_table[i].inuse) {
		intf->seq_table[i].recv_msg = recv_msg;

		/*
		 * Start with the maximum timeout, when the send response
		 * comes in we will start the real timer.
		 */
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].broadcast = broadcast;
		intf->seq_table[i].inuse = 1;
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seq = i;
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		need_waiter(intf);
	} else {
		rv = -EAGAIN;
	}

	return rv;
}
/*
 * Return the receive message for the given sequence number and
 * release the sequence number so it can be reused.  Some other data
 * is passed in to be sure the message matches up correctly (to help
 * guard against messages coming in after their timeout and the
 * sequence number being reused).
 */
static int intf_find_seq(struct ipmi_smi      *intf,
			 unsigned char        seq,
			 long                 seqid,
			 int                  channel,
			 unsigned char        cmd,
			 unsigned char        netfn,
			 struct ipmi_addr     *addr,
			 struct ipmi_recv_msg **recv_msg)
{
	int rv = -ENODEV;
	unsigned long flags;

	if (seq >= IPMI_IPMB_NUM_SEQ)
		return -EINVAL;

	spin_lock_irqsave(&intf->seq_lock, flags);
	if (intf->seq_table[seq].inuse) {
		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;

		if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
		    && (msg->msg.netfn == netfn)
		    && (ipmi_addr_equal(addr, &msg->addr))) {
			*recv_msg = msg;
			intf->seq_table[seq].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			rv = 0;
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	return rv;
}
/* Start the timer for a specific sequence table entry. */
static int intf_start_seq_timer(struct ipmi_smi *intf,
				long            msgid)
{
	int rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;

	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&intf->seq_lock, flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];
		ent->timeout = ent->orig_timeout;
		rv = 0;
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	return rv;
}
/* Got an error for the send message for a specific sequence number. */
static int intf_err_seq(struct ipmi_smi *intf,
			long            msgid,
			unsigned int    err)
{
	int rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;
	struct ipmi_recv_msg *msg = NULL;

	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&intf->seq_lock, flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];

		ent->inuse = 0;
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		msg = ent->recv_msg;
		rv = 0;
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	if (msg)
		deliver_err_response(intf, msg, err);

	return rv;
}
static void free_user_work(struct work_struct *work)
{
	struct ipmi_user *user = container_of(work, struct ipmi_user,
					      remove_work);

	cleanup_srcu_struct(&user->release_barrier);
	vfree(user);
}
int ipmi_create_user(unsigned int          if_num,
		     const struct ipmi_user_hndl *handler,
		     void                  *handler_data,
		     struct ipmi_user      **user)
{
	unsigned long flags;
	struct ipmi_user *new_user;
	int rv, index;
	struct ipmi_smi *intf;

	/*
	 * There is no module usecount here, because it's not
	 * required.  Since this can only be used by and called from
	 * other modules, they will implicitly use this module, and
	 * thus this can't be removed unless the other modules are
	 * removed.
	 */

	if (handler == NULL)
		return -EINVAL;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	new_user = vzalloc(sizeof(*new_user));
	if (!new_user)
		return -ENOMEM;

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	/* Not found, return an error */
	rv = -EINVAL;
	goto out_kfree;

found:
	INIT_WORK(&new_user->remove_work, free_user_work);

	rv = init_srcu_struct(&new_user->release_barrier);
	if (rv)
		goto out_kfree;

	if (!try_module_get(intf->owner)) {
		rv = -ENODEV;
		goto out_kfree;
	}

	/* Note that each existing user holds a refcount to the interface. */
	kref_get(&intf->refcount);

	kref_init(&new_user->refcount);
	new_user->handler = handler;
	new_user->handler_data = handler_data;
	new_user->intf = intf;
	new_user->gets_events = false;

	rcu_assign_pointer(new_user->self, new_user);
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_add_rcu(&new_user->link, &intf->users);
	spin_unlock_irqrestore(&intf->seq_lock, flags);
	if (handler->ipmi_watchdog_pretimeout)
		/* User wants pretimeouts, so make sure to watch for them. */
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
	*user = new_user;
	return 0;

out_kfree:
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
	vfree(new_user);
	return rv;
}
EXPORT_SYMBOL(ipmi_create_user);
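
/*
 * Client-side sketch (illustrative; error handling elided).  The names
 * my_recv and my_hndl are examples, not part of this driver:
 *
 *	static void my_recv(struct ipmi_recv_msg *msg, void *data)
 *	{
 *		...examine msg->recv_type, msg->msg, msg->addr...
 *		ipmi_free_recv_msg(msg);
 *	}
 *
 *	static const struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl = my_recv,
 *	};
 *
 *	struct ipmi_user *user;
 *	int rv = ipmi_create_user(0, &my_hndl, NULL, &user);
 *	...
 *	ipmi_destroy_user(user);
 */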
int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
	int rv, index;
	struct ipmi_smi *intf;

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	/* Not found, return an error */
	return -EINVAL;

found:
	if (!intf->handlers->get_smi_info)
		rv = -ENOTTY;
	else
		rv = intf->handlers->get_smi_info(intf->send_info, data);
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);
static void free_user(struct kref *ref)
{
	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);

	/* SRCU cleanup must happen in task context. */
	schedule_work(&user->remove_work);
}
static void _ipmi_destroy_user(struct ipmi_user *user)
{
	struct ipmi_smi *intf = user->intf;
	int i;
	unsigned long flags;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;

	if (!acquire_ipmi_user(user, &i)) {
		/*
		 * The user has already been cleaned up, just make sure
		 * nothing is using it and return.
		 */
		synchronize_srcu(&user->release_barrier);
		return;
	}

	rcu_assign_pointer(user->self, NULL);
	release_ipmi_user(user, i);

	synchronize_srcu(&user->release_barrier);

	if (user->handler->shutdown)
		user->handler->shutdown(user->handler_data);

	if (user->handler->ipmi_watchdog_pretimeout)
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);

	if (user->gets_events)
		atomic_dec(&intf->event_waiters);

	/* Remove the user from the interface's sequence table. */
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_del_rcu(&user->link);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if (intf->seq_table[i].inuse
		    && (intf->seq_table[i].recv_msg->user == user)) {
			intf->seq_table[i].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	/*
	 * Remove the user from the command receiver's table.  First
	 * we build a list of everything (not using the standard link,
	 * since other things may be using it till we do
	 * synchronize_srcu()) then free everything in that list.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if (rcvr->user == user) {
			list_del_rcu(&rcvr->link);
			rcvr->next = rcvrs;
			rcvrs = rcvr;
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	while (rcvrs) {
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	kref_put(&intf->refcount, intf_free);
	module_put(intf->owner);
}
int ipmi_destroy_user(struct ipmi_user *user)
{
	_ipmi_destroy_user(user);

	kref_put(&user->refcount, free_user);

	return 0;
}
EXPORT_SYMBOL(ipmi_destroy_user);
int ipmi_get_version(struct ipmi_user *user,
		     unsigned char *major,
		     unsigned char *minor)
{
	struct ipmi_device_id id;
	int rv, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
	if (!rv) {
		*major = ipmi_version_major(&id);
		*minor = ipmi_version_minor(&id);
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_version);
int ipmi_set_my_address(struct ipmi_user *user,
			unsigned int  channel,
			unsigned char address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].address = address;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_address);

int ipmi_get_my_address(struct ipmi_user *user,
			unsigned int  channel,
			unsigned char *address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].address;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_address);

int ipmi_set_my_LUN(struct ipmi_user *user,
		    unsigned int  channel,
		    unsigned char LUN)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].lun = LUN & 0x3;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_LUN);

int ipmi_get_my_LUN(struct ipmi_user *user,
		    unsigned int  channel,
		    unsigned char *address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].lun;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_LUN);
int ipmi_get_maintenance_mode(struct ipmi_user *user)
{
	int mode, index;
	unsigned long flags;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
	mode = user->intf->maintenance_mode;
	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
	release_ipmi_user(user, index);

	return mode;
}
EXPORT_SYMBOL(ipmi_get_maintenance_mode);

static void maintenance_mode_update(struct ipmi_smi *intf)
{
	if (intf->handlers->set_maintenance_mode)
		intf->handlers->set_maintenance_mode(
			intf->send_info, intf->maintenance_mode_enable);
}
int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
{
	int rv = 0, index;
	unsigned long flags;
	struct ipmi_smi *intf = user->intf;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
	if (intf->maintenance_mode != mode) {
		switch (mode) {
		case IPMI_MAINTENANCE_MODE_AUTO:
			intf->maintenance_mode_enable
				= (intf->auto_maintenance_timeout > 0);
			break;

		case IPMI_MAINTENANCE_MODE_OFF:
			intf->maintenance_mode_enable = false;
			break;

		case IPMI_MAINTENANCE_MODE_ON:
			intf->maintenance_mode_enable = true;
			break;

		default:
			rv = -EINVAL;
			goto out_unlock;
		}
		intf->maintenance_mode = mode;

		maintenance_mode_update(intf);
	}
 out_unlock:
	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_maintenance_mode);
int ipmi_set_gets_events(struct ipmi_user *user, bool val)
{
	unsigned long flags;
	struct ipmi_smi *intf = user->intf;
	struct ipmi_recv_msg *msg, *msg2;
	struct list_head msgs;
	int index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);
	if (user->gets_events == val)
		goto out;

	user->gets_events = val;

	if (val) {
		if (atomic_inc_return(&intf->event_waiters) == 1)
			need_waiter(intf);
	} else {
		atomic_dec(&intf->event_waiters);
	}

	if (intf->delivering_events)
		/*
		 * Another thread is delivering events for this, so
		 * let it handle any new events.
		 */
		goto out;

	/* Deliver any queued events. */
	while (user->gets_events && !list_empty(&intf->waiting_events)) {
		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
			list_move_tail(&msg->link, &msgs);
		intf->waiting_events_count = 0;
		if (intf->event_msg_printed) {
			dev_warn(intf->si_dev, "Event queue no longer full\n");
			intf->event_msg_printed = 0;
		}

		intf->delivering_events = 1;
		spin_unlock_irqrestore(&intf->events_lock, flags);

		list_for_each_entry_safe(msg, msg2, &msgs, link) {
			msg->user = user;
			kref_get(&user->refcount);
			deliver_local_response(intf, msg);
		}

		spin_lock_irqsave(&intf->events_lock, flags);
		intf->delivering_events = 0;
	}

 out:
	spin_unlock_irqrestore(&intf->events_lock, flags);
	release_ipmi_user(user, index);

	return 0;
}
EXPORT_SYMBOL(ipmi_set_gets_events);
static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
				      unsigned char netfn,
				      unsigned char cmd,
				      unsigned char chan)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
		    && (rcvr->chans & (1 << chan)))
			return rcvr;
	}
	return NULL;
}

static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
				 unsigned char netfn,
				 unsigned char cmd,
				 unsigned int  chans)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
		    && (rcvr->chans & chans))
			return 0;
	}
	return 1;
}
int ipmi_register_for_cmd(struct ipmi_user *user,
			  unsigned char netfn,
			  unsigned char cmd,
			  unsigned int  chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	int rv = 0, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
	if (!rcvr) {
		rv = -ENOMEM;
		goto out_release;
	}
	rcvr->cmd = cmd;
	rcvr->netfn = netfn;
	rcvr->chans = chans;
	rcvr->user = user;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	/* Make sure the command/netfn is not already registered. */
	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
		rv = -EBUSY;
		goto out_unlock;
	}

	smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);

	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);

out_unlock:
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	if (rv)
		kfree(rcvr);
out_release:
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_register_for_cmd);
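
/*
 * Registration sketch (illustrative): to receive chassis-control
 * commands (netfn 0x00, cmd 0x02 in the IPMI spec) on channel 0 only,
 * a user passes a one-bit channel mask; chans is a bitmask of
 * channels, not a channel number:
 *
 *	rv = ipmi_register_for_cmd(user, 0x00, 0x02, 1 << 0);
 */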
int ipmi_unregister_for_cmd(struct ipmi_user *user,
			    unsigned char netfn,
			    unsigned char cmd,
			    unsigned int  chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	int i, rv = -ENOENT, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
		if (((1 << i) & chans) == 0)
			continue;
		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
		if (rcvr == NULL)
			continue;
		if (rcvr->user == user) {
			rv = 0;
			rcvr->chans &= ~chans;
			if (rcvr->chans == 0) {
				list_del_rcu(&rcvr->link);
				rcvr->next = rcvrs;
				rcvrs = rcvr;
			}
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	release_ipmi_user(user, index);
	while (rcvrs) {
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	return rv;
}
EXPORT_SYMBOL(ipmi_unregister_for_cmd);
static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
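
/*
 * Property check (illustrative): the IPMB checksum is the two's
 * complement of the byte sum, so appending it makes the total sum zero
 * mod 256.  For bytes { 0x20, 0x18 } the sum is 0x38 and the checksum
 * is 0xc8, since 0x38 + 0xc8 == 0x100 == 0 (mod 256).
 */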
static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
				   struct kernel_ipmi_msg *msg,
				   struct ipmi_ipmb_addr *ipmb_addr,
				   long msgid,
				   unsigned char ipmb_seq,
				   int broadcast,
				   unsigned char source_address,
				   unsigned char source_lun)
{
	int i = broadcast;

	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = ipmb_addr->channel;
	if (broadcast)
		smi_msg->data[3] = 0;
	smi_msg->data[i+3] = ipmb_addr->slave_addr;
	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
	smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
	smi_msg->data[i+6] = source_address;
	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[i+8] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 9;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[i+smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);

	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1 + i;

	smi_msg->msgid = msgid;
}
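
/*
 * Resulting layout (illustrative), for a non-broadcast message (i == 0):
 * data[0..2] carry the Send Message wrapper (netfn/cmd/channel), and
 * data[3..] the embedded IPMB frame:
 *
 *	rsSA | netfn<<2|rsLUN | csum1 | rqSA | rqSeq<<2|rqLUN | cmd |
 *	<data...> | csum2
 *
 * where csum1 covers the two bytes before it and csum2 covers
 * everything from rqSA through the end of the data.
 */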
static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_lan_addr *lan_addr,
				  long msgid,
				  unsigned char ipmb_seq,
				  unsigned char source_lun)
{
	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = lan_addr->channel;
	smi_msg->data[3] = lan_addr->session_handle;
	smi_msg->data[4] = lan_addr->remote_SWID;
	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
	smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
	smi_msg->data[7] = lan_addr->local_SWID;
	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[9] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[10], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 10;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);

	/* Add on the checksum size. */
	smi_msg->data_size += 1;

	smi_msg->msgid = msgid;
}
static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
					     struct ipmi_smi_msg *smi_msg,
					     int priority)
{
	if (intf->curr_msg) {
		if (priority > 0)
			list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
		else
			list_add_tail(&smi_msg->link, &intf->xmit_msgs);
		smi_msg = NULL;
	} else {
		intf->curr_msg = smi_msg;
	}

	return smi_msg;
}

static void smi_send(struct ipmi_smi *intf,
		     const struct ipmi_smi_handlers *handlers,
		     struct ipmi_smi_msg *smi_msg, int priority)
{
	int run_to_completion = intf->run_to_completion;
	unsigned long flags = 0;

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	smi_msg = smi_add_send_msg(intf, smi_msg, priority);

	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (smi_msg)
		handlers->sender(intf->send_info, smi_msg);
}
static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
{
	return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
		 && ((msg->cmd == IPMI_COLD_RESET_CMD)
		     || (msg->cmd == IPMI_WARM_RESET_CMD)))
		|| (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
}
static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
			      struct ipmi_addr *addr,
			      long msgid,
			      struct kernel_ipmi_msg *msg,
			      struct ipmi_smi_msg *smi_msg,
			      struct ipmi_recv_msg *recv_msg,
			      int retries,
			      unsigned int retry_time_ms)
{
	struct ipmi_system_interface_addr *smi_addr;

	if (msg->netfn & 1)
		/* Responses are not allowed to the SMI. */
		return -EINVAL;

	smi_addr = (struct ipmi_system_interface_addr *) addr;
	if (smi_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));

	if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
	    && ((msg->cmd == IPMI_SEND_MSG_CMD)
		|| (msg->cmd == IPMI_GET_MSG_CMD)
		|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
		/*
		 * We don't let the user do these, since we manage
		 * the sequence numbers.
		 */
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (is_maintenance_mode_cmd(msg)) {
		unsigned long flags;

		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		intf->auto_maintenance_timeout
			= maintenance_mode_timeout_ms;
		if (!intf->maintenance_mode
		    && !intf->maintenance_mode_enable) {
			intf->maintenance_mode_enable = true;
			maintenance_mode_update(intf);
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
	smi_msg->data[1] = msg->cmd;
	smi_msg->msgid = msgid;
	smi_msg->user_data = recv_msg;
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[2], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 2;
	ipmi_inc_stat(intf, sent_local_commands);

	return 0;
}
static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
			   struct ipmi_addr *addr,
			   long msgid,
			   struct kernel_ipmi_msg *msg,
			   struct ipmi_smi_msg *smi_msg,
			   struct ipmi_recv_msg *recv_msg,
			   unsigned char source_address,
			   unsigned char source_lun,
			   int retries,
			   unsigned int retry_time_ms)
{
	struct ipmi_ipmb_addr *ipmb_addr;
	unsigned char ipmb_seq;
	long seqid;
	int broadcast = 0;
	struct ipmi_channel *chans;
	int rv = 0;

	if (addr->channel >= IPMI_MAX_CHANNELS) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	chans = READ_ONCE(intf->channel_list)->c;

	if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
		/*
		 * Broadcasts add a zero at the beginning of the
		 * message, but otherwise are the same as an IPMB
		 * address.
		 */
		addr->addr_type = IPMI_IPMB_ADDR_TYPE;
		broadcast = 1;
		retries = 0; /* Don't retry broadcasts. */
	}

	/*
	 * 9 for the header and 1 for the checksum, plus
	 * possibly one for the broadcast.
	 */
	if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	ipmb_addr = (struct ipmi_ipmb_addr *) addr;
	if (ipmb_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));

	if (recv_msg->msg.netfn & 0x1) {
		/*
		 * It's a response, so use the user's sequence
		 * from msgid.
		 */
		ipmi_inc_stat(intf, sent_ipmb_responses);
		format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
				msgid, broadcast,
				source_address, source_lun);

		/*
		 * Save the receive message so we can use it
		 * to deliver the response.
		 */
		smi_msg->user_data = recv_msg;
	} else {
		/* It's a command, so get a sequence for it. */
		unsigned long flags;

		spin_lock_irqsave(&intf->seq_lock, flags);

		if (is_maintenance_mode_cmd(msg))
			intf->ipmb_maintenance_mode_timeout =
				maintenance_mode_timeout_ms;

		if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
			/* Different default in maintenance mode */
			retry_time_ms = default_maintenance_retry_ms;

		/*
		 * Create a sequence number with a 1 second
		 * timeout and 4 retries.
		 */
		rv = intf_next_seq(intf,
				   recv_msg,
				   retry_time_ms,
				   retries,
				   broadcast,
				   &ipmb_seq,
				   &seqid);
		if (rv)
			/*
			 * We have used up all the sequence numbers,
			 * probably, so abort.
			 */
			goto out_err;

		ipmi_inc_stat(intf, sent_ipmb_commands);

		/*
		 * Store the sequence number in the message,
		 * so that when the send message response
		 * comes back we can start the timer.
		 */
		format_ipmb_msg(smi_msg, msg, ipmb_addr,
				STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
				ipmb_seq, broadcast,
				source_address, source_lun);

		/*
		 * Copy the message into the recv message data, so we
		 * can retransmit it later if necessary.
		 */
		memcpy(recv_msg->msg_data, smi_msg->data,
		       smi_msg->data_size);
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = smi_msg->data_size;

		/*
		 * We don't unlock until here, because we need
		 * to copy the completed message into the
		 * recv_msg before we release the lock.
		 * Otherwise, race conditions may bite us.  I
		 * know that's pretty paranoid, but I prefer
		 * to be safe than sorry.
		 */
out_err:
		spin_unlock_irqrestore(&intf->seq_lock, flags);
	}

	return rv;
}
static int i_ipmi_req_lan(struct ipmi_smi *intf,
			  struct ipmi_addr *addr,
			  long msgid,
			  struct kernel_ipmi_msg *msg,
			  struct ipmi_smi_msg *smi_msg,
			  struct ipmi_recv_msg *recv_msg,
			  unsigned char source_lun,
			  int retries,
			  unsigned int retry_time_ms)
{
	struct ipmi_lan_addr *lan_addr;
	unsigned char ipmb_seq;
	long seqid;
	struct ipmi_channel *chans;
	int rv = 0;

	if (addr->channel >= IPMI_MAX_CHANNELS) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	chans = READ_ONCE(intf->channel_list)->c;

	if ((chans[addr->channel].medium
	     != IPMI_CHANNEL_MEDIUM_8023LAN)
	    && (chans[addr->channel].medium
		!= IPMI_CHANNEL_MEDIUM_ASYNC)) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	/* 11 for the header and 1 for the checksum. */
	if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	lan_addr = (struct ipmi_lan_addr *) addr;
	if (lan_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));

	if (recv_msg->msg.netfn & 0x1) {
		/*
		 * It's a response, so use the user's sequence
		 * from msgid.
		 */
		ipmi_inc_stat(intf, sent_lan_responses);
		format_lan_msg(smi_msg, msg, lan_addr, msgid,
			       msgid, source_lun);

		/*
		 * Save the receive message so we can use it
		 * to deliver the response.
		 */
		smi_msg->user_data = recv_msg;
	} else {
		/* It's a command, so get a sequence for it. */
		unsigned long flags;

		spin_lock_irqsave(&intf->seq_lock, flags);

		/*
		 * Create a sequence number with a 1 second
		 * timeout and 4 retries.
		 */
		rv = intf_next_seq(intf,
				   recv_msg,
				   retry_time_ms,
				   retries,
				   0,
				   &ipmb_seq,
				   &seqid);
		if (rv)
			/*
			 * We have used up all the sequence numbers,
			 * probably, so abort.
			 */
			goto out_err;

		ipmi_inc_stat(intf, sent_lan_commands);

		/*
		 * Store the sequence number in the message,
		 * so that when the send message response
		 * comes back we can start the timer.
		 */
		format_lan_msg(smi_msg, msg, lan_addr,
			       STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
			       ipmb_seq, source_lun);

		/*
		 * Copy the message into the recv message data, so we
		 * can retransmit it later if necessary.
		 */
		memcpy(recv_msg->msg_data, smi_msg->data,
		       smi_msg->data_size);
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = smi_msg->data_size;

		/*
		 * We don't unlock until here, because we need
		 * to copy the completed message into the
		 * recv_msg before we release the lock.
		 * Otherwise, race conditions may bite us.  I
		 * know that's pretty paranoid, but I prefer
		 * to be safe than sorry.
		 */
out_err:
		spin_unlock_irqrestore(&intf->seq_lock, flags);
	}

	return rv;
}
/*
 * Separate from ipmi_request so that the user does not have to be
 * supplied in certain circumstances (mainly at panic time).  If
 * messages are supplied, they will be freed, even if an error
 * occurs.
 */
static int i_ipmi_request(struct ipmi_user *user,
			  struct ipmi_smi *intf,
			  struct ipmi_addr *addr,
			  long msgid,
			  struct kernel_ipmi_msg *msg,
			  void *user_msg_data,
			  void *supplied_smi,
			  struct ipmi_recv_msg *supplied_recv,
			  int priority,
			  unsigned char source_address,
			  unsigned char source_lun,
			  int retries,
			  unsigned int retry_time_ms)
{
	struct ipmi_smi_msg *smi_msg;
	struct ipmi_recv_msg *recv_msg;
	int rv = 0;

	if (supplied_recv)
		recv_msg = supplied_recv;
	else {
		recv_msg = ipmi_alloc_recv_msg();
		if (recv_msg == NULL) {
			rv = -ENOMEM;
			goto out;
		}
	}
	recv_msg->user_msg_data = user_msg_data;

	if (supplied_smi)
		smi_msg = (struct ipmi_smi_msg *) supplied_smi;
	else {
		smi_msg = ipmi_alloc_smi_msg();
		if (smi_msg == NULL) {
			if (!supplied_recv)
				ipmi_free_recv_msg(recv_msg);
			rv = -ENOMEM;
			goto out;
		}
	}

	rcu_read_lock();
	if (intf->in_shutdown) {
		rv = -ENODEV;
		goto out_err;
	}

	recv_msg->user = user;
	if (user)
		/* The put happens when the message is freed. */
		kref_get(&user->refcount);
	recv_msg->msgid = msgid;
	/*
	 * Store the message to send in the receive message so timeout
	 * responses can get the proper response data.
	 */
	recv_msg->msg = *msg;

	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg,
					recv_msg, retries, retry_time_ms);
	} else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
		rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
				     source_address, source_lun,
				     retries, retry_time_ms);
	} else if (is_lan_addr(addr)) {
		rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
				    source_lun, retries, retry_time_ms);
	} else {
		/* Unknown address type. */
		ipmi_inc_stat(intf, sent_invalid_commands);
		rv = -EINVAL;
	}

	if (rv) {
out_err:
		ipmi_free_smi_msg(smi_msg);
		ipmi_free_recv_msg(recv_msg);
	} else {
		pr_debug("Send: %*ph\n", smi_msg->data_size, smi_msg->data);

		smi_send(intf, intf->handlers, smi_msg, priority);
	}
	rcu_read_unlock();

out:
	return rv;
}
static int check_addr(struct ipmi_smi *intf,
		      struct ipmi_addr *addr,
		      unsigned char *saddr,
		      unsigned char *lun)
{
	if (addr->channel >= IPMI_MAX_CHANNELS)
		return -EINVAL;
	addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
	*lun = intf->addrinfo[addr->channel].lun;
	*saddr = intf->addrinfo[addr->channel].address;
	return 0;
}
int ipmi_request_settime(struct ipmi_user *user,
			 struct ipmi_addr *addr,
			 long             msgid,
			 struct kernel_ipmi_msg *msg,
			 void             *user_msg_data,
			 int              priority,
			 int              retries,
			 unsigned int     retry_time_ms)
{
	unsigned char saddr = 0, lun = 0;
	int rv, index;

	if (!user)
		return -EINVAL;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rv = check_addr(user->intf, addr, &saddr, &lun);
	if (!rv)
		rv = i_ipmi_request(user, user->intf, addr, msgid, msg,
				    user_msg_data, NULL, NULL, priority,
				    saddr, lun, retries, retry_time_ms);

	release_ipmi_user(user, index);
	return rv;
}
EXPORT_SYMBOL(ipmi_request_settime);
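
/*
 * Send-side sketch (illustrative; mirrors send_get_device_id_cmd()
 * later in this file).  A user sends Get Device ID to the BMC and lets
 * retries of -1 and a retry time of 0 pick the module defaults:
 *
 *	struct ipmi_system_interface_addr si;
 *	struct kernel_ipmi_msg msg;
 *
 *	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
 *	si.channel = IPMI_BMC_CHANNEL;
 *	si.lun = 0;
 *	msg.netfn = IPMI_NETFN_APP_REQUEST;
 *	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
 *	msg.data = NULL;
 *	msg.data_len = 0;
 *	rv = ipmi_request_settime(user, (struct ipmi_addr *) &si, 0, &msg,
 *				  NULL, 0, -1, 0);
 */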
int ipmi_request_supply_msgs(struct ipmi_user     *user,
			     struct ipmi_addr     *addr,
			     long                 msgid,
			     struct kernel_ipmi_msg *msg,
			     void                 *user_msg_data,
			     void                 *supplied_smi,
			     struct ipmi_recv_msg *supplied_recv,
			     int                  priority)
{
	unsigned char saddr = 0, lun = 0;
	int rv, index;

	if (!user)
		return -EINVAL;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rv = check_addr(user->intf, addr, &saddr, &lun);
	if (!rv)
		rv = i_ipmi_request(user, user->intf, addr, msgid, msg,
				    user_msg_data, supplied_smi,
				    supplied_recv, priority, saddr, lun,
				    -1, 0);

	release_ipmi_user(user, index);
	return rv;
}
EXPORT_SYMBOL(ipmi_request_supply_msgs);
static void bmc_device_id_handler(struct ipmi_smi *intf,
				  struct ipmi_recv_msg *msg)
{
	int rv;

	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
	    || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
		dev_warn(intf->si_dev,
			 "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
			 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
		return;
	}

	rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
			msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
	if (rv) {
		dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv);
		/* Record the completion code on error. */
		intf->bmc->cc = msg->msg.data[0];
		intf->bmc->dyn_id_set = 0;
	} else {
		/*
		 * Make sure the id data is available before setting
		 * dyn_id_set.
		 */
		smp_wmb();
		intf->bmc->dyn_id_set = 1;
	}

	wake_up(&intf->waitq);
}
static int
send_get_device_id_cmd(struct ipmi_smi *intf)
{
	struct ipmi_system_interface_addr si;
	struct kernel_ipmi_msg msg;

	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	si.lun = 0;

	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
	msg.data = NULL;
	msg.data_len = 0;

	return i_ipmi_request(NULL, intf, (struct ipmi_addr *) &si, 0, &msg,
			      NULL, NULL, NULL, 0,
			      intf->addrinfo[0].address,
			      intf->addrinfo[0].lun, -1, 0);
}
static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
{
	int rv;
	unsigned int retry_count = 0;

	intf->null_user_handler = bmc_device_id_handler;

retry:
	bmc->cc = 0;
	bmc->dyn_id_set = 2;

	rv = send_get_device_id_cmd(intf);
	if (rv)
		goto out_reset_handler;

	wait_event(intf->waitq, bmc->dyn_id_set != 2);

	if (!bmc->dyn_id_set) {
		if (bmc->cc != IPMI_CC_NO_ERROR &&
		    ++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
			msleep(500);
			dev_warn(intf->si_dev,
				 "BMC returned 0x%2.2x, retry get bmc device id\n",
				 bmc->cc);
			goto retry;
		}

		rv = -EIO; /* Something went wrong in the fetch. */
	}

	/* dyn_id_set makes the id data available. */
	smp_rmb();

out_reset_handler:
	intf->null_user_handler = NULL;

	return rv;
}
2459 * Fetch the device id for the bmc/interface. You must pass in either
2460 * bmc or intf, this code will get the other one. If the data has
2461 * been recently fetched, this will just use the cached data. Otherwise
2462 * it will run a new fetch.
2464 * Except for the first time this is called (in ipmi_add_smi()),
2465 * this will always return good data.
2467 static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2468 struct ipmi_device_id *id,
2469 bool *guid_set, guid_t *guid, int intf_num)
2472 int prev_dyn_id_set, prev_guid_set;
2473 bool intf_set = intf != NULL;
2476 mutex_lock(&bmc->dyn_mutex);
2478 if (list_empty(&bmc->intfs)) {
2479 mutex_unlock(&bmc->dyn_mutex);
2482 intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
2484 kref_get(&intf->refcount);
2485 mutex_unlock(&bmc->dyn_mutex);
2486 mutex_lock(&intf->bmc_reg_mutex);
2487 mutex_lock(&bmc->dyn_mutex);
2488 if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
2490 mutex_unlock(&intf->bmc_reg_mutex);
2491 kref_put(&intf->refcount, intf_free);
2492 goto retry_bmc_lock;
2495 mutex_lock(&intf->bmc_reg_mutex);
2497 mutex_lock(&bmc->dyn_mutex);
2498 kref_get(&intf->refcount);
2501 /* If we have a valid and current ID, just return that. */
2502 if (intf->in_bmc_register ||
2503 (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
2504 goto out_noprocessing;
2506 prev_guid_set = bmc->dyn_guid_set;
2509 prev_dyn_id_set = bmc->dyn_id_set;
2510 rv = __get_device_id(intf, bmc);
2515 * The guid, device id, manufacturer id, and product id should
2516 * not change on a BMC. If they do, we have to do some dancing.
2518 if (!intf->bmc_registered
2519 || (!prev_guid_set && bmc->dyn_guid_set)
2520 || (!prev_dyn_id_set && bmc->dyn_id_set)
2521 || (prev_guid_set && bmc->dyn_guid_set
2522 && !guid_equal(&bmc->guid, &bmc->fetch_guid))
2523 || bmc->id.device_id != bmc->fetch_id.device_id
2524 || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
2525 || bmc->id.product_id != bmc->fetch_id.product_id) {
2526 struct ipmi_device_id id = bmc->fetch_id;
2527 int guid_set = bmc->dyn_guid_set;
2530 guid = bmc->fetch_guid;
2531 mutex_unlock(&bmc->dyn_mutex);
2533 __ipmi_bmc_unregister(intf);
2534 /* Fill in the temporary BMC for good measure. */
2536 intf->bmc->dyn_guid_set = guid_set;
2537 intf->bmc->guid = guid;
2538 if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
2539 need_waiter(intf); /* Retry later on an error. */
2541 __scan_channels(intf, &id);
2546 * We weren't given the interface on the
2547 * command line, so restart the operation on
2548 * the next interface for the BMC.
2550 mutex_unlock(&intf->bmc_reg_mutex);
2551 mutex_lock(&bmc->dyn_mutex);
2552 goto retry_bmc_lock;
2555 /* We have a new BMC, set it up. */
2557 mutex_lock(&bmc->dyn_mutex);
2558 goto out_noprocessing;
2559 } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
2560 /* Version info changed, scan the channels again. */
2561 __scan_channels(intf, &bmc->fetch_id);
2563 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
2566 if (rv && prev_dyn_id_set) {
2567 rv = 0; /* Ignore failures if we have previous data. */
2568 bmc->dyn_id_set = prev_dyn_id_set;
2571 bmc->id = bmc->fetch_id;
2572 if (bmc->dyn_guid_set)
2573 bmc->guid = bmc->fetch_guid;
2574 else if (prev_guid_set)
2576 * The GUID used to be valid but the fetch failed,
2577 * so just use the cached value.
2579 bmc->dyn_guid_set = prev_guid_set;
2587 *guid_set = bmc->dyn_guid_set;
2589 if (guid && bmc->dyn_guid_set)
2593 mutex_unlock(&bmc->dyn_mutex);
2594 mutex_unlock(&intf->bmc_reg_mutex);
2596 kref_put(&intf->refcount, intf_free);
2600 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2601 struct ipmi_device_id *id,
2602 bool *guid_set, guid_t *guid)
2604 return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
2607 static ssize_t device_id_show(struct device *dev,
2608 struct device_attribute *attr,
2611 struct bmc_device *bmc = to_bmc_device(dev);
2612 struct ipmi_device_id id;
2615 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2619 return snprintf(buf, 10, "%u\n", id.device_id);
2621 static DEVICE_ATTR_RO(device_id);
2623 static ssize_t provides_device_sdrs_show(struct device *dev,
2624 struct device_attribute *attr,
2627 struct bmc_device *bmc = to_bmc_device(dev);
2628 struct ipmi_device_id id;
2631 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2635 return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7);
2637 static DEVICE_ATTR_RO(provides_device_sdrs);
2639 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2642 struct bmc_device *bmc = to_bmc_device(dev);
2643 struct ipmi_device_id id;
2646 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2650 return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F);
2652 static DEVICE_ATTR_RO(revision);
2654 static ssize_t firmware_revision_show(struct device *dev,
2655 struct device_attribute *attr,
2658 struct bmc_device *bmc = to_bmc_device(dev);
2659 struct ipmi_device_id id;
2662 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2666 return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1,
2667 id.firmware_revision_2);
2669 static DEVICE_ATTR_RO(firmware_revision);
2671 static ssize_t ipmi_version_show(struct device *dev,
2672 struct device_attribute *attr,
2675 struct bmc_device *bmc = to_bmc_device(dev);
2676 struct ipmi_device_id id;
2679 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2683 return snprintf(buf, 20, "%u.%u\n",
2684 ipmi_version_major(&id),
2685 ipmi_version_minor(&id));
2687 static DEVICE_ATTR_RO(ipmi_version);
2689 static ssize_t add_dev_support_show(struct device *dev,
2690 struct device_attribute *attr,
2693 struct bmc_device *bmc = to_bmc_device(dev);
2694 struct ipmi_device_id id;
2697 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2701 return snprintf(buf, 10, "0x%02x\n", id.additional_device_support);
2703 static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
2706 static ssize_t manufacturer_id_show(struct device *dev,
2707 struct device_attribute *attr,
2710 struct bmc_device *bmc = to_bmc_device(dev);
2711 struct ipmi_device_id id;
2714 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2718 return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id);
2720 static DEVICE_ATTR_RO(manufacturer_id);
2722 static ssize_t product_id_show(struct device *dev,
2723 struct device_attribute *attr,
2726 struct bmc_device *bmc = to_bmc_device(dev);
2727 struct ipmi_device_id id;
2730 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2734 return snprintf(buf, 10, "0x%4.4x\n", id.product_id);
2736 static DEVICE_ATTR_RO(product_id);
2738 static ssize_t aux_firmware_rev_show(struct device *dev,
2739 struct device_attribute *attr,
2742 struct bmc_device *bmc = to_bmc_device(dev);
2743 struct ipmi_device_id id;
2746 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2750 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2751 id.aux_firmware_revision[3],
2752 id.aux_firmware_revision[2],
2753 id.aux_firmware_revision[1],
2754 id.aux_firmware_revision[0]);
2756 static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
2758 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2761 struct bmc_device *bmc = to_bmc_device(dev);
2766 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
2772 return snprintf(buf, UUID_STRING_LEN + 1 + 1, "%pUl\n", &guid);
2774 static DEVICE_ATTR_RO(guid);
2776 static struct attribute *bmc_dev_attrs[] = {
2777 &dev_attr_device_id.attr,
2778 &dev_attr_provides_device_sdrs.attr,
2779 &dev_attr_revision.attr,
2780 &dev_attr_firmware_revision.attr,
2781 &dev_attr_ipmi_version.attr,
2782 &dev_attr_additional_device_support.attr,
2783 &dev_attr_manufacturer_id.attr,
2784 &dev_attr_product_id.attr,
2785 &dev_attr_aux_firmware_revision.attr,
2786 &dev_attr_guid.attr,
2790 static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
2791 struct attribute *attr, int idx)
2793 struct device *dev = kobj_to_dev(kobj);
2794 struct bmc_device *bmc = to_bmc_device(dev);
2795 umode_t mode = attr->mode;
2798 if (attr == &dev_attr_aux_firmware_revision.attr) {
2799 struct ipmi_device_id id;
2801 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2802 return (!rv && id.aux_firmware_revision_set) ? mode : 0;
2804 if (attr == &dev_attr_guid.attr) {
2807 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
2808 return (!rv && guid_set) ? mode : 0;
2813 static const struct attribute_group bmc_dev_attr_group = {
2814 .attrs = bmc_dev_attrs,
2815 .is_visible = bmc_dev_attr_is_visible,
2818 static const struct attribute_group *bmc_dev_attr_groups[] = {
2819 &bmc_dev_attr_group,
2823 static const struct device_type bmc_device_type = {
2824 .groups = bmc_dev_attr_groups,
2827 static int __find_bmc_guid(struct device *dev, const void *data)
2829 const guid_t *guid = data;
2830 struct bmc_device *bmc;
2833 if (dev->type != &bmc_device_type)
2836 bmc = to_bmc_device(dev);
2837 rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
2839 rv = kref_get_unless_zero(&bmc->usecount);
2844 * Returns with the bmc's usecount incremented, if it is non-NULL.
2846 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
2850 struct bmc_device *bmc = NULL;
2852 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
2854 bmc = to_bmc_device(dev);
2860 struct prod_dev_id {
2861 unsigned int product_id;
2862 unsigned char device_id;
2865 static int __find_bmc_prod_dev_id(struct device *dev, const void *data)
2867 const struct prod_dev_id *cid = data;
2868 struct bmc_device *bmc;
2871 if (dev->type != &bmc_device_type)
2874 bmc = to_bmc_device(dev);
2875 rv = (bmc->id.product_id == cid->product_id
2876 && bmc->id.device_id == cid->device_id);
2878 rv = kref_get_unless_zero(&bmc->usecount);
2883 * Returns with the bmc's usecount incremented, if it is non-NULL.
2885 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
2886 struct device_driver *drv,
2887 unsigned int product_id, unsigned char device_id)
2889 struct prod_dev_id id = {
2890 .product_id = product_id,
2891 .device_id = device_id,
2894 struct bmc_device *bmc = NULL;
2896 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
2898 bmc = to_bmc_device(dev);
2904 static DEFINE_IDA(ipmi_bmc_ida);
2907 release_bmc_device(struct device *dev)
2909 kfree(to_bmc_device(dev));
2912 static void cleanup_bmc_work(struct work_struct *work)
2914 struct bmc_device *bmc = container_of(work, struct bmc_device,
2916 int id = bmc->pdev.id; /* Unregister overwrites id */
2918 platform_device_unregister(&bmc->pdev);
2919 ida_simple_remove(&ipmi_bmc_ida, id);
2923 cleanup_bmc_device(struct kref *ref)
2925 struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
2928 * Remove the platform device in a work queue to avoid issues
2929 * with removing the device attributes while a device attribute is being read.
2932 schedule_work(&bmc->remove_work);
2936 * Must be called with intf->bmc_reg_mutex held.
2938 static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
2940 struct bmc_device *bmc = intf->bmc;
2942 if (!intf->bmc_registered)
2945 sysfs_remove_link(&intf->si_dev->kobj, "bmc");
2946 sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
2947 kfree(intf->my_dev_name);
2948 intf->my_dev_name = NULL;
2950 mutex_lock(&bmc->dyn_mutex);
2951 list_del(&intf->bmc_link);
2952 mutex_unlock(&bmc->dyn_mutex);
2953 intf->bmc = &intf->tmp_bmc;
2954 kref_put(&bmc->usecount, cleanup_bmc_device);
2955 intf->bmc_registered = false;
2958 static void ipmi_bmc_unregister(struct ipmi_smi *intf)
2960 mutex_lock(&intf->bmc_reg_mutex);
2961 __ipmi_bmc_unregister(intf);
2962 mutex_unlock(&intf->bmc_reg_mutex);
2966 * Must be called with intf->bmc_reg_mutex held.
2968 static int __ipmi_bmc_register(struct ipmi_smi *intf,
2969 struct ipmi_device_id *id,
2970 bool guid_set, guid_t *guid, int intf_num)
2973 struct bmc_device *bmc;
2974 struct bmc_device *old_bmc;
2977 * platform_device_register() can cause bmc_reg_mutex to
2978 * be claimed because of the is_visible functions of
2979 * the attributes. Eliminate possible recursion and release the lock.
2982 intf->in_bmc_register = true;
2983 mutex_unlock(&intf->bmc_reg_mutex);
2986 * Try to find if there is already a bmc_device struct
2987 * representing the interfaced BMC.
2989 mutex_lock(&ipmidriver_mutex);
2991 old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
2993 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
2998 * If there is already a bmc_device, free the new one,
2999 * otherwise register the new BMC device.
3004 * Note: old_bmc already has usecount incremented by
3005 * the BMC find functions.
3007 intf->bmc = old_bmc;
3008 mutex_lock(&bmc->dyn_mutex);
3009 list_add_tail(&intf->bmc_link, &bmc->intfs);
3010 mutex_unlock(&bmc->dyn_mutex);
3012 dev_info(intf->si_dev,
3013 "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
3014 bmc->id.manufacturer_id,
3018 bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
3023 INIT_LIST_HEAD(&bmc->intfs);
3024 mutex_init(&bmc->dyn_mutex);
3025 INIT_WORK(&bmc->remove_work, cleanup_bmc_work);
3028 bmc->dyn_id_set = 1;
3029 bmc->dyn_guid_set = guid_set;
3031 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
3033 bmc->pdev.name = "ipmi_bmc";
3035 rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
3041 bmc->pdev.dev.driver = &ipmidriver.driver;
3043 bmc->pdev.dev.release = release_bmc_device;
3044 bmc->pdev.dev.type = &bmc_device_type;
3045 kref_init(&bmc->usecount);
3048 mutex_lock(&bmc->dyn_mutex);
3049 list_add_tail(&intf->bmc_link, &bmc->intfs);
3050 mutex_unlock(&bmc->dyn_mutex);
3052 rv = platform_device_register(&bmc->pdev);
3054 dev_err(intf->si_dev,
3055 "Unable to register bmc device: %d\n",
3060 dev_info(intf->si_dev,
3061 "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
3062 bmc->id.manufacturer_id,
3068 * Create a symlink from the system interface device to the bmc device.
3071 rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
3073 dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv);
3078 intf_num = intf->intf_num;
3079 intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
3080 if (!intf->my_dev_name) {
3082 dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n",
3087 rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
3090 dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n",
3092 goto out_free_my_dev_name;
3095 intf->bmc_registered = true;
3098 mutex_unlock(&ipmidriver_mutex);
3099 mutex_lock(&intf->bmc_reg_mutex);
3100 intf->in_bmc_register = false;
3104 out_free_my_dev_name:
3105 kfree(intf->my_dev_name);
3106 intf->my_dev_name = NULL;
3109 sysfs_remove_link(&intf->si_dev->kobj, "bmc");
3112 mutex_lock(&bmc->dyn_mutex);
3113 list_del(&intf->bmc_link);
3114 mutex_unlock(&bmc->dyn_mutex);
3115 intf->bmc = &intf->tmp_bmc;
3116 kref_put(&bmc->usecount, cleanup_bmc_device);
3120 mutex_lock(&bmc->dyn_mutex);
3121 list_del(&intf->bmc_link);
3122 mutex_unlock(&bmc->dyn_mutex);
3123 intf->bmc = &intf->tmp_bmc;
3124 put_device(&bmc->pdev.dev);
3129 send_guid_cmd(struct ipmi_smi *intf, int chan)
3131 struct kernel_ipmi_msg msg;
3132 struct ipmi_system_interface_addr si;
3134 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3135 si.channel = IPMI_BMC_CHANNEL;
3138 msg.netfn = IPMI_NETFN_APP_REQUEST;
3139 msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
3142 return i_ipmi_request(NULL,
3144 (struct ipmi_addr *) &si,
3151 intf->addrinfo[0].address,
3152 intf->addrinfo[0].lun,
3156 static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3158 struct bmc_device *bmc = intf->bmc;
3160 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3161 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
3162 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
3166 if (msg->msg.data[0] != 0) {
3167 /* Error from getting the GUID, the BMC doesn't have one. */
3168 bmc->dyn_guid_set = 0;
3172 if (msg->msg.data_len < UUID_SIZE + 1) {
3173 bmc->dyn_guid_set = 0;
3174 dev_warn(intf->si_dev,
3175 "The GUID response from the BMC was too short, it was %d but should have been %d. Assuming GUID is not available.\n",
3176 msg->msg.data_len, UUID_SIZE + 1);
3180 import_guid(&bmc->fetch_guid, msg->msg.data + 1);
3182 * Make sure the guid data is available before setting dyn_guid_set.
3186 bmc->dyn_guid_set = 1;
3188 wake_up(&intf->waitq);
3191 static void __get_guid(struct ipmi_smi *intf)
3194 struct bmc_device *bmc = intf->bmc;
3196 bmc->dyn_guid_set = 2;
3197 intf->null_user_handler = guid_handler;
3198 rv = send_guid_cmd(intf, 0);
3200 /* Send failed, no GUID available. */
3201 bmc->dyn_guid_set = 0;
3203 wait_event(intf->waitq, bmc->dyn_guid_set != 2);
3205 /* dyn_guid_set makes the guid data available. */
3208 intf->null_user_handler = NULL;
3212 send_channel_info_cmd(struct ipmi_smi *intf, int chan)
3214 struct kernel_ipmi_msg msg;
3215 unsigned char data[1];
3216 struct ipmi_system_interface_addr si;
3218 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3219 si.channel = IPMI_BMC_CHANNEL;
3222 msg.netfn = IPMI_NETFN_APP_REQUEST;
3223 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
3227 return i_ipmi_request(NULL,
3229 (struct ipmi_addr *) &si,
3236 intf->addrinfo[0].address,
3237 intf->addrinfo[0].lun,
3242 channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3246 unsigned int set = intf->curr_working_cset;
3247 struct ipmi_channel *chans;
3249 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3250 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3251 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
3252 /* It's the one we want */
3253 if (msg->msg.data[0] != 0) {
3254 /* Got an error from the channel, just go on. */
3255 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
3257 * If the MC does not support this
3258 * command, that is legal. We just
3259 * assume it has one IPMB at channel zero.
3262 intf->wchannels[set].c[0].medium
3263 = IPMI_CHANNEL_MEDIUM_IPMB;
3264 intf->wchannels[set].c[0].protocol
3265 = IPMI_CHANNEL_PROTOCOL_IPMB;
3267 intf->channel_list = intf->wchannels + set;
3268 intf->channels_ready = true;
3269 wake_up(&intf->waitq);
3274 if (msg->msg.data_len < 4) {
3275 /* Message not big enough, just go on. */
3278 ch = intf->curr_channel;
3279 chans = intf->wchannels[set].c;
3280 chans[ch].medium = msg->msg.data[2] & 0x7f;
3281 chans[ch].protocol = msg->msg.data[3] & 0x1f;
3284 intf->curr_channel++;
3285 if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
3286 intf->channel_list = intf->wchannels + set;
3287 intf->channels_ready = true;
3288 wake_up(&intf->waitq);
3290 intf->channel_list = intf->wchannels + set;
3291 intf->channels_ready = true;
3292 rv = send_channel_info_cmd(intf, intf->curr_channel);
3296 /* Got an error somehow, just give up. */
3297 dev_warn(intf->si_dev,
3298 "Error sending channel information for channel %d: %d\n",
3299 intf->curr_channel, rv);
3301 intf->channel_list = intf->wchannels + set;
3302 intf->channels_ready = true;
3303 wake_up(&intf->waitq);
3311 * Must be holding intf->bmc_reg_mutex to call this.
3313 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
3317 if (ipmi_version_major(id) > 1
3318 || (ipmi_version_major(id) == 1
3319 && ipmi_version_minor(id) >= 5)) {
3323 * Start scanning the channels to see what is available.
3326 set = !intf->curr_working_cset;
3327 intf->curr_working_cset = set;
3328 memset(&intf->wchannels[set], 0,
3329 sizeof(struct ipmi_channel_set));
3331 intf->null_user_handler = channel_handler;
3332 intf->curr_channel = 0;
3333 rv = send_channel_info_cmd(intf, 0);
3335 dev_warn(intf->si_dev,
3336 "Error sending channel information for channel 0, %d\n",
3338 intf->null_user_handler = NULL;
3342 /* Wait for the channel info to be read. */
3343 wait_event(intf->waitq, intf->channels_ready);
3344 intf->null_user_handler = NULL;
3346 unsigned int set = intf->curr_working_cset;
3348 /* Assume a single IPMB channel at zero. */
3349 intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
3350 intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
3351 intf->channel_list = intf->wchannels + set;
3352 intf->channels_ready = true;
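/*
 * Added note: wchannels is double-buffered, as the toggling of
 * curr_working_cset above shows.  A scan fills the inactive set and
 * only then points channel_list at it, so readers of channel_list
 * never observe a half-populated channel table.
 */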
3358 static void ipmi_poll(struct ipmi_smi *intf)
3360 if (intf->handlers->poll)
3361 intf->handlers->poll(intf->send_info);
3362 /* In case something came in */
3363 handle_new_recv_msgs(intf);
3366 void ipmi_poll_interface(struct ipmi_user *user)
3368 ipmi_poll(user->intf);
3370 EXPORT_SYMBOL(ipmi_poll_interface);
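/*
 * Illustrative sketch (added, hypothetical): a caller that must make
 * progress without interrupts, e.g. while spinning for a response in a
 * context where sleeping is impossible, could drive the interface by
 * polling.  The "done" flag and whoever sets it are assumptions.
 */
static void example_spin_for_response(struct ipmi_user *user,
				      atomic_t *done)
{
	while (!atomic_read(done))
		ipmi_poll_interface(user);
}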
3372 static void redo_bmc_reg(struct work_struct *work)
3374 struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
3377 if (!intf->in_shutdown)
3378 bmc_get_device_id(intf, NULL, NULL, NULL, NULL);
3380 kref_put(&intf->refcount, intf_free);
3383 int ipmi_add_smi(struct module *owner,
3384 const struct ipmi_smi_handlers *handlers,
3386 struct device *si_dev,
3387 unsigned char slave_addr)
3391 struct ipmi_smi *intf, *tintf;
3392 struct list_head *link;
3393 struct ipmi_device_id id;
3396 * Make sure the driver is actually initialized; this handles
3397 * problems with initialization order.
3399 rv = ipmi_init_msghandler();
3403 intf = kzalloc(sizeof(*intf), GFP_KERNEL);
3407 rv = init_srcu_struct(&intf->users_srcu);
3413 intf->owner = owner;
3414 intf->bmc = &intf->tmp_bmc;
3415 INIT_LIST_HEAD(&intf->bmc->intfs);
3416 mutex_init(&intf->bmc->dyn_mutex);
3417 INIT_LIST_HEAD(&intf->bmc_link);
3418 mutex_init(&intf->bmc_reg_mutex);
3419 intf->intf_num = -1; /* Mark it invalid for now. */
3420 kref_init(&intf->refcount);
3421 INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
3422 intf->si_dev = si_dev;
3423 for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
3424 intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
3425 intf->addrinfo[j].lun = 2;
3427 if (slave_addr != 0)
3428 intf->addrinfo[0].address = slave_addr;
3429 INIT_LIST_HEAD(&intf->users);
3430 intf->handlers = handlers;
3431 intf->send_info = send_info;
3432 spin_lock_init(&intf->seq_lock);
3433 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
3434 intf->seq_table[j].inuse = 0;
3435 intf->seq_table[j].seqid = 0;
3438 spin_lock_init(&intf->waiting_rcv_msgs_lock);
3439 INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
3440 tasklet_setup(&intf->recv_tasklet,
3442 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
3443 spin_lock_init(&intf->xmit_msgs_lock);
3444 INIT_LIST_HEAD(&intf->xmit_msgs);
3445 INIT_LIST_HEAD(&intf->hp_xmit_msgs);
3446 spin_lock_init(&intf->events_lock);
3447 spin_lock_init(&intf->watch_lock);
3448 atomic_set(&intf->event_waiters, 0);
3449 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3450 INIT_LIST_HEAD(&intf->waiting_events);
3451 intf->waiting_events_count = 0;
3452 mutex_init(&intf->cmd_rcvrs_mutex);
3453 spin_lock_init(&intf->maintenance_mode_lock);
3454 INIT_LIST_HEAD(&intf->cmd_rcvrs);
3455 init_waitqueue_head(&intf->waitq);
3456 for (i = 0; i < IPMI_NUM_STATS; i++)
3457 atomic_set(&intf->stats[i], 0);
3459 mutex_lock(&ipmi_interfaces_mutex);
3460 /* Look for a hole in the numbers. */
3462 link = &ipmi_interfaces;
3463 list_for_each_entry_rcu(tintf, &ipmi_interfaces, link,
3464 ipmi_interfaces_mutex_held()) {
3465 if (tintf->intf_num != i) {
3466 link = &tintf->link;
3471 /* Add the new interface in numeric order. */
3473 list_add_rcu(&intf->link, &ipmi_interfaces);
3475 list_add_tail_rcu(&intf->link, link);
3477 rv = handlers->start_processing(send_info, intf);
3481 rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
3483 dev_err(si_dev, "Unable to get the device id: %d\n", rv);
3484 goto out_err_started;
3487 mutex_lock(&intf->bmc_reg_mutex);
3488 rv = __scan_channels(intf, &id);
3489 mutex_unlock(&intf->bmc_reg_mutex);
3491 goto out_err_bmc_reg;
3494 * Keep memory order straight for RCU readers. Make
3495 * sure everything else is committed to memory before
3496 * setting intf_num to mark the interface valid.
3500 mutex_unlock(&ipmi_interfaces_mutex);
3502 /* After this point the interface is legal to use. */
3503 call_smi_watchers(i, intf->si_dev);
3508 ipmi_bmc_unregister(intf);
3510 if (intf->handlers->shutdown)
3511 intf->handlers->shutdown(intf->send_info);
3513 list_del_rcu(&intf->link);
3514 mutex_unlock(&ipmi_interfaces_mutex);
3515 synchronize_srcu(&ipmi_interfaces_srcu);
3516 cleanup_srcu_struct(&intf->users_srcu);
3517 kref_put(&intf->refcount, intf_free);
3521 EXPORT_SYMBOL(ipmi_add_smi);
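/*
 * Illustrative sketch (added, hypothetical): the rough shape of a
 * lower-layer driver registering with the message handler.  Callback
 * signatures are inferred from how intf->handlers is invoked in this
 * file; "my_hw" stands in for the driver's private state.
 */
static int my_start_processing(void *send_info, struct ipmi_smi *intf)
{
	/* Stash intf so responses can be fed to ipmi_smi_msg_received(). */
	return 0;
}

static void my_sender(void *send_info, struct ipmi_smi_msg *msg)
{
	/* Queue msg->data / msg->data_size to the hardware. */
}

static void my_request_events(void *send_info)
{
	/* Ask the BMC for pending events, if supported. */
}

static void my_shutdown(void *send_info)
{
	/* Quiesce the hardware; nothing may be delivered after this. */
}

static const struct ipmi_smi_handlers my_handlers = {
	.start_processing = my_start_processing,
	.sender           = my_sender,
	.request_events   = my_request_events,
	.shutdown         = my_shutdown,
};

/* Registration: rv = ipmi_add_smi(THIS_MODULE, &my_handlers, my_hw, dev, 0); */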
3523 static void deliver_smi_err_response(struct ipmi_smi *intf,
3524 struct ipmi_smi_msg *msg,
3527 msg->rsp[0] = msg->data[0] | 4;
3528 msg->rsp[1] = msg->data[1];
3531 /* It's an error, so it will never requeue, no need to check return. */
3532 handle_one_recv_msg(intf, msg);
3535 static void cleanup_smi_msgs(struct ipmi_smi *intf)
3538 struct seq_table *ent;
3539 struct ipmi_smi_msg *msg;
3540 struct list_head *entry;
3541 struct list_head tmplist;
3543 /* Clear out our transmit queues and hold the messages. */
3544 INIT_LIST_HEAD(&tmplist);
3545 list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
3546 list_splice_tail(&intf->xmit_msgs, &tmplist);
3548 /* Current message first, to preserve order */
3549 while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
3550 /* Wait for the message to clear out. */
3551 schedule_timeout(1);
3554 /* No need for locks, the interface is down. */
3557 * Return errors for all pending messages in queue and in the
3558 * tables waiting for remote responses.
3560 while (!list_empty(&tmplist)) {
3561 entry = tmplist.next;
3563 msg = list_entry(entry, struct ipmi_smi_msg, link);
3564 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
3567 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
3568 ent = &intf->seq_table[i];
3571 deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
3575 void ipmi_unregister_smi(struct ipmi_smi *intf)
3577 struct ipmi_smi_watcher *w;
3578 int intf_num = intf->intf_num, index;
3580 mutex_lock(&ipmi_interfaces_mutex);
3581 intf->intf_num = -1;
3582 intf->in_shutdown = true;
3583 list_del_rcu(&intf->link);
3584 mutex_unlock(&ipmi_interfaces_mutex);
3585 synchronize_srcu(&ipmi_interfaces_srcu);
3587 /* At this point no users can be added to the interface. */
3590 * Call all the watcher interfaces to tell them that
3591 * an interface is going away.
3593 mutex_lock(&smi_watchers_mutex);
3594 list_for_each_entry(w, &smi_watchers, link)
3595 w->smi_gone(intf_num);
3596 mutex_unlock(&smi_watchers_mutex);
3598 index = srcu_read_lock(&intf->users_srcu);
3599 while (!list_empty(&intf->users)) {
3600 struct ipmi_user *user =
3601 container_of(list_next_rcu(&intf->users),
3602 struct ipmi_user, link);
3604 _ipmi_destroy_user(user);
3606 srcu_read_unlock(&intf->users_srcu, index);
3608 if (intf->handlers->shutdown)
3609 intf->handlers->shutdown(intf->send_info);
3611 cleanup_smi_msgs(intf);
3613 ipmi_bmc_unregister(intf);
3615 cleanup_srcu_struct(&intf->users_srcu);
3616 kref_put(&intf->refcount, intf_free);
3618 EXPORT_SYMBOL(ipmi_unregister_smi);
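/*
 * Added note on the layout assumed by the two IPMB handlers below,
 * reconstructed from the offsets they use: rsp[2] is the Get Message
 * completion code, rsp[3] holds the channel in its low nibble, rsp[4]
 * carries netfn and LUN (netfn in the upper six bits), rsp[6] is the
 * remote slave address, rsp[7] mixes sequence number and LUN, rsp[8]
 * is the command for incoming commands, the payload starts at rsp[9],
 * and the final byte is a checksum.
 */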
3620 static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
3621 struct ipmi_smi_msg *msg)
3623 struct ipmi_ipmb_addr ipmb_addr;
3624 struct ipmi_recv_msg *recv_msg;
3627 * This is 11, not 10, because the response must contain a completion code.
3630 if (msg->rsp_size < 11) {
3631 /* Message not big enough, just ignore it. */
3632 ipmi_inc_stat(intf, invalid_ipmb_responses);
3636 if (msg->rsp[2] != 0) {
3637 /* An error getting the response, just ignore it. */
3641 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
3642 ipmb_addr.slave_addr = msg->rsp[6];
3643 ipmb_addr.channel = msg->rsp[3] & 0x0f;
3644 ipmb_addr.lun = msg->rsp[7] & 3;
3647 * It's a response from a remote entity. Look up the sequence
3648 * number and handle the response.
3650 if (intf_find_seq(intf,
3654 (msg->rsp[4] >> 2) & (~1),
3655 (struct ipmi_addr *) &ipmb_addr,
3658 * We were unable to find the sequence number,
3659 * so just nuke the message.
3661 ipmi_inc_stat(intf, unhandled_ipmb_responses);
3665 memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9);
3667 * The other fields matched, so no need to set them, except
3668 * for netfn, which needs to be the response that was
3669 * returned, not the request value.
3671 recv_msg->msg.netfn = msg->rsp[4] >> 2;
3672 recv_msg->msg.data = recv_msg->msg_data;
3673 recv_msg->msg.data_len = msg->rsp_size - 10;
3674 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3675 if (deliver_response(intf, recv_msg))
3676 ipmi_inc_stat(intf, unhandled_ipmb_responses);
3678 ipmi_inc_stat(intf, handled_ipmb_responses);
3683 static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
3684 struct ipmi_smi_msg *msg)
3686 struct cmd_rcvr *rcvr;
3688 unsigned char netfn;
3691 struct ipmi_user *user = NULL;
3692 struct ipmi_ipmb_addr *ipmb_addr;
3693 struct ipmi_recv_msg *recv_msg;
3695 if (msg->rsp_size < 10) {
3696 /* Message not big enough, just ignore it. */
3697 ipmi_inc_stat(intf, invalid_commands);
3701 if (msg->rsp[2] != 0) {
3702 /* An error getting the response, just ignore it. */
3706 netfn = msg->rsp[4] >> 2;
3708 chan = msg->rsp[3] & 0xf;
3711 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3714 kref_get(&user->refcount);
3720 /* We didn't find a user, deliver an error response. */
3721 ipmi_inc_stat(intf, unhandled_commands);
3723 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
3724 msg->data[1] = IPMI_SEND_MSG_CMD;
3725 msg->data[2] = msg->rsp[3];
3726 msg->data[3] = msg->rsp[6];
3727 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
3728 msg->data[5] = ipmb_checksum(&msg->data[3], 2);
3729 msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
3731 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
3732 msg->data[8] = msg->rsp[8]; /* cmd */
3733 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
3734 msg->data[10] = ipmb_checksum(&msg->data[6], 4);
3735 msg->data_size = 11;
3737 pr_debug("Invalid command: %*ph\n", msg->data_size, msg->data);
3740 if (!intf->in_shutdown) {
3741 smi_send(intf, intf->handlers, msg, 0);
3743 * We used the message, so return the value
3744 * that causes it to not be freed or requeued.
3751 recv_msg = ipmi_alloc_recv_msg();
3754 * We couldn't allocate memory for the
3755 * message, so requeue it for handling later.
3759 kref_put(&user->refcount, free_user);
3761 /* Extract the source address from the data. */
3762 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
3763 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
3764 ipmb_addr->slave_addr = msg->rsp[6];
3765 ipmb_addr->lun = msg->rsp[7] & 3;
3766 ipmb_addr->channel = msg->rsp[3] & 0xf;
3769 * Extract the rest of the message information
3770 * from the IPMB header.
3772 recv_msg->user = user;
3773 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3774 recv_msg->msgid = msg->rsp[7] >> 2;
3775 recv_msg->msg.netfn = msg->rsp[4] >> 2;
3776 recv_msg->msg.cmd = msg->rsp[8];
3777 recv_msg->msg.data = recv_msg->msg_data;
3780 * We chop off 10, not 9 bytes because the checksum
3781 * at the end also needs to be removed.
3783 recv_msg->msg.data_len = msg->rsp_size - 10;
3784 memcpy(recv_msg->msg_data, &msg->rsp[9],
3785 msg->rsp_size - 10);
3786 if (deliver_response(intf, recv_msg))
3787 ipmi_inc_stat(intf, unhandled_commands);
3789 ipmi_inc_stat(intf, handled_commands);
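/*
 * Added note: the LAN handlers below decode an analogous layout, per
 * the offsets they use: rsp[3] packs channel (low nibble) and
 * privilege (high nibble), rsp[4] is the session handle, rsp[5] and
 * rsp[8] are the local and remote software IDs, rsp[6] carries the
 * netfn, rsp[9] mixes sequence/msgid and LUN, rsp[10] is the command,
 * and the payload runs from rsp[11] up to the trailing checksum.
 */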
3796 static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
3797 struct ipmi_smi_msg *msg)
3799 struct ipmi_lan_addr lan_addr;
3800 struct ipmi_recv_msg *recv_msg;
3804 * This is 13, not 12, because the response must contain a completion code.
3807 if (msg->rsp_size < 13) {
3808 /* Message not big enough, just ignore it. */
3809 ipmi_inc_stat(intf, invalid_lan_responses);
3813 if (msg->rsp[2] != 0) {
3814 /* An error getting the response, just ignore it. */
3818 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
3819 lan_addr.session_handle = msg->rsp[4];
3820 lan_addr.remote_SWID = msg->rsp[8];
3821 lan_addr.local_SWID = msg->rsp[5];
3822 lan_addr.channel = msg->rsp[3] & 0x0f;
3823 lan_addr.privilege = msg->rsp[3] >> 4;
3824 lan_addr.lun = msg->rsp[9] & 3;
3827 * It's a response from a remote entity. Look up the sequence
3828 * number and handle the response.
3830 if (intf_find_seq(intf,
3834 (msg->rsp[6] >> 2) & (~1),
3835 (struct ipmi_addr *) &lan_addr,
3838 * We were unable to find the sequence number,
3839 * so just nuke the message.
3841 ipmi_inc_stat(intf, unhandled_lan_responses);
3845 memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11);
3847 * The other fields matched, so no need to set them, except
3848 * for netfn, which needs to be the response that was
3849 * returned, not the request value.
3851 recv_msg->msg.netfn = msg->rsp[6] >> 2;
3852 recv_msg->msg.data = recv_msg->msg_data;
3853 recv_msg->msg.data_len = msg->rsp_size - 12;
3854 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3855 if (deliver_response(intf, recv_msg))
3856 ipmi_inc_stat(intf, unhandled_lan_responses);
3858 ipmi_inc_stat(intf, handled_lan_responses);
3863 static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
3864 struct ipmi_smi_msg *msg)
3866 struct cmd_rcvr *rcvr;
3868 unsigned char netfn;
3871 struct ipmi_user *user = NULL;
3872 struct ipmi_lan_addr *lan_addr;
3873 struct ipmi_recv_msg *recv_msg;
3875 if (msg->rsp_size < 12) {
3876 /* Message not big enough, just ignore it. */
3877 ipmi_inc_stat(intf, invalid_commands);
3881 if (msg->rsp[2] != 0) {
3882 /* An error getting the response, just ignore it. */
3886 netfn = msg->rsp[6] >> 2;
3888 chan = msg->rsp[3] & 0xf;
3891 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3894 kref_get(&user->refcount);
3900 /* We didn't find a user, just give up. */
3901 ipmi_inc_stat(intf, unhandled_commands);
3904 * Don't do anything with these messages, just allow them to be freed.
3909 recv_msg = ipmi_alloc_recv_msg();
3912 * We couldn't allocate memory for the
3913 * message, so requeue it for handling later.
3916 kref_put(&user->refcount, free_user);
3918 /* Extract the source address from the data. */
3919 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
3920 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
3921 lan_addr->session_handle = msg->rsp[4];
3922 lan_addr->remote_SWID = msg->rsp[8];
3923 lan_addr->local_SWID = msg->rsp[5];
3924 lan_addr->lun = msg->rsp[9] & 3;
3925 lan_addr->channel = msg->rsp[3] & 0xf;
3926 lan_addr->privilege = msg->rsp[3] >> 4;
3929 * Extract the rest of the message information
3930 * from the IPMB header.
3932 recv_msg->user = user;
3933 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3934 recv_msg->msgid = msg->rsp[9] >> 2;
3935 recv_msg->msg.netfn = msg->rsp[6] >> 2;
3936 recv_msg->msg.cmd = msg->rsp[10];
3937 recv_msg->msg.data = recv_msg->msg_data;
3940 * We chop off 12, not 11 bytes because the checksum
3941 * at the end also needs to be removed.
3943 recv_msg->msg.data_len = msg->rsp_size - 12;
3944 memcpy(recv_msg->msg_data, &msg->rsp[11],
3945 msg->rsp_size - 12);
3946 if (deliver_response(intf, recv_msg))
3947 ipmi_inc_stat(intf, unhandled_commands);
3949 ipmi_inc_stat(intf, handled_commands);
3957 * This routine will handle "Get Message" command responses with
3958 * channels that use an OEM Medium. The message format belongs to
3959 * the OEM. See IPMI 2.0 specification, Chapter 6 and
3960 * Chapter 22, sections 22.6 and 22.24 for more details.
3962 static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
3963 struct ipmi_smi_msg *msg)
3965 struct cmd_rcvr *rcvr;
3967 unsigned char netfn;
3970 struct ipmi_user *user = NULL;
3971 struct ipmi_system_interface_addr *smi_addr;
3972 struct ipmi_recv_msg *recv_msg;
3975 * We expect the OEM SW to perform error checking
3976 * so we just do some basic sanity checks.
3978 if (msg->rsp_size < 4) {
3979 /* Message not big enough, just ignore it. */
3980 ipmi_inc_stat(intf, invalid_commands);
3984 if (msg->rsp[2] != 0) {
3985 /* An error getting the response, just ignore it. */
3990 * This is an OEM Message so the OEM needs to know how
3991 * to handle the message. We do no interpretation.
3993 netfn = msg->rsp[0] >> 2;
3995 chan = msg->rsp[3] & 0xf;
3998 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
4001 kref_get(&user->refcount);
4007 /* We didn't find a user, just give up. */
4008 ipmi_inc_stat(intf, unhandled_commands);
4011 * Don't do anything with these messages, just allow them to be freed.
4017 recv_msg = ipmi_alloc_recv_msg();
4020 * We couldn't allocate memory for the
4021 * message, so requeue it for handling later.
4025 kref_put(&user->refcount, free_user);
4028 * OEM Messages are expected to be delivered via
4029 * the system interface to SMS software. We might
4030 * need to visit this again depending on OEM requirements.
4033 smi_addr = ((struct ipmi_system_interface_addr *)
4035 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4036 smi_addr->channel = IPMI_BMC_CHANNEL;
4037 smi_addr->lun = msg->rsp[0] & 3;
4039 recv_msg->user = user;
4040 recv_msg->user_msg_data = NULL;
4041 recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
4042 recv_msg->msg.netfn = msg->rsp[0] >> 2;
4043 recv_msg->msg.cmd = msg->rsp[1];
4044 recv_msg->msg.data = recv_msg->msg_data;
4047 * The message starts at byte 4, which follows
4048 * the Channel Byte in the "GET MESSAGE" command.
4050 recv_msg->msg.data_len = msg->rsp_size - 4;
4051 memcpy(recv_msg->msg_data, &msg->rsp[4],
4053 if (deliver_response(intf, recv_msg))
4054 ipmi_inc_stat(intf, unhandled_commands);
4056 ipmi_inc_stat(intf, handled_commands);
4063 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
4064 struct ipmi_smi_msg *msg)
4066 struct ipmi_system_interface_addr *smi_addr;
4068 recv_msg->msgid = 0;
4069 smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
4070 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4071 smi_addr->channel = IPMI_BMC_CHANNEL;
4072 smi_addr->lun = msg->rsp[0] & 3;
4073 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
4074 recv_msg->msg.netfn = msg->rsp[0] >> 2;
4075 recv_msg->msg.cmd = msg->rsp[1];
4076 memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
4077 recv_msg->msg.data = recv_msg->msg_data;
4078 recv_msg->msg.data_len = msg->rsp_size - 3;
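/*
 * Added note: for an event response, rsp[0] and rsp[1] echo netfn and
 * command, rsp[2] is the completion code, and the event record itself
 * starts at rsp[3]; the 19-byte minimum enforced below corresponds to
 * that 3-byte header plus a 16-byte event.
 */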
4081 static int handle_read_event_rsp(struct ipmi_smi *intf,
4082 struct ipmi_smi_msg *msg)
4084 struct ipmi_recv_msg *recv_msg, *recv_msg2;
4085 struct list_head msgs;
4086 struct ipmi_user *user;
4087 int rv = 0, deliver_count = 0, index;
4088 unsigned long flags;
4090 if (msg->rsp_size < 19) {
4091 /* Message is too small to be an IPMB event. */
4092 ipmi_inc_stat(intf, invalid_events);
4096 if (msg->rsp[2] != 0) {
4097 /* An error getting the event, just ignore it. */
4101 INIT_LIST_HEAD(&msgs);
4103 spin_lock_irqsave(&intf->events_lock, flags);
4105 ipmi_inc_stat(intf, events);
4108 * Allocate and fill in one message for every user that is getting events.
4111 index = srcu_read_lock(&intf->users_srcu);
4112 list_for_each_entry_rcu(user, &intf->users, link) {
4113 if (!user->gets_events)
4116 recv_msg = ipmi_alloc_recv_msg();
4119 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
4121 list_del(&recv_msg->link);
4122 ipmi_free_recv_msg(recv_msg);
4125 * We couldn't allocate memory for the
4126 * message, so requeue it for handling later.
4135 copy_event_into_recv_msg(recv_msg, msg);
4136 recv_msg->user = user;
4137 kref_get(&user->refcount);
4138 list_add_tail(&recv_msg->link, &msgs);
4140 srcu_read_unlock(&intf->users_srcu, index);
4142 if (deliver_count) {
4143 /* Now deliver all the messages. */
4144 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
4145 list_del(&recv_msg->link);
4146 deliver_local_response(intf, recv_msg);
4148 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
4150 * No one to receive the message, so put it in the queue if there
4151 * aren't already too many things in the queue.
4153 recv_msg = ipmi_alloc_recv_msg();
4156 * We couldn't allocate memory for the
4157 * message, so requeue it for handling later.
4164 copy_event_into_recv_msg(recv_msg, msg);
4165 list_add_tail(&recv_msg->link, &intf->waiting_events);
4166 intf->waiting_events_count++;
4167 } else if (!intf->event_msg_printed) {
4169 * There are too many things in the queue, discard this message.
4172 dev_warn(intf->si_dev,
4173 "Event queue full, discarding incoming events\n");
4174 intf->event_msg_printed = 1;
4178 spin_unlock_irqrestore(&intf->events_lock, flags);
4183 static int handle_bmc_rsp(struct ipmi_smi *intf,
4184 struct ipmi_smi_msg *msg)
4186 struct ipmi_recv_msg *recv_msg;
4187 struct ipmi_system_interface_addr *smi_addr;
4189 recv_msg = (struct ipmi_recv_msg *) msg->user_data;
4190 if (recv_msg == NULL) {
4191 dev_warn(intf->si_dev,
4192 "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
4196 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4197 recv_msg->msgid = msg->msgid;
4198 smi_addr = ((struct ipmi_system_interface_addr *)
4200 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4201 smi_addr->channel = IPMI_BMC_CHANNEL;
4202 smi_addr->lun = msg->rsp[0] & 3;
4203 recv_msg->msg.netfn = msg->rsp[0] >> 2;
4204 recv_msg->msg.cmd = msg->rsp[1];
4205 memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
4206 recv_msg->msg.data = recv_msg->msg_data;
4207 recv_msg->msg.data_len = msg->rsp_size - 2;
4208 deliver_local_response(intf, recv_msg);
4214 * Handle a received message. Return 1 if the message should be requeued,
4215 * 0 if the message should be freed, or -1 if the message should not
4216 * be freed or requeued.
4218 static int handle_one_recv_msg(struct ipmi_smi *intf,
4219 struct ipmi_smi_msg *msg)
4224 pr_debug("Recv: %*ph\n", msg->rsp_size, msg->rsp);
4226 if ((msg->data_size >= 2)
4227 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
4228 && (msg->data[1] == IPMI_SEND_MSG_CMD)
4229 && (msg->user_data == NULL)) {
4231 if (intf->in_shutdown)
4235 * This is the local response to a command send; start
4236 * the timer for these. The user_data will not be
4237 * NULL if this is a response send, and we will let
4238 * response sends just go through.
4242 * Check for errors; if we get certain errors (ones
4243 * that mean basically we can try again later), we
4244 * ignore them and start the timer. Otherwise we
4245 * report the error immediately.
4247 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
4248 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
4249 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
4250 && (msg->rsp[2] != IPMI_BUS_ERR)
4251 && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
4252 int ch = msg->rsp[3] & 0xf;
4253 struct ipmi_channel *chans;
4255 /* Got an error sending the message, handle it. */
4257 chans = READ_ONCE(intf->channel_list)->c;
4258 if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
4259 || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
4260 ipmi_inc_stat(intf, sent_lan_command_errs);
4262 ipmi_inc_stat(intf, sent_ipmb_command_errs);
4263 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
4265 /* The message was sent, start the timer. */
4266 intf_start_seq_timer(intf, msg->msgid);
4271 } else if (msg->rsp_size < 2) {
4272 /* Message is too small to be correct. */
4273 dev_warn(intf->si_dev,
4274 "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
4275 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
4277 /* Generate an error response for the message. */
4278 msg->rsp[0] = msg->data[0] | (1 << 2);
4279 msg->rsp[1] = msg->data[1];
4280 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4282 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
4283 || (msg->rsp[1] != msg->data[1])) {
4285 * The NetFN and Command in the response are not even
4286 * marginally correct.
4288 dev_warn(intf->si_dev,
4289 "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
4290 (msg->data[0] >> 2) | 1, msg->data[1],
4291 msg->rsp[0] >> 2, msg->rsp[1]);
4293 /* Generate an error response for the message. */
4294 msg->rsp[0] = msg->data[0] | (1 << 2);
4295 msg->rsp[1] = msg->data[1];
4296 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4300 if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4301 && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
4302 && (msg->user_data != NULL)) {
4304 * It's a response to a response we sent. For this we
4305 * deliver a send message response to the user.
4307 struct ipmi_recv_msg *recv_msg = msg->user_data;
4310 if (msg->rsp_size < 2)
4311 /* Message is too small to be correct. */
4314 chan = msg->data[2] & 0x0f;
4315 if (chan >= IPMI_MAX_CHANNELS)
4316 /* Invalid channel number */
4322 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
4323 recv_msg->msg.data = recv_msg->msg_data;
4324 recv_msg->msg.data_len = 1;
4325 recv_msg->msg_data[0] = msg->rsp[2];
4326 deliver_local_response(intf, recv_msg);
4327 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4328 && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
4329 struct ipmi_channel *chans;
4331 /* It's from the receive queue. */
4332 chan = msg->rsp[3] & 0xf;
4333 if (chan >= IPMI_MAX_CHANNELS) {
4334 /* Invalid channel number */
4340 * We need to make sure the channels have been initialized.
4341 * The channel_handler routine will set the "curr_channel"
4342 * equal to or greater than IPMI_MAX_CHANNELS when all the
4343 * channels for this interface have been initialized.
4345 if (!intf->channels_ready) {
4346 requeue = 0; /* Throw the message away */
4350 chans = READ_ONCE(intf->channel_list)->c;
4352 switch (chans[chan].medium) {
4353 case IPMI_CHANNEL_MEDIUM_IPMB:
4354 if (msg->rsp[4] & 0x04) {
4356 * It's a response, so find the
4357 * requesting message and send it up.
4359 requeue = handle_ipmb_get_msg_rsp(intf, msg);
4362 * It's a command to the SMS from some other
4363 * entity. Handle that.
4365 requeue = handle_ipmb_get_msg_cmd(intf, msg);
4369 case IPMI_CHANNEL_MEDIUM_8023LAN:
4370 case IPMI_CHANNEL_MEDIUM_ASYNC:
4371 if (msg->rsp[6] & 0x04) {
4373 * It's a response, so find the
4374 * requesting message and send it up.
4376 requeue = handle_lan_get_msg_rsp(intf, msg);
4379 * It's a command to the SMS from some other
4380 * entity. Handle that.
4382 requeue = handle_lan_get_msg_cmd(intf, msg);
4387 /* Check for OEM Channels. Clients had better
4388 register for these commands. */
4389 if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
4390 && (chans[chan].medium
4391 <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
4392 requeue = handle_oem_get_msg_cmd(intf, msg);
4395 * We don't handle the channel type, so just ignore the message.
4402 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4403 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
4404 /* It's an asynchronous event. */
4405 requeue = handle_read_event_rsp(intf, msg);
4407 /* It's a response from the local BMC. */
4408 requeue = handle_bmc_rsp(intf, msg);
4416 * If there are messages in the queue or pretimeouts, handle them.
4418 static void handle_new_recv_msgs(struct ipmi_smi *intf)
4420 struct ipmi_smi_msg *smi_msg;
4421 unsigned long flags = 0;
4423 int run_to_completion = intf->run_to_completion;
4425 /* See if any waiting messages need to be processed. */
4426 if (!run_to_completion)
4427 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4428 while (!list_empty(&intf->waiting_rcv_msgs)) {
4429 smi_msg = list_entry(intf->waiting_rcv_msgs.next,
4430 struct ipmi_smi_msg, link);
4431 list_del(&smi_msg->link);
4432 if (!run_to_completion)
4433 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4435 rv = handle_one_recv_msg(intf, smi_msg);
4436 if (!run_to_completion)
4437 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4440 * To preserve message order, quit if we
4441 * can't handle a message. Add the message
4442 * back at the head; this is safe because this
4443 * tasklet is the only thing that pulls the messages.
4446 list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
4450 /* Message handled */
4451 ipmi_free_smi_msg(smi_msg);
4452 /* If rv < 0, fatal error, del but don't free. */
4455 if (!run_to_completion)
4456 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
4459 * If the pretimeout count is non-zero, decrement one from it and
4460 * deliver pretimeouts to all the users.
4462 if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
4463 struct ipmi_user *user;
4466 index = srcu_read_lock(&intf->users_srcu);
4467 list_for_each_entry_rcu(user, &intf->users, link) {
4468 if (user->handler->ipmi_watchdog_pretimeout)
4469 user->handler->ipmi_watchdog_pretimeout(
4470 user->handler_data);
4472 srcu_read_unlock(&intf->users_srcu, index);
4476 static void smi_recv_tasklet(struct tasklet_struct *t)
4478 unsigned long flags = 0; /* keep us warning-free. */
4479 struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
4480 int run_to_completion = intf->run_to_completion;
4481 struct ipmi_smi_msg *newmsg = NULL;
4484 * Start the next message if available.
4486 * Do this here, not in the actual receiver, because we may deadlock:
4487 * the lower layer is allowed to hold locks while calling message delivery.
4493 if (!run_to_completion)
4494 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4495 if (intf->curr_msg == NULL && !intf->in_shutdown) {
4496 struct list_head *entry = NULL;
4498 /* Pick the high priority queue first. */
4499 if (!list_empty(&intf->hp_xmit_msgs))
4500 entry = intf->hp_xmit_msgs.next;
4501 else if (!list_empty(&intf->xmit_msgs))
4502 entry = intf->xmit_msgs.next;
4506 newmsg = list_entry(entry, struct ipmi_smi_msg, link);
4507 intf->curr_msg = newmsg;
4511 if (!run_to_completion)
4512 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4514 intf->handlers->sender(intf->send_info, newmsg);
4518 handle_new_recv_msgs(intf);
4521 /* Handle a new message from the lower layer. */
4522 void ipmi_smi_msg_received(struct ipmi_smi *intf,
4523 struct ipmi_smi_msg *msg)
4525 unsigned long flags = 0; /* keep us warning-free. */
4526 int run_to_completion = intf->run_to_completion;
4529 * To preserve message order, we keep a queue and deliver from a tasklet.
4532 if (!run_to_completion)
4533 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4534 list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
4535 if (!run_to_completion)
4536 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4539 if (!run_to_completion)
4540 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4542 * We can get an asynchronous event or receive message in addition
4543 * to commands we send.
4545 if (msg == intf->curr_msg)
4546 intf->curr_msg = NULL;
4547 if (!run_to_completion)
4548 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4550 if (run_to_completion)
4551 smi_recv_tasklet(&intf->recv_tasklet);
4553 tasklet_schedule(&intf->recv_tasklet);
4555 EXPORT_SYMBOL(ipmi_smi_msg_received);
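/*
 * Illustrative sketch (added, hypothetical): how a lower layer might
 * hand a completed response up from its interrupt or polling path.
 * The bounds check against sizeof(msg->rsp) assumes rsp is a
 * fixed-size array in struct ipmi_smi_msg.
 */
static void example_deliver_rsp(struct ipmi_smi *intf,
				struct ipmi_smi_msg *msg,
				const unsigned char *rsp, unsigned int len)
{
	if (len > sizeof(msg->rsp))
		len = sizeof(msg->rsp);
	memcpy(msg->rsp, rsp, len);
	msg->rsp_size = len;
	ipmi_smi_msg_received(intf, msg);
}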
4557 void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
4559 if (intf->in_shutdown)
4562 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
4563 tasklet_schedule(&intf->recv_tasklet);
4565 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
4567 static struct ipmi_smi_msg *
4568 smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
4569 unsigned char seq, long seqid)
4571 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
4574 * If we can't allocate the message, then just return; we
4575 * get 4 retries, so this should be OK.
4579 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
4580 smi_msg->data_size = recv_msg->msg.data_len;
4581 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
4583 pr_debug("Resend: %*ph\n", smi_msg->data_size, smi_msg->data);
4588 static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
4589 struct list_head *timeouts,
4590 unsigned long timeout_period,
4591 int slot, unsigned long *flags,
4594 struct ipmi_recv_msg *msg;
4596 if (intf->in_shutdown)
4602 if (timeout_period < ent->timeout) {
4603 ent->timeout -= timeout_period;
4608 if (ent->retries_left == 0) {
4609 /* The message has used all its retries. */
4611 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
4612 msg = ent->recv_msg;
4613 list_add_tail(&msg->link, timeouts);
4615 ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
4616 else if (is_lan_addr(&ent->recv_msg->addr))
4617 ipmi_inc_stat(intf, timed_out_lan_commands);
4619 ipmi_inc_stat(intf, timed_out_ipmb_commands);
4621 struct ipmi_smi_msg *smi_msg;
4622 /* More retries, send again. */
4627 * Start with the max timer; set to the normal timer after
4628 * the message is sent.
4630 ent->timeout = MAX_MSG_TIMEOUT;
4631 ent->retries_left--;
4632 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
4635 if (is_lan_addr(&ent->recv_msg->addr))
4637 dropped_rexmit_lan_commands);
4640 dropped_rexmit_ipmb_commands);
4644 spin_unlock_irqrestore(&intf->seq_lock, *flags);
4647 * Send the new message. We send with a zero
4648 * priority. It timed out, I doubt time is that
4649 * critical now, and high priority messages are really
4650 * only for messages to the local MC, which don't get resent.
4653 if (intf->handlers) {
4654 if (is_lan_addr(&ent->recv_msg->addr))
4656 retransmitted_lan_commands);
4659 retransmitted_ipmb_commands);
4661 smi_send(intf, intf->handlers, smi_msg, 0);
4663 ipmi_free_smi_msg(smi_msg);
4665 spin_lock_irqsave(&intf->seq_lock, *flags);
4669 static bool ipmi_timeout_handler(struct ipmi_smi *intf,
4670 unsigned long timeout_period)
4672 struct list_head timeouts;
4673 struct ipmi_recv_msg *msg, *msg2;
4674 unsigned long flags;
4676 bool need_timer = false;
4678 if (!intf->bmc_registered) {
4679 kref_get(&intf->refcount);
4680 if (!schedule_work(&intf->bmc_reg_work)) {
4681 kref_put(&intf->refcount, intf_free);
4687 * Go through the seq table and find any messages that
4688 * have timed out, putting them in the timeouts list.
4691 INIT_LIST_HEAD(&timeouts);
4692 spin_lock_irqsave(&intf->seq_lock, flags);
4693 if (intf->ipmb_maintenance_mode_timeout) {
4694 if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
4695 intf->ipmb_maintenance_mode_timeout = 0;
4697 intf->ipmb_maintenance_mode_timeout -= timeout_period;
4699 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
4700 check_msg_timeout(intf, &intf->seq_table[i],
4701 &timeouts, timeout_period, i,
4702 &flags, &need_timer);
4703 spin_unlock_irqrestore(&intf->seq_lock, flags);
4705 list_for_each_entry_safe(msg, msg2, &timeouts, link)
4706 deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
4709 * Maintenance mode handling. Check the timeout
4710 * optimistically before we claim the lock. It may
4711 * mean a timeout gets missed occasionally, but that
4712 * only means the timeout gets extended by one period
4713 * in that case. No big deal, and it avoids the lock most of the time.
4716 if (intf->auto_maintenance_timeout > 0) {
4717 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
4718 if (intf->auto_maintenance_timeout > 0) {
4719 intf->auto_maintenance_timeout
4721 if (!intf->maintenance_mode
4722 && (intf->auto_maintenance_timeout <= 0)) {
4723 intf->maintenance_mode_enable = false;
4724 maintenance_mode_update(intf);
4727 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
4731 tasklet_schedule(&intf->recv_tasklet);
4736 static void ipmi_request_event(struct ipmi_smi *intf)
4738 /* No event requests when in maintenance mode. */
4739 if (intf->maintenance_mode_enable)
4742 if (!intf->in_shutdown)
4743 intf->handlers->request_events(intf->send_info);
4746 static struct timer_list ipmi_timer;
4748 static atomic_t stop_operation;
4750 static void ipmi_timeout(struct timer_list *unused)
4752 struct ipmi_smi *intf;
4753 bool need_timer = false;
4756 if (atomic_read(&stop_operation))
4759 index = srcu_read_lock(&ipmi_interfaces_srcu);
4760 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4761 if (atomic_read(&intf->event_waiters)) {
4762 intf->ticks_to_req_ev--;
4763 if (intf->ticks_to_req_ev == 0) {
4764 ipmi_request_event(intf);
4765 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
4770 need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
4772 srcu_read_unlock(&ipmi_interfaces_srcu, index);
4775 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4778 static void need_waiter(struct ipmi_smi *intf)
4780 /* Racy, but worst case we start the timer twice. */
4781 if (!timer_pending(&ipmi_timer))
4782 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4785 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
4786 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
4788 static void free_smi_msg(struct ipmi_smi_msg *msg)
4790 atomic_dec(&smi_msg_inuse_count);
4794 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
4796 struct ipmi_smi_msg *rv;
4797 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
4799 rv->done = free_smi_msg;
4800 rv->user_data = NULL;
4801 atomic_inc(&smi_msg_inuse_count);
4805 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
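/*
 * Added note: smi_msg_inuse_count and recv_msg_inuse_count track
 * outstanding message structures; presumably they let module cleanup
 * detect and warn about leaked messages, since every allocation here
 * is balanced by a decrement in the corresponding free routine.
 */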
4807 static void free_recv_msg(struct ipmi_recv_msg *msg)
4809 atomic_dec(&recv_msg_inuse_count);
4813 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
4815 struct ipmi_recv_msg *rv;
4817 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
4820 rv->done = free_recv_msg;
4821 atomic_inc(&recv_msg_inuse_count);
4826 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
4829 kref_put(&msg->user->refcount, free_user);
4832 EXPORT_SYMBOL(ipmi_free_recv_msg);
4834 static atomic_t panic_done_count = ATOMIC_INIT(0);
4836 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
4838 atomic_dec(&panic_done_count);
4841 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
4843 atomic_dec(&panic_done_count);
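/*
 * Added note: the panic path below charges panic_done_count twice per
 * request, once for the SMI message and once for the receive message;
 * the two dummy done handlers above each drop one count, so the
 * busy-wait in ipmi_panic_request_and_wait() ends only when both
 * halves have completed.
 */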

/*
 * Inside a panic, send a message and wait for a response.
 */
static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
					struct ipmi_addr *addr,
					struct kernel_ipmi_msg *msg)
{
	struct ipmi_smi_msg  smi_msg;
	struct ipmi_recv_msg recv_msg;
	int rv;

	smi_msg.done = dummy_smi_done_handler;
	recv_msg.done = dummy_recv_done_handler;
	atomic_add(2, &panic_done_count);
	rv = i_ipmi_request(NULL,
			    intf,
			    addr,
			    0,
			    msg,
			    intf,
			    &smi_msg,
			    &recv_msg,
			    0,
			    intf->addrinfo[0].address,
			    intf->addrinfo[0].lun,
			    0, 1); /* Don't retry, and don't wait. */
	if (rv)
		atomic_sub(2, &panic_done_count);
	else if (intf->handlers->flush_messages)
		intf->handlers->flush_messages(intf->send_info);

	while (atomic_read(&panic_done_count) != 0)
		ipmi_poll(intf);
}
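
/*
 * Note the accounting in ipmi_panic_request_and_wait(): the counter
 * is raised by two because a single request completes in two steps,
 * dummy_smi_done_handler() when the low-level SMI message is released
 * and dummy_recv_done_handler() when the response is delivered.  With
 * interrupts likely dead during a panic, the busy-wait on ipmi_poll()
 * is what drives both completions:
 *
 *	atomic_add(2, &panic_done_count);	// one per done() callback
 *	// ... submit, or atomic_sub(2, ...) on submission failure ...
 *	while (atomic_read(&panic_done_count) != 0)
 *		ipmi_poll(intf);		// poll until both fire
 */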

static void event_receiver_fetcher(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/* A get event receiver command, save it. */
		intf->event_receiver = msg->msg.data[1];
		intf->event_receiver_lun = msg->msg.data[2] & 0x3;
	}
}

static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/*
		 * A get device id command, save if we are an event
		 * receiver or generator.
		 */
		intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
		intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
	}
}

static void send_panic_events(struct ipmi_smi *intf, char *str)
{
	struct kernel_ipmi_msg msg;
	unsigned char data[16];
	struct ipmi_system_interface_addr *si;
	struct ipmi_addr addr;
	char *p = str;
	struct ipmi_ipmb_addr *ipmb;
	int j;

	if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
		return;

	si = (struct ipmi_system_interface_addr *) &addr;
	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si->channel = IPMI_BMC_CHANNEL;
	si->lun = 0;

	/* Fill in an event telling that we have failed. */
	msg.netfn = 0x04; /* Sensor or Event. */
	msg.cmd = 2; /* Platform event command. */
	msg.data = data;
	msg.data_len = 8;
	data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
	data[1] = 0x03; /* This is for IPMI 1.0. */
	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */

	/*
	 * Put a few breadcrumbs in.  Hopefully later we can add more things
	 * to make the panic events more useful.
	 */
	if (str) {
		data[3] = str[0];
		data[6] = str[1];
		data[7] = str[2];
	}

	/* Send the event announcing the panic. */
	ipmi_panic_request_and_wait(intf, &addr, &msg);

	/*
	 * On every interface, dump a bunch of OEM events holding the
	 * string.
	 */
	if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
		return;

	/*
	 * intf_num is used as a marker to tell if the
	 * interface is valid.  Thus we need a read barrier to
	 * make sure data fetched before checking intf_num
	 * won't be used.
	 */
	smp_rmb();

	/*
	 * First job here is to figure out where to send the
	 * OEM events.  There's no way in IPMI to send OEM
	 * events using an event send command, so we have to
	 * find the SEL to put them in and stick them in
	 * there.
	 */

	/* Get capabilities from the get device id. */
	intf->local_sel_device = 0;
	intf->local_event_generator = 0;
	intf->event_receiver = 0;

	/* Request the device info from the local MC. */
	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
	msg.data = NULL;
	msg.data_len = 0;
	intf->null_user_handler = device_id_fetcher;
	ipmi_panic_request_and_wait(intf, &addr, &msg);

	if (intf->local_event_generator) {
		/* Request the event receiver from the local MC. */
		msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
		msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
		msg.data = NULL;
		msg.data_len = 0;
		intf->null_user_handler = event_receiver_fetcher;
		ipmi_panic_request_and_wait(intf, &addr, &msg);
	}
	intf->null_user_handler = NULL;

	/*
	 * Validate the event receiver.  The low bit must not
	 * be 1 (it must be a valid IPMB address), it cannot
	 * be zero, and it must not be my address.
	 */
	if (((intf->event_receiver & 1) == 0)
	    && (intf->event_receiver != 0)
	    && (intf->event_receiver != intf->addrinfo[0].address)) {
		/*
		 * The event receiver is valid, send an IPMB
		 * message.
		 */
		ipmb = (struct ipmi_ipmb_addr *) &addr;
		ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
		ipmb->channel = 0; /* FIXME - is this right? */
		ipmb->lun = intf->event_receiver_lun;
		ipmb->slave_addr = intf->event_receiver;
	} else if (intf->local_sel_device) {
		/*
		 * The event receiver was not valid (or was
		 * me), but I am an SEL device, just dump it
		 * in my SEL.
		 */
		si = (struct ipmi_system_interface_addr *) &addr;
		si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
		si->channel = IPMI_BMC_CHANNEL;
		si->lun = 0;
	} else
		return; /* Nowhere to send the event. */

	msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
	msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
	msg.data = data;
	msg.data_len = 16;

	j = 0;
	while (*p) {
		int size = strlen(p);

		if (size > 11)
			size = 11;
		data[0] = 0;
		data[1] = 0;
		data[2] = 0xf0; /* OEM event without timestamp. */
		data[3] = intf->addrinfo[0].address;
		data[4] = j++; /* sequence # */
		/*
		 * Always give 11 bytes, so strncpy will fill
		 * it with zeroes for me.
		 */
		strncpy(data+5, p, 11);
		p += size;

		ipmi_panic_request_and_wait(intf, &addr, &msg);
	}
}
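
/*
 * For reference, each Add SEL Entry request in the loop above carries
 * a 16-byte OEM record (type 0xf0, non-timestamped), and the panic
 * string is split across as many records as needed:
 *
 *	data[0..1]  record ID, left 0 (assigned by the SEL device)
 *	data[2]     0xf0, OEM record type without timestamp
 *	data[3]     our IPMB slave address
 *	data[4]     sequence number of this 11-byte chunk
 *	data[5..15] 11 bytes of the panic string, zero padded
 */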

static int has_panicked;

static int panic_event(struct notifier_block *this,
		       unsigned long event,
		       void *ptr)
{
	struct ipmi_smi *intf;
	struct ipmi_user *user;

	if (has_panicked)
		return NOTIFY_DONE;
	has_panicked = 1;

	/* For every registered interface, set it to run to completion. */
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (!intf->handlers || intf->intf_num == -1)
			/* Interface is not ready. */
			continue;

		if (!intf->handlers->poll)
			continue;

		/*
		 * If we were interrupted while locking xmit_msgs_lock or
		 * waiting_rcv_msgs_lock, the corresponding list may be
		 * corrupted.  In this case, drop items on the list for
		 * safety.
		 */
		if (!spin_trylock(&intf->xmit_msgs_lock)) {
			INIT_LIST_HEAD(&intf->xmit_msgs);
			INIT_LIST_HEAD(&intf->hp_xmit_msgs);
		} else
			spin_unlock(&intf->xmit_msgs_lock);

		if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
			INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
		else
			spin_unlock(&intf->waiting_rcv_msgs_lock);

		intf->run_to_completion = 1;
		if (intf->handlers->set_run_to_completion)
			intf->handlers->set_run_to_completion(intf->send_info,
							      1);

		list_for_each_entry_rcu(user, &intf->users, link) {
			if (user->handler->ipmi_panic_handler)
				user->handler->ipmi_panic_handler(
					user->handler_data);
		}

		send_panic_events(intf, ptr);
	}

	return NOTIFY_DONE;
}
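
/*
 * The spin_trylock() handling above is panic-context defensive
 * coding: if the CPU that panicked was holding one of the message
 * list locks, taking the lock normally would deadlock, and the list
 * it protects may be half-updated anyway.  Reinitializing the list
 * head deliberately abandons whatever was queued - losing messages is
 * acceptable once the machine is dead, blocking the panic path is
 * not.
 */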

/* Must be called with ipmi_interfaces_mutex held. */
static int ipmi_register_driver(void)
{
	int rv;

	if (drvregistered)
		return 0;

	rv = driver_register(&ipmidriver.driver);
	if (rv)
		pr_err("Could not register IPMI driver\n");
	else
		drvregistered = true;

	return rv;
}

static struct notifier_block panic_block = {
	.notifier_call	= panic_event,
	.next		= NULL,
	.priority	= 200	/* priority: INT_MAX >= x >= 0 */
};

static int ipmi_init_msghandler(void)
{
	int rv;

	mutex_lock(&ipmi_interfaces_mutex);
	rv = ipmi_register_driver();
	if (rv)
		goto out;
	if (initialized)
		goto out;

	init_srcu_struct(&ipmi_interfaces_srcu);

	timer_setup(&ipmi_timer, ipmi_timeout, 0);
	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);

	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

	initialized = true;

out:
	mutex_unlock(&ipmi_interfaces_mutex);
	return rv;
}
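
/*
 * Initialization is deliberately split: ipmi_init_msghandler() above
 * runs lazily, from the interface registration path in the full
 * driver, while the module init routine below only registers the
 * driver core object.  That way the periodic timer and the panic
 * notifier are only armed on systems that actually bring up an IPMI
 * interface.
 */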

static int __init ipmi_init_msghandler_mod(void)
{
	int rv;

	pr_info("version " IPMI_DRIVER_VERSION "\n");

	mutex_lock(&ipmi_interfaces_mutex);
	rv = ipmi_register_driver();
	mutex_unlock(&ipmi_interfaces_mutex);

	return rv;
}

static void __exit cleanup_ipmi(void)
{
	int count;

	if (initialized) {
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &panic_block);

		/*
		 * This can't be called if any interfaces exist, so no worry
		 * about shutting down the interfaces.
		 */

		/*
		 * Tell the timer to stop, then wait for it to stop.  This
		 * avoids problems with race conditions removing the timer
		 * here.
		 */
		atomic_set(&stop_operation, 1);
		del_timer_sync(&ipmi_timer);

		initialized = false;

		/* Check for buffer leaks. */
		count = atomic_read(&smi_msg_inuse_count);
		if (count != 0)
			pr_warn("SMI message count %d at exit\n", count);
		count = atomic_read(&recv_msg_inuse_count);
		if (count != 0)
			pr_warn("recv message count %d at exit\n", count);

		cleanup_srcu_struct(&ipmi_interfaces_srcu);
	}
	if (drvregistered)
		driver_unregister(&ipmidriver.driver);
}
module_exit(cleanup_ipmi);

module_init(ipmi_init_msghandler_mod);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
MODULE_VERSION(IPMI_DRIVER_VERSION);
MODULE_SOFTDEP("post: ipmi_devintf");