[linux-2.6-microblaze.git] drivers/char/ipmi/ipmi_msghandler.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * ipmi_msghandler.c
4  *
5  * Incoming and outgoing message routing for an IPMI interface.
6  *
7  * Author: MontaVista Software, Inc.
8  *         Corey Minyard <minyard@mvista.com>
9  *         source@mvista.com
10  *
11  * Copyright 2002 MontaVista Software Inc.
12  */
13
14 #define pr_fmt(fmt) "%s" fmt, "IPMI message handler: "
15 #define dev_fmt pr_fmt
16
17 #include <linux/module.h>
18 #include <linux/errno.h>
19 #include <linux/poll.h>
20 #include <linux/sched.h>
21 #include <linux/seq_file.h>
22 #include <linux/spinlock.h>
23 #include <linux/mutex.h>
24 #include <linux/slab.h>
25 #include <linux/ipmi.h>
26 #include <linux/ipmi_smi.h>
27 #include <linux/notifier.h>
28 #include <linux/init.h>
29 #include <linux/proc_fs.h>
30 #include <linux/rcupdate.h>
31 #include <linux/interrupt.h>
32 #include <linux/moduleparam.h>
33 #include <linux/workqueue.h>
34 #include <linux/uuid.h>
35 #include <linux/nospec.h>
36
37 #define IPMI_DRIVER_VERSION "39.2"
38
39 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
40 static int ipmi_init_msghandler(void);
41 static void smi_recv_tasklet(unsigned long);
42 static void handle_new_recv_msgs(struct ipmi_smi *intf);
43 static void need_waiter(struct ipmi_smi *intf);
44 static int handle_one_recv_msg(struct ipmi_smi *intf,
45                                struct ipmi_smi_msg *msg);
46
47 #ifdef DEBUG
48 static void ipmi_debug_msg(const char *title, unsigned char *data,
49                            unsigned int len)
50 {
51         int i, pos;
52         char buf[100];
53
54         pos = snprintf(buf, sizeof(buf), "%s: ", title);
55         for (i = 0; i < len; i++)
56                 pos += snprintf(buf + pos, sizeof(buf) - pos,
57                                 " %2.2x", data[i]);
58         pr_debug("%s\n", buf);
59 }
60 #else
61 static void ipmi_debug_msg(const char *title, unsigned char *data,
62                            unsigned int len)
63 { }
64 #endif
65
66 static bool initialized;
67 static bool drvregistered;
68
69 enum ipmi_panic_event_op {
70         IPMI_SEND_PANIC_EVENT_NONE,
71         IPMI_SEND_PANIC_EVENT,
72         IPMI_SEND_PANIC_EVENT_STRING
73 };
74 #ifdef CONFIG_IPMI_PANIC_STRING
75 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
76 #elif defined(CONFIG_IPMI_PANIC_EVENT)
77 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
78 #else
79 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
80 #endif
81 static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;
82
83 static int panic_op_write_handler(const char *val,
84                                   const struct kernel_param *kp)
85 {
86         char valcp[16];
87         char *s;
88
89         strncpy(valcp, val, 15);
90         valcp[15] = '\0';
91
92         s = strstrip(valcp);
93
94         if (strcmp(s, "none") == 0)
95                 ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_NONE;
96         else if (strcmp(s, "event") == 0)
97                 ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT;
98         else if (strcmp(s, "string") == 0)
99                 ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_STRING;
100         else
101                 return -EINVAL;
102
103         return 0;
104 }
105
106 static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
107 {
108         switch (ipmi_send_panic_event) {
109         case IPMI_SEND_PANIC_EVENT_NONE:
110                 strcpy(buffer, "none");
111                 break;
112
113         case IPMI_SEND_PANIC_EVENT:
114                 strcpy(buffer, "event");
115                 break;
116
117         case IPMI_SEND_PANIC_EVENT_STRING:
118                 strcpy(buffer, "string");
119                 break;
120
121         default:
122                 strcpy(buffer, "???");
123                 break;
124         }
125
126         return strlen(buffer);
127 }
128
129 static const struct kernel_param_ops panic_op_ops = {
130         .set = panic_op_write_handler,
131         .get = panic_op_read_handler
132 };
133 module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
134 MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic.  Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
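/*
 * With the usual module parameter sysfs layout (and assuming this file is
 * built as the ipmi_msghandler module), the setting can typically be changed
 * at runtime by root, e.g.:
 *   echo string > /sys/module/ipmi_msghandler/parameters/panic_op
 */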
135
136
137 #define MAX_EVENTS_IN_QUEUE     25
138
139 /* Remain in auto-maintenance mode for this amount of time (in ms). */
140 static unsigned long maintenance_mode_timeout_ms = 30000;
141 module_param(maintenance_mode_timeout_ms, ulong, 0644);
142 MODULE_PARM_DESC(maintenance_mode_timeout_ms,
143                  "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");
144
145 /*
146  * Don't let a message sit in a queue forever; always time it out with at
147  * least the max message timer.  This is in milliseconds.
148  */
149 #define MAX_MSG_TIMEOUT         60000
150
151 /*
152  * Timeout times below are in milliseconds, and are done off a 1
153  * second timer.  So setting the value to 1000 would mean anything
154  * between 0 and 1000ms.  So really the only reasonable minimum
155  * setting is 2000ms, which is between 1 and 2 seconds.
156  */
157
158 /* The default timeout for message retries. */
159 static unsigned long default_retry_ms = 2000;
160 module_param(default_retry_ms, ulong, 0644);
161 MODULE_PARM_DESC(default_retry_ms,
162                  "The time (milliseconds) between retry sends");
163
164 /* The default timeout for maintenance mode message retries. */
165 static unsigned long default_maintenance_retry_ms = 3000;
166 module_param(default_maintenance_retry_ms, ulong, 0644);
167 MODULE_PARM_DESC(default_maintenance_retry_ms,
168                  "The time (milliseconds) between retry sends in maintenance mode");
169
170 /* The default maximum number of retries */
171 static unsigned int default_max_retries = 4;
172 module_param(default_max_retries, uint, 0644);
173 MODULE_PARM_DESC(default_max_retries,
174                  "The time (milliseconds) between retry sends in maintenance mode");
175
176 /* Call every ~1000 ms. */
177 #define IPMI_TIMEOUT_TIME       1000
178
179 /* How many jiffies does it take to get to the timeout time. */
180 #define IPMI_TIMEOUT_JIFFIES    ((IPMI_TIMEOUT_TIME * HZ) / 1000)
181
182 /*
183  * Request events from the queue every second (this is the number of
184  * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
185  * future, IPMI will add a way to know immediately if an event is in
186  * the queue and this silliness can go away.
187  */
188 #define IPMI_REQUEST_EV_TIME    (1000 / (IPMI_TIMEOUT_TIME))
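/*
 * With IPMI_TIMEOUT_TIME at 1000 this evaluates to 1, i.e. an event
 * request roughly every timer tick (about once a second).
 */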
189
190 /* How long should we cache dynamic device IDs? */
191 #define IPMI_DYN_DEV_ID_EXPIRY  (10 * HZ)
192
193 /*
194  * The main "user" data structure.
195  */
196 struct ipmi_user {
197         struct list_head link;
198
199         /*
200          * Set to NULL when the user is destroyed; otherwise a pointer to
201          * the user itself so srcu_dereference() can be used on it.
202          */
203         struct ipmi_user *self;
204         struct srcu_struct release_barrier;
205
206         struct kref refcount;
207
208         /* The upper layer that handles receive messages. */
209         const struct ipmi_user_hndl *handler;
210         void             *handler_data;
211
212         /* The interface this user is bound to. */
213         struct ipmi_smi *intf;
214
215         /* Does this interface receive IPMI events? */
216         bool gets_events;
217 };
218
219 static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
220         __acquires(user->release_barrier)
221 {
222         struct ipmi_user *ruser;
223
224         *index = srcu_read_lock(&user->release_barrier);
225         ruser = srcu_dereference(user->self, &user->release_barrier);
226         if (!ruser)
227                 srcu_read_unlock(&user->release_barrier, *index);
228         return ruser;
229 }
230
231 static void release_ipmi_user(struct ipmi_user *user, int index)
232 {
233         srcu_read_unlock(&user->release_barrier, index);
234 }
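/*
 * The usual caller pattern around the pair above (ipmi_get_version()
 * below is one complete example):
 *
 *	int index;
 *
 *	user = acquire_ipmi_user(user, &index);
 *	if (!user)
 *		return -ENODEV;
 *	... use user->intf and friends ...
 *	release_ipmi_user(user, index);
 */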
235
236 struct cmd_rcvr {
237         struct list_head link;
238
239         struct ipmi_user *user;
240         unsigned char netfn;
241         unsigned char cmd;
242         unsigned int  chans;
243
244         /*
245          * This is used to form a linked list during mass deletion.
246          * Since this is in an RCU list, we cannot use the link above
247          * or change any data until the RCU period completes.  So we
248          * use this next variable during mass deletion so we can have
249          * a list and don't have to wait and restart the search on
250          * every individual deletion of a command.
251          */
252         struct cmd_rcvr *next;
253 };
254
255 struct seq_table {
256         unsigned int         inuse : 1;
257         unsigned int         broadcast : 1;
258
259         unsigned long        timeout;
260         unsigned long        orig_timeout;
261         unsigned int         retries_left;
262
263         /*
264          * To verify on an incoming send message response that this is
265          * the message that the response is for, we keep a sequence id
266          * and increment it every time we send a message.
267          */
268         long                 seqid;
269
270         /*
271          * This is held so we can properly respond to the message on a
272          * timeout, and it is used to hold the temporary data for
273          * retransmission, too.
274          */
275         struct ipmi_recv_msg *recv_msg;
276 };
277
278 /*
279  * Store the information in a msgid (long) to allow us to find a
280  * sequence table entry from the msgid.
281  */
282 #define STORE_SEQ_IN_MSGID(seq, seqid) \
283         ((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
284
285 #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
286         do {                                                            \
287                 seq = (((msgid) >> 26) & 0x3f);                         \
288                 seqid = ((msgid) & 0x3ffffff);                          \
289         } while (0)
290
291 #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
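/*
 * Layout: bits 31..26 of the msgid hold the 6-bit sequence table index
 * (0..63, matching IPMI_IPMB_NUM_SEQ below) and bits 25..0 hold the
 * per-entry seqid.  For example, seq 0x05 with seqid 0x123 packs to
 * msgid 0x14000123.
 */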
292
293 #define IPMI_MAX_CHANNELS       16
294 struct ipmi_channel {
295         unsigned char medium;
296         unsigned char protocol;
297 };
298
299 struct ipmi_channel_set {
300         struct ipmi_channel c[IPMI_MAX_CHANNELS];
301 };
302
303 struct ipmi_my_addrinfo {
304         /*
305          * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
306          * but may be changed by the user.
307          */
308         unsigned char address;
309
310         /*
311          * My LUN.  This should generally stay the SMS LUN, but just in
312          * case...
313          */
314         unsigned char lun;
315 };
316
317 /*
318  * Note that the product id, manufacturer id, guid, and device id are
319  * immutable in this structure, so dyn_mutex is not required for
320  * accessing those.  If those change on a BMC, a new BMC is allocated.
321  */
322 struct bmc_device {
323         struct platform_device pdev;
324         struct list_head       intfs; /* Interfaces on this BMC. */
325         struct ipmi_device_id  id;
326         struct ipmi_device_id  fetch_id;
327         int                    dyn_id_set;
328         unsigned long          dyn_id_expiry;
329         struct mutex           dyn_mutex; /* Protects id, intfs, & dyn* */
330         guid_t                 guid;
331         guid_t                 fetch_guid;
332         int                    dyn_guid_set;
333         struct kref            usecount;
334         struct work_struct     remove_work;
335 };
336 #define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
337
338 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
339                              struct ipmi_device_id *id,
340                              bool *guid_set, guid_t *guid);
341
342 /*
343  * Various statistics for IPMI, these index stats[] in the ipmi_smi
344  * structure.
345  */
346 enum ipmi_stat_indexes {
347         /* Commands we got from the user that were invalid. */
348         IPMI_STAT_sent_invalid_commands = 0,
349
350         /* Commands we sent to the MC. */
351         IPMI_STAT_sent_local_commands,
352
353         /* Responses from the MC that were delivered to a user. */
354         IPMI_STAT_handled_local_responses,
355
356         /* Responses from the MC that were not delivered to a user. */
357         IPMI_STAT_unhandled_local_responses,
358
359         /* Commands we sent out to the IPMB bus. */
360         IPMI_STAT_sent_ipmb_commands,
361
362         /* Commands sent on the IPMB that had errors on the SEND CMD */
363         IPMI_STAT_sent_ipmb_command_errs,
364
365         /* Each retransmit increments this count. */
366         IPMI_STAT_retransmitted_ipmb_commands,
367
368         /*
369          * When a message times out (runs out of retransmits) this is
370          * incremented.
371          */
372         IPMI_STAT_timed_out_ipmb_commands,
373
374         /*
375          * This is like above, but for broadcasts.  Broadcasts are
376          * *not* included in the above count (they are expected to
377          * time out).
378          */
379         IPMI_STAT_timed_out_ipmb_broadcasts,
380
381         /* Responses I have sent to the IPMB bus. */
382         IPMI_STAT_sent_ipmb_responses,
383
384         /* The response was delivered to the user. */
385         IPMI_STAT_handled_ipmb_responses,
386
387         /* The response had invalid data in it. */
388         IPMI_STAT_invalid_ipmb_responses,
389
390         /* The response didn't have anyone waiting for it. */
391         IPMI_STAT_unhandled_ipmb_responses,
392
393         /* Commands we sent out on the LAN. */
394         IPMI_STAT_sent_lan_commands,
395
396         /* Commands sent on the LAN that had errors on the SEND CMD */
397         IPMI_STAT_sent_lan_command_errs,
398
399         /* Each retransmit increments this count. */
400         IPMI_STAT_retransmitted_lan_commands,
401
402         /*
403          * When a message times out (runs out of retransmits) this is
404          * incremented.
405          */
406         IPMI_STAT_timed_out_lan_commands,
407
408         /* Responses I have sent out on the LAN. */
409         IPMI_STAT_sent_lan_responses,
410
411         /* The response was delivered to the user. */
412         IPMI_STAT_handled_lan_responses,
413
414         /* The response had invalid data in it. */
415         IPMI_STAT_invalid_lan_responses,
416
417         /* The response didn't have anyone waiting for it. */
418         IPMI_STAT_unhandled_lan_responses,
419
420         /* The command was delivered to the user. */
421         IPMI_STAT_handled_commands,
422
423         /* The command had invalid data in it. */
424         IPMI_STAT_invalid_commands,
425
426         /* The command didn't have anyone waiting for it. */
427         IPMI_STAT_unhandled_commands,
428
429         /* Invalid data in an event. */
430         IPMI_STAT_invalid_events,
431
432         /* Events that were received with the proper format. */
433         IPMI_STAT_events,
434
435         /* Retransmissions on IPMB that failed. */
436         IPMI_STAT_dropped_rexmit_ipmb_commands,
437
438         /* Retransmissions on LAN that failed. */
439         IPMI_STAT_dropped_rexmit_lan_commands,
440
441         /* This *must* remain last, add new values above this. */
442         IPMI_NUM_STATS
443 };
444
445
446 #define IPMI_IPMB_NUM_SEQ       64
447 struct ipmi_smi {
448         /* What interface number are we? */
449         int intf_num;
450
451         struct kref refcount;
452
453         /* Set when the interface is being unregistered. */
454         bool in_shutdown;
455
456         /* Used for a list of interfaces. */
457         struct list_head link;
458
459         /*
460          * The list of upper layers that are using me.  seq_lock write
461          * protects this.  Read protection is with srcu.
462          */
463         struct list_head users;
464         struct srcu_struct users_srcu;
465
466         /* Used for wake ups at startup. */
467         wait_queue_head_t waitq;
468
469         /*
470          * Prevents the interface from being unregistered when the
471          * interface is used by being looked up through the BMC
472          * structure.
473          */
474         struct mutex bmc_reg_mutex;
475
476         struct bmc_device tmp_bmc;
477         struct bmc_device *bmc;
478         bool bmc_registered;
479         struct list_head bmc_link;
480         char *my_dev_name;
481         bool in_bmc_register;  /* Handle recursive situations.  Yuck. */
482         struct work_struct bmc_reg_work;
483
484         const struct ipmi_smi_handlers *handlers;
485         void                     *send_info;
486
487         /* Driver-model device for the system interface. */
488         struct device          *si_dev;
489
490         /*
491          * A table of sequence numbers for this interface.  We use the
492          * sequence numbers for IPMB messages that go out of the
493          * interface to match them up with their responses.  A routine
494          * is called periodically to time the items in this list.
495          */
496         spinlock_t       seq_lock;
497         struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
498         int curr_seq;
499
500         /*
501          * Messages queued for delivery.  If delivery fails (out of memory
502          * for instance), they will stay in here to be processed later in a
503          * periodic timer interrupt.  The tasklet is for handling received
504          * messages directly from the handler.
505          */
506         spinlock_t       waiting_rcv_msgs_lock;
507         struct list_head waiting_rcv_msgs;
508         atomic_t         watchdog_pretimeouts_to_deliver;
509         struct tasklet_struct recv_tasklet;
510
511         spinlock_t             xmit_msgs_lock;
512         struct list_head       xmit_msgs;
513         struct ipmi_smi_msg    *curr_msg;
514         struct list_head       hp_xmit_msgs;
515
516         /*
517          * The list of command receivers that are registered for commands
518          * on this interface.
519          */
520         struct mutex     cmd_rcvrs_mutex;
521         struct list_head cmd_rcvrs;
522
523         /*
524          * Events that were queued because no one was there to receive
525          * them.
526          */
527         spinlock_t       events_lock; /* For dealing with event stuff. */
528         struct list_head waiting_events;
529         unsigned int     waiting_events_count; /* How many events in queue? */
530         char             delivering_events;
531         char             event_msg_printed;
532
533         /* How many users are waiting for events? */
534         atomic_t         event_waiters;
535         unsigned int     ticks_to_req_ev;
536
537         spinlock_t       watch_lock; /* For dealing with watch stuff below. */
538
539         /* How many users are waiting for commands? */
540         unsigned int     command_waiters;
541
542         /* How many users are waiting for watchdogs? */
543         unsigned int     watchdog_waiters;
544
545         /* How many users are waiting for message responses? */
546         unsigned int     response_waiters;
547
548         /*
549          * Tells what the lower layer has last been asked to watch for,
550          * messages and/or watchdogs.  Protected by watch_lock.
551          */
552         unsigned int     last_watch_mask;
553
554         /*
555          * The event receiver for my BMC, only really used at panic
556          * shutdown as a place to store this.
557          */
558         unsigned char event_receiver;
559         unsigned char event_receiver_lun;
560         unsigned char local_sel_device;
561         unsigned char local_event_generator;
562
563         /* For handling of maintenance mode. */
564         int maintenance_mode;
565         bool maintenance_mode_enable;
566         int auto_maintenance_timeout;
567         spinlock_t maintenance_mode_lock; /* Used in a timer... */
568
569         /*
570          * If we are doing maintenance on something on IPMB, extend
571          * the timeout time to avoid timeouts writing firmware and
572          * such.
573          */
574         int ipmb_maintenance_mode_timeout;
575
576         /*
577          * A cheap hack: if this is non-NULL and a message to an
578          * interface comes in with a NULL user, call this routine with
579          * it.  Note that the message will still be freed by the
580          * caller.  This only works on the system interface.
581          *
582          * Protected by bmc_reg_mutex.
583          */
584         void (*null_user_handler)(struct ipmi_smi *intf,
585                                   struct ipmi_recv_msg *msg);
586
587         /*
588          * When we are scanning the channels for an SMI, this will
589          * tell which channel we are scanning.
590          */
591         int curr_channel;
592
593         /* Channel information */
594         struct ipmi_channel_set *channel_list;
595         unsigned int curr_working_cset; /* First index into the following. */
596         struct ipmi_channel_set wchannels[2];
597         struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
598         bool channels_ready;
599
600         atomic_t stats[IPMI_NUM_STATS];
601
602         /*
603          * Duplicate of the run_to_completion flag in the smb_info, smi_info
604          * and ipmi_serial_info structures.  Used to decrease the number of
605          * parameters passed by "low" level IPMI code.
606          */
607         int run_to_completion;
608 };
609 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
610
611 static void __get_guid(struct ipmi_smi *intf);
612 static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
613 static int __ipmi_bmc_register(struct ipmi_smi *intf,
614                                struct ipmi_device_id *id,
615                                bool guid_set, guid_t *guid, int intf_num);
616 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
617
618
619 /*
620  * The driver model view of the IPMI messaging driver.
621  */
622 static struct platform_driver ipmidriver = {
623         .driver = {
624                 .name = "ipmi",
625                 .bus = &platform_bus_type
626         }
627 };
628 /*
629  * This mutex keeps us from adding the same BMC twice.
630  */
631 static DEFINE_MUTEX(ipmidriver_mutex);
632
633 static LIST_HEAD(ipmi_interfaces);
634 static DEFINE_MUTEX(ipmi_interfaces_mutex);
635 struct srcu_struct ipmi_interfaces_srcu;
636
637 /*
638  * List of watchers that want to know when smi's are added and deleted.
639  */
640 static LIST_HEAD(smi_watchers);
641 static DEFINE_MUTEX(smi_watchers_mutex);
642
643 #define ipmi_inc_stat(intf, stat) \
644         atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
645 #define ipmi_get_stat(intf, stat) \
646         ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
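/*
 * For example, ipmi_inc_stat(intf, sent_ipmb_commands) expands to
 * atomic_inc(&(intf)->stats[IPMI_STAT_sent_ipmb_commands]).
 */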
647
648 static const char * const addr_src_to_str[] = {
649         "invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
650         "device-tree", "platform"
651 };
652
653 const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
654 {
655         if (src >= SI_LAST)
656                 src = 0; /* Invalid */
657         return addr_src_to_str[src];
658 }
659 EXPORT_SYMBOL(ipmi_addr_src_to_str);
660
661 static int is_lan_addr(struct ipmi_addr *addr)
662 {
663         return addr->addr_type == IPMI_LAN_ADDR_TYPE;
664 }
665
666 static int is_ipmb_addr(struct ipmi_addr *addr)
667 {
668         return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
669 }
670
671 static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
672 {
673         return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
674 }
675
676 static void free_recv_msg_list(struct list_head *q)
677 {
678         struct ipmi_recv_msg *msg, *msg2;
679
680         list_for_each_entry_safe(msg, msg2, q, link) {
681                 list_del(&msg->link);
682                 ipmi_free_recv_msg(msg);
683         }
684 }
685
686 static void free_smi_msg_list(struct list_head *q)
687 {
688         struct ipmi_smi_msg *msg, *msg2;
689
690         list_for_each_entry_safe(msg, msg2, q, link) {
691                 list_del(&msg->link);
692                 ipmi_free_smi_msg(msg);
693         }
694 }
695
696 static void clean_up_interface_data(struct ipmi_smi *intf)
697 {
698         int              i;
699         struct cmd_rcvr  *rcvr, *rcvr2;
700         struct list_head list;
701
702         tasklet_kill(&intf->recv_tasklet);
703
704         free_smi_msg_list(&intf->waiting_rcv_msgs);
705         free_recv_msg_list(&intf->waiting_events);
706
707         /*
708          * Wholesale remove all the entries from the list in the
709          * interface and wait for RCU to know that none are in use.
710          */
711         mutex_lock(&intf->cmd_rcvrs_mutex);
712         INIT_LIST_HEAD(&list);
713         list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
714         mutex_unlock(&intf->cmd_rcvrs_mutex);
715
716         list_for_each_entry_safe(rcvr, rcvr2, &list, link)
717                 kfree(rcvr);
718
719         for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
720                 if ((intf->seq_table[i].inuse)
721                                         && (intf->seq_table[i].recv_msg))
722                         ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
723         }
724 }
725
726 static void intf_free(struct kref *ref)
727 {
728         struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
729
730         clean_up_interface_data(intf);
731         kfree(intf);
732 }
733
734 struct watcher_entry {
735         int              intf_num;
736         struct ipmi_smi  *intf;
737         struct list_head link;
738 };
739
740 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
741 {
742         struct ipmi_smi *intf;
743         int index, rv;
744
745         /*
746          * Make sure the driver is actually initialized; this handles
747          * problems with initialization order.
748          */
749         rv = ipmi_init_msghandler();
750         if (rv)
751                 return rv;
752
753         mutex_lock(&smi_watchers_mutex);
754
755         list_add(&watcher->link, &smi_watchers);
756
757         index = srcu_read_lock(&ipmi_interfaces_srcu);
758         list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
759                 int intf_num = READ_ONCE(intf->intf_num);
760
761                 if (intf_num == -1)
762                         continue;
763                 watcher->new_smi(intf_num, intf->si_dev);
764         }
765         srcu_read_unlock(&ipmi_interfaces_srcu, index);
766
767         mutex_unlock(&smi_watchers_mutex);
768
769         return 0;
770 }
771 EXPORT_SYMBOL(ipmi_smi_watcher_register);
772
773 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
774 {
775         mutex_lock(&smi_watchers_mutex);
776         list_del(&watcher->link);
777         mutex_unlock(&smi_watchers_mutex);
778         return 0;
779 }
780 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
781
782 /*
783  * Takes and releases smi_watchers_mutex itself; do not call with it held.
784  */
785 static void
786 call_smi_watchers(int i, struct device *dev)
787 {
788         struct ipmi_smi_watcher *w;
789
790         mutex_lock(&smi_watchers_mutex);
791         list_for_each_entry(w, &smi_watchers, link) {
792                 if (try_module_get(w->owner)) {
793                         w->new_smi(i, dev);
794                         module_put(w->owner);
795                 }
796         }
797         mutex_unlock(&smi_watchers_mutex);
798 }
799
800 static int
801 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
802 {
803         if (addr1->addr_type != addr2->addr_type)
804                 return 0;
805
806         if (addr1->channel != addr2->channel)
807                 return 0;
808
809         if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
810                 struct ipmi_system_interface_addr *smi_addr1
811                     = (struct ipmi_system_interface_addr *) addr1;
812                 struct ipmi_system_interface_addr *smi_addr2
813                     = (struct ipmi_system_interface_addr *) addr2;
814                 return (smi_addr1->lun == smi_addr2->lun);
815         }
816
817         if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
818                 struct ipmi_ipmb_addr *ipmb_addr1
819                     = (struct ipmi_ipmb_addr *) addr1;
820                 struct ipmi_ipmb_addr *ipmb_addr2
821                     = (struct ipmi_ipmb_addr *) addr2;
822
823                 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
824                         && (ipmb_addr1->lun == ipmb_addr2->lun));
825         }
826
827         if (is_lan_addr(addr1)) {
828                 struct ipmi_lan_addr *lan_addr1
829                         = (struct ipmi_lan_addr *) addr1;
830                 struct ipmi_lan_addr *lan_addr2
831                     = (struct ipmi_lan_addr *) addr2;
832
833                 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
834                         && (lan_addr1->local_SWID == lan_addr2->local_SWID)
835                         && (lan_addr1->session_handle
836                             == lan_addr2->session_handle)
837                         && (lan_addr1->lun == lan_addr2->lun));
838         }
839
840         return 1;
841 }
842
843 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
844 {
845         if (len < sizeof(struct ipmi_system_interface_addr))
846                 return -EINVAL;
847
848         if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
849                 if (addr->channel != IPMI_BMC_CHANNEL)
850                         return -EINVAL;
851                 return 0;
852         }
853
854         if ((addr->channel == IPMI_BMC_CHANNEL)
855             || (addr->channel >= IPMI_MAX_CHANNELS)
856             || (addr->channel < 0))
857                 return -EINVAL;
858
859         if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
860                 if (len < sizeof(struct ipmi_ipmb_addr))
861                         return -EINVAL;
862                 return 0;
863         }
864
865         if (is_lan_addr(addr)) {
866                 if (len < sizeof(struct ipmi_lan_addr))
867                         return -EINVAL;
868                 return 0;
869         }
870
871         return -EINVAL;
872 }
873 EXPORT_SYMBOL(ipmi_validate_addr);
874
875 unsigned int ipmi_addr_length(int addr_type)
876 {
877         if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
878                 return sizeof(struct ipmi_system_interface_addr);
879
880         if ((addr_type == IPMI_IPMB_ADDR_TYPE)
881                         || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
882                 return sizeof(struct ipmi_ipmb_addr);
883
884         if (addr_type == IPMI_LAN_ADDR_TYPE)
885                 return sizeof(struct ipmi_lan_addr);
886
887         return 0;
888 }
889 EXPORT_SYMBOL(ipmi_addr_length);
890
891 static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
892 {
893         int rv = 0;
894
895         if (!msg->user) {
896                 /* Special handling for NULL users. */
897                 if (intf->null_user_handler) {
898                         intf->null_user_handler(intf, msg);
899                 } else {
900                         /* No handler, so give up. */
901                         rv = -EINVAL;
902                 }
903                 ipmi_free_recv_msg(msg);
904         } else if (!oops_in_progress) {
905                 /*
906                  * If we are running in the panic context, calling the
907                  * receive handler has little meaning and risks a deadlock,
908                  * so simply skip it in that case.
909                  */
910                 int index;
911                 struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
912
913                 if (user) {
914                         user->handler->ipmi_recv_hndl(msg, user->handler_data);
915                         release_ipmi_user(user, index);
916                 } else {
917                         /* User went away, give up. */
918                         ipmi_free_recv_msg(msg);
919                         rv = -EINVAL;
920                 }
921         }
922
923         return rv;
924 }
925
926 static void deliver_local_response(struct ipmi_smi *intf,
927                                    struct ipmi_recv_msg *msg)
928 {
929         if (deliver_response(intf, msg))
930                 ipmi_inc_stat(intf, unhandled_local_responses);
931         else
932                 ipmi_inc_stat(intf, handled_local_responses);
933 }
934
935 static void deliver_err_response(struct ipmi_smi *intf,
936                                  struct ipmi_recv_msg *msg, int err)
937 {
938         msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
939         msg->msg_data[0] = err;
940         msg->msg.netfn |= 1; /* Convert to a response. */
941         msg->msg.data_len = 1;
942         msg->msg.data = msg->msg_data;
943         deliver_local_response(intf, msg);
944 }
945
946 static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
947 {
948         unsigned long iflags;
949
950         if (!intf->handlers->set_need_watch)
951                 return;
952
953         spin_lock_irqsave(&intf->watch_lock, iflags);
954         if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
955                 intf->response_waiters++;
956
957         if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
958                 intf->watchdog_waiters++;
959
960         if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
961                 intf->command_waiters++;
962
963         if ((intf->last_watch_mask & flags) != flags) {
964                 intf->last_watch_mask |= flags;
965                 intf->handlers->set_need_watch(intf->send_info,
966                                                intf->last_watch_mask);
967         }
968         spin_unlock_irqrestore(&intf->watch_lock, iflags);
969 }
970
971 static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
972 {
973         unsigned long iflags;
974
975         if (!intf->handlers->set_need_watch)
976                 return;
977
978         spin_lock_irqsave(&intf->watch_lock, iflags);
979         if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
980                 intf->response_waiters--;
981
982         if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
983                 intf->watchdog_waiters--;
984
985         if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
986                 intf->command_waiters--;
987
988         flags = 0;
989         if (intf->response_waiters)
990                 flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
991         if (intf->watchdog_waiters)
992                 flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
993         if (intf->command_waiters)
994                 flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;
995
996         if (intf->last_watch_mask != flags) {
997                 intf->last_watch_mask = flags;
998                 intf->handlers->set_need_watch(intf->send_info,
999                                                intf->last_watch_mask);
1000         }
1001         spin_unlock_irqrestore(&intf->watch_lock, iflags);
1002 }
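/*
 * Note that smi_add_watch() and smi_remove_watch() calls must be balanced
 * per flag: the response/watchdog/command waiter counts above track how
 * many users still need each class of watch, and the lower layer is only
 * told via set_need_watch() when the combined mask actually changes.
 */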
1003
1004 /*
1005  * Find the next sequence number not being used and add the given
1006  * message with the given timeout to the sequence table.  This must be
1007  * called with the interface's seq_lock held.
1008  */
1009 static int intf_next_seq(struct ipmi_smi      *intf,
1010                          struct ipmi_recv_msg *recv_msg,
1011                          unsigned long        timeout,
1012                          int                  retries,
1013                          int                  broadcast,
1014                          unsigned char        *seq,
1015                          long                 *seqid)
1016 {
1017         int          rv = 0;
1018         unsigned int i;
1019
1020         if (timeout == 0)
1021                 timeout = default_retry_ms;
1022         if (retries < 0)
1023                 retries = default_max_retries;
1024
1025         for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
1026                                         i = (i+1)%IPMI_IPMB_NUM_SEQ) {
1027                 if (!intf->seq_table[i].inuse)
1028                         break;
1029         }
1030
1031         if (!intf->seq_table[i].inuse) {
1032                 intf->seq_table[i].recv_msg = recv_msg;
1033
1034                 /*
1035                  * Start with the maximum timeout, when the send response
1036                  * comes in we will start the real timer.
1037                  */
1038                 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
1039                 intf->seq_table[i].orig_timeout = timeout;
1040                 intf->seq_table[i].retries_left = retries;
1041                 intf->seq_table[i].broadcast = broadcast;
1042                 intf->seq_table[i].inuse = 1;
1043                 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
1044                 *seq = i;
1045                 *seqid = intf->seq_table[i].seqid;
1046                 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
1047                 smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1048                 need_waiter(intf);
1049         } else {
1050                 rv = -EAGAIN;
1051         }
1052
1053         return rv;
1054 }
1055
1056 /*
1057  * Return the receive message for the given sequence number and
1058  * release the sequence number so it can be reused.  Some other data
1059  * is passed in to be sure the message matches up correctly (to help
1060  * guard against messages coming in after their timeout and the
1061  * sequence number being reused).
1062  */
1063 static int intf_find_seq(struct ipmi_smi      *intf,
1064                          unsigned char        seq,
1065                          short                channel,
1066                          unsigned char        cmd,
1067                          unsigned char        netfn,
1068                          struct ipmi_addr     *addr,
1069                          struct ipmi_recv_msg **recv_msg)
1070 {
1071         int           rv = -ENODEV;
1072         unsigned long flags;
1073
1074         if (seq >= IPMI_IPMB_NUM_SEQ)
1075                 return -EINVAL;
1076
1077         spin_lock_irqsave(&intf->seq_lock, flags);
1078         if (intf->seq_table[seq].inuse) {
1079                 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
1080
1081                 if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
1082                                 && (msg->msg.netfn == netfn)
1083                                 && (ipmi_addr_equal(addr, &msg->addr))) {
1084                         *recv_msg = msg;
1085                         intf->seq_table[seq].inuse = 0;
1086                         smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1087                         rv = 0;
1088                 }
1089         }
1090         spin_unlock_irqrestore(&intf->seq_lock, flags);
1091
1092         return rv;
1093 }
1094
1095
1096 /* Start the timer for a specific sequence table entry. */
1097 static int intf_start_seq_timer(struct ipmi_smi *intf,
1098                                 long       msgid)
1099 {
1100         int           rv = -ENODEV;
1101         unsigned long flags;
1102         unsigned char seq;
1103         unsigned long seqid;
1104
1105
1106         GET_SEQ_FROM_MSGID(msgid, seq, seqid);
1107
1108         spin_lock_irqsave(&intf->seq_lock, flags);
1109         /*
1110          * We do this verification because the user can be deleted
1111          * while a message is outstanding.
1112          */
1113         if ((intf->seq_table[seq].inuse)
1114                                 && (intf->seq_table[seq].seqid == seqid)) {
1115                 struct seq_table *ent = &intf->seq_table[seq];
1116                 ent->timeout = ent->orig_timeout;
1117                 rv = 0;
1118         }
1119         spin_unlock_irqrestore(&intf->seq_lock, flags);
1120
1121         return rv;
1122 }
1123
1124 /* Got an error for the send message for a specific sequence number. */
1125 static int intf_err_seq(struct ipmi_smi *intf,
1126                         long         msgid,
1127                         unsigned int err)
1128 {
1129         int                  rv = -ENODEV;
1130         unsigned long        flags;
1131         unsigned char        seq;
1132         unsigned long        seqid;
1133         struct ipmi_recv_msg *msg = NULL;
1134
1135
1136         GET_SEQ_FROM_MSGID(msgid, seq, seqid);
1137
1138         spin_lock_irqsave(&intf->seq_lock, flags);
1139         /*
1140          * We do this verification because the user can be deleted
1141          * while a message is outstanding.
1142          */
1143         if ((intf->seq_table[seq].inuse)
1144                                 && (intf->seq_table[seq].seqid == seqid)) {
1145                 struct seq_table *ent = &intf->seq_table[seq];
1146
1147                 ent->inuse = 0;
1148                 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1149                 msg = ent->recv_msg;
1150                 rv = 0;
1151         }
1152         spin_unlock_irqrestore(&intf->seq_lock, flags);
1153
1154         if (msg)
1155                 deliver_err_response(intf, msg, err);
1156
1157         return rv;
1158 }
1159
1160 int ipmi_create_user(unsigned int          if_num,
1161                      const struct ipmi_user_hndl *handler,
1162                      void                  *handler_data,
1163                      struct ipmi_user      **user)
1164 {
1165         unsigned long flags;
1166         struct ipmi_user *new_user;
1167         int           rv, index;
1168         struct ipmi_smi *intf;
1169
1170         /*
1171          * There is no module usecount here, because it's not
1172          * required.  Since this can only be used by and called from
1173          * other modules, they will implicitly use this module, and
1174          * thus this can't be removed unless the other modules are
1175          * removed.
1176          */
1177
1178         if (handler == NULL)
1179                 return -EINVAL;
1180
1181         /*
1182          * Make sure the driver is actually initialized; this handles
1183          * problems with initialization order.
1184          */
1185         rv = ipmi_init_msghandler();
1186         if (rv)
1187                 return rv;
1188
1189         new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
1190         if (!new_user)
1191                 return -ENOMEM;
1192
1193         index = srcu_read_lock(&ipmi_interfaces_srcu);
1194         list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1195                 if (intf->intf_num == if_num)
1196                         goto found;
1197         }
1198         /* Not found, return an error */
1199         rv = -EINVAL;
1200         goto out_kfree;
1201
1202  found:
1203         rv = init_srcu_struct(&new_user->release_barrier);
1204         if (rv)
1205                 goto out_kfree;
1206
1207         /* Note that each existing user holds a refcount to the interface. */
1208         kref_get(&intf->refcount);
1209
1210         kref_init(&new_user->refcount);
1211         new_user->handler = handler;
1212         new_user->handler_data = handler_data;
1213         new_user->intf = intf;
1214         new_user->gets_events = false;
1215
1216         rcu_assign_pointer(new_user->self, new_user);
1217         spin_lock_irqsave(&intf->seq_lock, flags);
1218         list_add_rcu(&new_user->link, &intf->users);
1219         spin_unlock_irqrestore(&intf->seq_lock, flags);
1220         if (handler->ipmi_watchdog_pretimeout)
1221                 /* User wants pretimeouts, so make sure to watch for them. */
1222                 smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
1223         srcu_read_unlock(&ipmi_interfaces_srcu, index);
1224         *user = new_user;
1225         return 0;
1226
1227 out_kfree:
1228         srcu_read_unlock(&ipmi_interfaces_srcu, index);
1229         kfree(new_user);
1230         return rv;
1231 }
1232 EXPORT_SYMBOL(ipmi_create_user);
1233
1234 int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
1235 {
1236         int rv, index;
1237         struct ipmi_smi *intf;
1238
1239         index = srcu_read_lock(&ipmi_interfaces_srcu);
1240         list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1241                 if (intf->intf_num == if_num)
1242                         goto found;
1243         }
1244         srcu_read_unlock(&ipmi_interfaces_srcu, index);
1245
1246         /* Not found, return an error */
1247         return -EINVAL;
1248
1249 found:
1250         if (!intf->handlers->get_smi_info)
1251                 rv = -ENOTTY;
1252         else
1253                 rv = intf->handlers->get_smi_info(intf->send_info, data);
1254         srcu_read_unlock(&ipmi_interfaces_srcu, index);
1255
1256         return rv;
1257 }
1258 EXPORT_SYMBOL(ipmi_get_smi_info);
1259
1260 static void free_user(struct kref *ref)
1261 {
1262         struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
1263         cleanup_srcu_struct(&user->release_barrier);
1264         kfree(user);
1265 }
1266
1267 static void _ipmi_destroy_user(struct ipmi_user *user)
1268 {
1269         struct ipmi_smi  *intf = user->intf;
1270         int              i;
1271         unsigned long    flags;
1272         struct cmd_rcvr  *rcvr;
1273         struct cmd_rcvr  *rcvrs = NULL;
1274
1275         if (!acquire_ipmi_user(user, &i)) {
1276                 /*
1277                  * The user has already been cleaned up, just make sure
1278                  * nothing is using it and return.
1279                  */
1280                 synchronize_srcu(&user->release_barrier);
1281                 return;
1282         }
1283
1284         rcu_assign_pointer(user->self, NULL);
1285         release_ipmi_user(user, i);
1286
1287         synchronize_srcu(&user->release_barrier);
1288
1289         if (user->handler->shutdown)
1290                 user->handler->shutdown(user->handler_data);
1291
1292         if (user->handler->ipmi_watchdog_pretimeout)
1293                 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
1294
1295         if (user->gets_events)
1296                 atomic_dec(&intf->event_waiters);
1297
1298         /* Remove the user from the interface's sequence table. */
1299         spin_lock_irqsave(&intf->seq_lock, flags);
1300         list_del_rcu(&user->link);
1301
1302         for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
1303                 if (intf->seq_table[i].inuse
1304                     && (intf->seq_table[i].recv_msg->user == user)) {
1305                         intf->seq_table[i].inuse = 0;
1306                         smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
1307                         ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
1308                 }
1309         }
1310         spin_unlock_irqrestore(&intf->seq_lock, flags);
1311
1312         /*
1313          * Remove the user from the command receiver's table.  First
1314          * we build a list of everything (not using the standard link,
1315          * since other things may be using it till we do
1316          * synchronize_rcu()), then free everything in that list.
1317          */
1318         mutex_lock(&intf->cmd_rcvrs_mutex);
1319         list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1320                 if (rcvr->user == user) {
1321                         list_del_rcu(&rcvr->link);
1322                         rcvr->next = rcvrs;
1323                         rcvrs = rcvr;
1324                 }
1325         }
1326         mutex_unlock(&intf->cmd_rcvrs_mutex);
1327         synchronize_rcu();
1328         while (rcvrs) {
1329                 rcvr = rcvrs;
1330                 rcvrs = rcvr->next;
1331                 kfree(rcvr);
1332         }
1333
1334         kref_put(&intf->refcount, intf_free);
1335 }
1336
1337 int ipmi_destroy_user(struct ipmi_user *user)
1338 {
1339         _ipmi_destroy_user(user);
1340
1341         kref_put(&user->refcount, free_user);
1342
1343         return 0;
1344 }
1345 EXPORT_SYMBOL(ipmi_destroy_user);
1346
1347 int ipmi_get_version(struct ipmi_user *user,
1348                      unsigned char *major,
1349                      unsigned char *minor)
1350 {
1351         struct ipmi_device_id id;
1352         int rv, index;
1353
1354         user = acquire_ipmi_user(user, &index);
1355         if (!user)
1356                 return -ENODEV;
1357
1358         rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
1359         if (!rv) {
1360                 *major = ipmi_version_major(&id);
1361                 *minor = ipmi_version_minor(&id);
1362         }
1363         release_ipmi_user(user, index);
1364
1365         return rv;
1366 }
1367 EXPORT_SYMBOL(ipmi_get_version);
1368
1369 int ipmi_set_my_address(struct ipmi_user *user,
1370                         unsigned int  channel,
1371                         unsigned char address)
1372 {
1373         int index, rv = 0;
1374
1375         user = acquire_ipmi_user(user, &index);
1376         if (!user)
1377                 return -ENODEV;
1378
1379         if (channel >= IPMI_MAX_CHANNELS) {
1380                 rv = -EINVAL;
1381         } else {
1382                 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1383                 user->intf->addrinfo[channel].address = address;
1384         }
1385         release_ipmi_user(user, index);
1386
1387         return rv;
1388 }
1389 EXPORT_SYMBOL(ipmi_set_my_address);
1390
1391 int ipmi_get_my_address(struct ipmi_user *user,
1392                         unsigned int  channel,
1393                         unsigned char *address)
1394 {
1395         int index, rv = 0;
1396
1397         user = acquire_ipmi_user(user, &index);
1398         if (!user)
1399                 return -ENODEV;
1400
1401         if (channel >= IPMI_MAX_CHANNELS) {
1402                 rv = -EINVAL;
1403         } else {
1404                 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1405                 *address = user->intf->addrinfo[channel].address;
1406         }
1407         release_ipmi_user(user, index);
1408
1409         return rv;
1410 }
1411 EXPORT_SYMBOL(ipmi_get_my_address);
1412
1413 int ipmi_set_my_LUN(struct ipmi_user *user,
1414                     unsigned int  channel,
1415                     unsigned char LUN)
1416 {
1417         int index, rv = 0;
1418
1419         user = acquire_ipmi_user(user, &index);
1420         if (!user)
1421                 return -ENODEV;
1422
1423         if (channel >= IPMI_MAX_CHANNELS) {
1424                 rv = -EINVAL;
1425         } else {
1426                 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1427                 user->intf->addrinfo[channel].lun = LUN & 0x3;
1428         }
1429         release_ipmi_user(user, index);
1430
1431         return rv;
1432 }
1433 EXPORT_SYMBOL(ipmi_set_my_LUN);
1434
1435 int ipmi_get_my_LUN(struct ipmi_user *user,
1436                     unsigned int  channel,
1437                     unsigned char *address)
1438 {
1439         int index, rv = 0;
1440
1441         user = acquire_ipmi_user(user, &index);
1442         if (!user)
1443                 return -ENODEV;
1444
1445         if (channel >= IPMI_MAX_CHANNELS) {
1446                 rv = -EINVAL;
1447         } else {
1448                 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1449                 *address = user->intf->addrinfo[channel].lun;
1450         }
1451         release_ipmi_user(user, index);
1452
1453         return rv;
1454 }
1455 EXPORT_SYMBOL(ipmi_get_my_LUN);
1456
1457 int ipmi_get_maintenance_mode(struct ipmi_user *user)
1458 {
1459         int mode, index;
1460         unsigned long flags;
1461
1462         user = acquire_ipmi_user(user, &index);
1463         if (!user)
1464                 return -ENODEV;
1465
1466         spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
1467         mode = user->intf->maintenance_mode;
1468         spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1469         release_ipmi_user(user, index);
1470
1471         return mode;
1472 }
1473 EXPORT_SYMBOL(ipmi_get_maintenance_mode);
1474
1475 static void maintenance_mode_update(struct ipmi_smi *intf)
1476 {
1477         if (intf->handlers->set_maintenance_mode)
1478                 intf->handlers->set_maintenance_mode(
1479                         intf->send_info, intf->maintenance_mode_enable);
1480 }
1481
1482 int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
1483 {
1484         int rv = 0, index;
1485         unsigned long flags;
1486         struct ipmi_smi *intf = user->intf;
1487
1488         user = acquire_ipmi_user(user, &index);
1489         if (!user)
1490                 return -ENODEV;
1491
1492         spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1493         if (intf->maintenance_mode != mode) {
1494                 switch (mode) {
1495                 case IPMI_MAINTENANCE_MODE_AUTO:
1496                         intf->maintenance_mode_enable
1497                                 = (intf->auto_maintenance_timeout > 0);
1498                         break;
1499
1500                 case IPMI_MAINTENANCE_MODE_OFF:
1501                         intf->maintenance_mode_enable = false;
1502                         break;
1503
1504                 case IPMI_MAINTENANCE_MODE_ON:
1505                         intf->maintenance_mode_enable = true;
1506                         break;
1507
1508                 default:
1509                         rv = -EINVAL;
1510                         goto out_unlock;
1511                 }
1512                 intf->maintenance_mode = mode;
1513
1514                 maintenance_mode_update(intf);
1515         }
1516  out_unlock:
1517         spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
1518         release_ipmi_user(user, index);
1519
1520         return rv;
1521 }
1522 EXPORT_SYMBOL(ipmi_set_maintenance_mode);
1523
1524 int ipmi_set_gets_events(struct ipmi_user *user, bool val)
1525 {
1526         unsigned long        flags;
1527         struct ipmi_smi      *intf = user->intf;
1528         struct ipmi_recv_msg *msg, *msg2;
1529         struct list_head     msgs;
1530         int index;
1531
1532         user = acquire_ipmi_user(user, &index);
1533         if (!user)
1534                 return -ENODEV;
1535
1536         INIT_LIST_HEAD(&msgs);
1537
1538         spin_lock_irqsave(&intf->events_lock, flags);
1539         if (user->gets_events == val)
1540                 goto out;
1541
1542         user->gets_events = val;
1543
1544         if (val) {
1545                 if (atomic_inc_return(&intf->event_waiters) == 1)
1546                         need_waiter(intf);
1547         } else {
1548                 atomic_dec(&intf->event_waiters);
1549         }
1550
1551         if (intf->delivering_events)
1552                 /*
1553                  * Another thread is delivering events for this, so
1554                  * let it handle any new events.
1555                  */
1556                 goto out;
1557
1558         /* Deliver any queued events. */
1559         while (user->gets_events && !list_empty(&intf->waiting_events)) {
1560                 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
1561                         list_move_tail(&msg->link, &msgs);
1562                 intf->waiting_events_count = 0;
1563                 if (intf->event_msg_printed) {
1564                         dev_warn(intf->si_dev, "Event queue no longer full\n");
1565                         intf->event_msg_printed = 0;
1566                 }
1567
1568                 intf->delivering_events = 1;
1569                 spin_unlock_irqrestore(&intf->events_lock, flags);
1570
1571                 list_for_each_entry_safe(msg, msg2, &msgs, link) {
1572                         msg->user = user;
1573                         kref_get(&user->refcount);
1574                         deliver_local_response(intf, msg);
1575                 }
1576
1577                 spin_lock_irqsave(&intf->events_lock, flags);
1578                 intf->delivering_events = 0;
1579         }
1580
1581  out:
1582         spin_unlock_irqrestore(&intf->events_lock, flags);
1583         release_ipmi_user(user, index);
1584
1585         return 0;
1586 }
1587 EXPORT_SYMBOL(ipmi_set_gets_events);
1588
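/*
 * Command receivers (cmd_rcvrs) live on an RCU-protected list: the
 * lookup helpers below walk it with list_for_each_entry_rcu(), while
 * registration and removal take cmd_rcvrs_mutex and use
 * list_add_rcu()/list_del_rcu() plus synchronize_rcu() before freeing.
 */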
1589 static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
1590                                       unsigned char netfn,
1591                                       unsigned char cmd,
1592                                       unsigned char chan)
1593 {
1594         struct cmd_rcvr *rcvr;
1595
1596         list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1597                 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1598                                         && (rcvr->chans & (1 << chan)))
1599                         return rcvr;
1600         }
1601         return NULL;
1602 }
1603
1604 static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
1605                                  unsigned char netfn,
1606                                  unsigned char cmd,
1607                                  unsigned int  chans)
1608 {
1609         struct cmd_rcvr *rcvr;
1610
1611         list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1612                 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1613                                         && (rcvr->chans & chans))
1614                         return 0;
1615         }
1616         return 1;
1617 }
1618
1619 int ipmi_register_for_cmd(struct ipmi_user *user,
1620                           unsigned char netfn,
1621                           unsigned char cmd,
1622                           unsigned int  chans)
1623 {
1624         struct ipmi_smi *intf = user->intf;
1625         struct cmd_rcvr *rcvr;
1626         int rv = 0, index;
1627
1628         user = acquire_ipmi_user(user, &index);
1629         if (!user)
1630                 return -ENODEV;
1631
1632         rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1633         if (!rcvr) {
1634                 rv = -ENOMEM;
1635                 goto out_release;
1636         }
1637         rcvr->cmd = cmd;
1638         rcvr->netfn = netfn;
1639         rcvr->chans = chans;
1640         rcvr->user = user;
1641
1642         mutex_lock(&intf->cmd_rcvrs_mutex);
1643         /* Make sure the command/netfn is not already registered. */
1644         if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1645                 rv = -EBUSY;
1646                 goto out_unlock;
1647         }
1648
1649         smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
1650
1651         list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1652
1653 out_unlock:
1654         mutex_unlock(&intf->cmd_rcvrs_mutex);
1655         if (rv)
1656                 kfree(rcvr);
1657 out_release:
1658         release_ipmi_user(user, index);
1659
1660         return rv;
1661 }
1662 EXPORT_SYMBOL(ipmi_register_for_cmd);
1663
1664 int ipmi_unregister_for_cmd(struct ipmi_user *user,
1665                             unsigned char netfn,
1666                             unsigned char cmd,
1667                             unsigned int  chans)
1668 {
1669         struct ipmi_smi *intf = user->intf;
1670         struct cmd_rcvr *rcvr;
1671         struct cmd_rcvr *rcvrs = NULL;
1672         int i, rv = -ENOENT, index;
1673
1674         user = acquire_ipmi_user(user, &index);
1675         if (!user)
1676                 return -ENODEV;
1677
1678         mutex_lock(&intf->cmd_rcvrs_mutex);
1679         for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
1680                 if (((1 << i) & chans) == 0)
1681                         continue;
1682                 rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
1683                 if (rcvr == NULL)
1684                         continue;
1685                 if (rcvr->user == user) {
1686                         rv = 0;
1687                         rcvr->chans &= ~chans;
1688                         if (rcvr->chans == 0) {
1689                                 list_del_rcu(&rcvr->link);
1690                                 rcvr->next = rcvrs;
1691                                 rcvrs = rcvr;
1692                         }
1693                 }
1694         }
1695         mutex_unlock(&intf->cmd_rcvrs_mutex);
1696         synchronize_rcu();
1697         release_ipmi_user(user, index);
1698         while (rcvrs) {
1699                 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
1700                 rcvr = rcvrs;
1701                 rcvrs = rcvr->next;
1702                 kfree(rcvr);
1703         }
1704
1705         return rv;
1706 }
1707 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
1708
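/*
 * IPMB uses a simple two's-complement checksum: the low 8 bits of the
 * sum of all covered bytes plus the checksum byte must be zero.  A
 * receiver can therefore verify a protected region with something like:
 *
 *	if ((u8)(sum_of_covered_bytes + checksum) != 0)
 *		reject the message;
 */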
1709 static unsigned char
1710 ipmb_checksum(unsigned char *data, int size)
1711 {
1712         unsigned char csum = 0;
1713
1714         for (; size > 0; size--, data++)
1715                 csum += *data;
1716
1717         return -csum;
1718 }
1719
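/*
 * Build a Send Message request wrapping an IPMB frame.  Roughly, and
 * assuming the usual IPMB framing, the buffer ends up as:
 *
 *	data[0]    Send Message netfn/LUN
 *	data[1]    Send Message command
 *	data[2]    channel
 *	(data[3]   0 for a broadcast, shifting the rest down by one)
 *	data[3]    responder slave address
 *	data[4]    netfn/rsLUN
 *	data[5]    checksum over the two bytes above
 *	data[6]    requester (source) address
 *	data[7]    rqSeq/rqLUN
 *	data[8]    command
 *	data[9..]  message data, followed by a second checksum covering
 *	           everything from the requester address onward
 */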
1720 static inline void format_ipmb_msg(struct ipmi_smi_msg   *smi_msg,
1721                                    struct kernel_ipmi_msg *msg,
1722                                    struct ipmi_ipmb_addr *ipmb_addr,
1723                                    long                  msgid,
1724                                    unsigned char         ipmb_seq,
1725                                    int                   broadcast,
1726                                    unsigned char         source_address,
1727                                    unsigned char         source_lun)
1728 {
1729         int i = broadcast;
1730
1731         /* Format the IPMB header data. */
1732         smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1733         smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1734         smi_msg->data[2] = ipmb_addr->channel;
1735         if (broadcast)
1736                 smi_msg->data[3] = 0;
1737         smi_msg->data[i+3] = ipmb_addr->slave_addr;
1738         smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1739         smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
1740         smi_msg->data[i+6] = source_address;
1741         smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1742         smi_msg->data[i+8] = msg->cmd;
1743
1744         /* Now tack on the data to the message. */
1745         if (msg->data_len > 0)
1746                 memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
1747         smi_msg->data_size = msg->data_len + 9;
1748
1749         /* Now calculate the checksum and tack it on. */
1750         smi_msg->data[i+smi_msg->data_size]
1751                 = ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);
1752
1753         /*
1754          * Add on the checksum size and the offset from the
1755          * broadcast.
1756          */
1757         smi_msg->data_size += 1 + i;
1758
1759         smi_msg->msgid = msgid;
1760 }
1761
1762 static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
1763                                   struct kernel_ipmi_msg *msg,
1764                                   struct ipmi_lan_addr  *lan_addr,
1765                                   long                  msgid,
1766                                   unsigned char         ipmb_seq,
1767                                   unsigned char         source_lun)
1768 {
1769         /* Format the IPMB header data. */
1770         smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1771         smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1772         smi_msg->data[2] = lan_addr->channel;
1773         smi_msg->data[3] = lan_addr->session_handle;
1774         smi_msg->data[4] = lan_addr->remote_SWID;
1775         smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1776         smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
1777         smi_msg->data[7] = lan_addr->local_SWID;
1778         smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1779         smi_msg->data[9] = msg->cmd;
1780
1781         /* Now tack on the data to the message. */
1782         if (msg->data_len > 0)
1783                 memcpy(&smi_msg->data[10], msg->data, msg->data_len);
1784         smi_msg->data_size = msg->data_len + 10;
1785
1786         /* Now calculate the checksum and tack it on. */
1787         smi_msg->data[smi_msg->data_size]
1788                 = ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);
1789
1790         /*
1791          * Add on the checksum size.  Unlike IPMB, LAN messages
1792          * have no broadcast offset.
1793          */
1794         smi_msg->data_size += 1;
1795
1796         smi_msg->msgid = msgid;
1797 }
1798
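/*
 * Queue a message for transmission.  If a message is already in flight
 * (intf->curr_msg is set), the new one is appended to the high-priority
 * or normal transmit list and NULL is returned; otherwise it becomes
 * the current message and is handed back so smi_send() can pass it
 * straight to the lower layer's sender.
 */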
1799 static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
1800                                              struct ipmi_smi_msg *smi_msg,
1801                                              int priority)
1802 {
1803         if (intf->curr_msg) {
1804                 if (priority > 0)
1805                         list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
1806                 else
1807                         list_add_tail(&smi_msg->link, &intf->xmit_msgs);
1808                 smi_msg = NULL;
1809         } else {
1810                 intf->curr_msg = smi_msg;
1811         }
1812
1813         return smi_msg;
1814 }
1815
1816 static void smi_send(struct ipmi_smi *intf,
1817                      const struct ipmi_smi_handlers *handlers,
1818                      struct ipmi_smi_msg *smi_msg, int priority)
1819 {
1820         int run_to_completion = intf->run_to_completion;
1821         unsigned long flags = 0;
1822
1823         if (!run_to_completion)
1824                 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
1825         smi_msg = smi_add_send_msg(intf, smi_msg, priority);
1826
1827         if (!run_to_completion)
1828                 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
1829
1830         if (smi_msg)
1831                 handlers->sender(intf->send_info, smi_msg);
1832 }
1833
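/*
 * Cold reset, warm reset, and any firmware-transfer netfn request count
 * as maintenance-mode commands; sending one arms the automatic
 * maintenance-mode timeouts handled in the request paths below.
 */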
1834 static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
1835 {
1836         return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
1837                  && ((msg->cmd == IPMI_COLD_RESET_CMD)
1838                      || (msg->cmd == IPMI_WARM_RESET_CMD)))
1839                 || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
1840 }
1841
1842 static int i_ipmi_req_sysintf(struct ipmi_smi        *intf,
1843                               struct ipmi_addr       *addr,
1844                               long                   msgid,
1845                               struct kernel_ipmi_msg *msg,
1846                               struct ipmi_smi_msg    *smi_msg,
1847                               struct ipmi_recv_msg   *recv_msg,
1848                               int                    retries,
1849                               unsigned int           retry_time_ms)
1850 {
1851         struct ipmi_system_interface_addr *smi_addr;
1852
1853         if (msg->netfn & 1)
1854                 /* Responses are not allowed to the SMI. */
1855                 return -EINVAL;
1856
1857         smi_addr = (struct ipmi_system_interface_addr *) addr;
1858         if (smi_addr->lun > 3) {
1859                 ipmi_inc_stat(intf, sent_invalid_commands);
1860                 return -EINVAL;
1861         }
1862
1863         memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1864
1865         if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1866             && ((msg->cmd == IPMI_SEND_MSG_CMD)
1867                 || (msg->cmd == IPMI_GET_MSG_CMD)
1868                 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
1869                 /*
1870                  * We don't let the user do these, since we manage
1871                  * the sequence numbers.
1872                  */
1873                 ipmi_inc_stat(intf, sent_invalid_commands);
1874                 return -EINVAL;
1875         }
1876
1877         if (is_maintenance_mode_cmd(msg)) {
1878                 unsigned long flags;
1879
1880                 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1881                 intf->auto_maintenance_timeout
1882                         = maintenance_mode_timeout_ms;
1883                 if (!intf->maintenance_mode
1884                     && !intf->maintenance_mode_enable) {
1885                         intf->maintenance_mode_enable = true;
1886                         maintenance_mode_update(intf);
1887                 }
1888                 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
1889                                        flags);
1890         }
1891
1892         if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
1893                 ipmi_inc_stat(intf, sent_invalid_commands);
1894                 return -EMSGSIZE;
1895         }
1896
1897         smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1898         smi_msg->data[1] = msg->cmd;
1899         smi_msg->msgid = msgid;
1900         smi_msg->user_data = recv_msg;
1901         if (msg->data_len > 0)
1902                 memcpy(&smi_msg->data[2], msg->data, msg->data_len);
1903         smi_msg->data_size = msg->data_len + 2;
1904         ipmi_inc_stat(intf, sent_local_commands);
1905
1906         return 0;
1907 }
1908
1909 static int i_ipmi_req_ipmb(struct ipmi_smi        *intf,
1910                            struct ipmi_addr       *addr,
1911                            long                   msgid,
1912                            struct kernel_ipmi_msg *msg,
1913                            struct ipmi_smi_msg    *smi_msg,
1914                            struct ipmi_recv_msg   *recv_msg,
1915                            unsigned char          source_address,
1916                            unsigned char          source_lun,
1917                            int                    retries,
1918                            unsigned int           retry_time_ms)
1919 {
1920         struct ipmi_ipmb_addr *ipmb_addr;
1921         unsigned char ipmb_seq;
1922         long seqid;
1923         int broadcast = 0;
1924         struct ipmi_channel *chans;
1925         int rv = 0;
1926
1927         if (addr->channel >= IPMI_MAX_CHANNELS) {
1928                 ipmi_inc_stat(intf, sent_invalid_commands);
1929                 return -EINVAL;
1930         }
1931
1932         chans = READ_ONCE(intf->channel_list)->c;
1933
1934         if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
1935                 ipmi_inc_stat(intf, sent_invalid_commands);
1936                 return -EINVAL;
1937         }
1938
1939         if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1940                 /*
1941                  * Broadcasts add a zero at the beginning of the
1942                  * message but are otherwise the same as an IPMB
1943                  * address.
1944                  */
1945                 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1946                 broadcast = 1;
1947                 retries = 0; /* Don't retry broadcasts. */
1948         }
1949
1950         /*
1951          * 9 for the header and 1 for the checksum, plus
1952          * possibly one for the broadcast.
1953          */
1954         if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1955                 ipmi_inc_stat(intf, sent_invalid_commands);
1956                 return -EMSGSIZE;
1957         }
1958
1959         ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1960         if (ipmb_addr->lun > 3) {
1961                 ipmi_inc_stat(intf, sent_invalid_commands);
1962                 return -EINVAL;
1963         }
1964
1965         memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1966
1967         if (recv_msg->msg.netfn & 0x1) {
1968                 /*
1969                  * It's a response, so use the user's sequence
1970                  * from msgid.
1971                  */
1972                 ipmi_inc_stat(intf, sent_ipmb_responses);
1973                 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1974                                 msgid, broadcast,
1975                                 source_address, source_lun);
1976
1977                 /*
1978                  * Save the receive message so we can use it
1979                  * to deliver the response.
1980                  */
1981                 smi_msg->user_data = recv_msg;
1982         } else {
1983                 /* It's a command, so get a sequence for it. */
1984                 unsigned long flags;
1985
1986                 spin_lock_irqsave(&intf->seq_lock, flags);
1987
1988                 if (is_maintenance_mode_cmd(msg))
1989                         intf->ipmb_maintenance_mode_timeout =
1990                                 maintenance_mode_timeout_ms;
1991
1992                 if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
1993                         /* Different default in maintenance mode */
1994                         retry_time_ms = default_maintenance_retry_ms;
1995
1996                 /*
1997                  * Create a sequence number with the given
1998                  * timeout and retries.
1999                  */
2000                 rv = intf_next_seq(intf,
2001                                    recv_msg,
2002                                    retry_time_ms,
2003                                    retries,
2004                                    broadcast,
2005                                    &ipmb_seq,
2006                                    &seqid);
2007                 if (rv)
2008                         /*
2009                          * We have used up all the sequence numbers,
2010                          * probably, so abort.
2011                          */
2012                         goto out_err;
2013
2014                 ipmi_inc_stat(intf, sent_ipmb_commands);
2015
2016                 /*
2017                  * Store the sequence number in the message,
2018                  * so that when the send message response
2019                  * comes back we can start the timer.
2020                  */
2021                 format_ipmb_msg(smi_msg, msg, ipmb_addr,
2022                                 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
2023                                 ipmb_seq, broadcast,
2024                                 source_address, source_lun);
2025
2026                 /*
2027                  * Copy the message into the recv message data, so we
2028                  * can retransmit it later if necessary.
2029                  */
2030                 memcpy(recv_msg->msg_data, smi_msg->data,
2031                        smi_msg->data_size);
2032                 recv_msg->msg.data = recv_msg->msg_data;
2033                 recv_msg->msg.data_len = smi_msg->data_size;
2034
2035                 /*
2036                  * We don't unlock until here, because we need
2037                  * to copy the completed message into the
2038                  * recv_msg before we release the lock.
2039                  * Otherwise, race conditions may bite us.  I
2040                  * know that's pretty paranoid, but I prefer
2041                  * to be correct.
2042                  */
2043 out_err:
2044                 spin_unlock_irqrestore(&intf->seq_lock, flags);
2045         }
2046
2047         return rv;
2048 }
2049
2050 static int i_ipmi_req_lan(struct ipmi_smi        *intf,
2051                           struct ipmi_addr       *addr,
2052                           long                   msgid,
2053                           struct kernel_ipmi_msg *msg,
2054                           struct ipmi_smi_msg    *smi_msg,
2055                           struct ipmi_recv_msg   *recv_msg,
2056                           unsigned char          source_lun,
2057                           int                    retries,
2058                           unsigned int           retry_time_ms)
2059 {
2060         struct ipmi_lan_addr  *lan_addr;
2061         unsigned char ipmb_seq;
2062         long seqid;
2063         struct ipmi_channel *chans;
2064         int rv = 0;
2065
2066         if (addr->channel >= IPMI_MAX_CHANNELS) {
2067                 ipmi_inc_stat(intf, sent_invalid_commands);
2068                 return -EINVAL;
2069         }
2070
2071         chans = READ_ONCE(intf->channel_list)->c;
2072
2073         if ((chans[addr->channel].medium
2074                                 != IPMI_CHANNEL_MEDIUM_8023LAN)
2075                         && (chans[addr->channel].medium
2076                             != IPMI_CHANNEL_MEDIUM_ASYNC)) {
2077                 ipmi_inc_stat(intf, sent_invalid_commands);
2078                 return -EINVAL;
2079         }
2080
2081         /* 11 for the header and 1 for the checksum. */
2082         if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
2083                 ipmi_inc_stat(intf, sent_invalid_commands);
2084                 return -EMSGSIZE;
2085         }
2086
2087         lan_addr = (struct ipmi_lan_addr *) addr;
2088         if (lan_addr->lun > 3) {
2089                 ipmi_inc_stat(intf, sent_invalid_commands);
2090                 return -EINVAL;
2091         }
2092
2093         memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
2094
2095         if (recv_msg->msg.netfn & 0x1) {
2096                 /*
2097                  * It's a response, so use the user's sequence
2098                  * from msgid.
2099                  */
2100                 ipmi_inc_stat(intf, sent_lan_responses);
2101                 format_lan_msg(smi_msg, msg, lan_addr, msgid,
2102                                msgid, source_lun);
2103
2104                 /*
2105                  * Save the receive message so we can use it
2106                  * to deliver the response.
2107                  */
2108                 smi_msg->user_data = recv_msg;
2109         } else {
2110                 /* It's a command, so get a sequence for it. */
2111                 unsigned long flags;
2112
2113                 spin_lock_irqsave(&intf->seq_lock, flags);
2114
2115                 /*
2116                  * Create a sequence number with the given
2117                  * timeout and retries.
2118                  */
2119                 rv = intf_next_seq(intf,
2120                                    recv_msg,
2121                                    retry_time_ms,
2122                                    retries,
2123                                    0,
2124                                    &ipmb_seq,
2125                                    &seqid);
2126                 if (rv)
2127                         /*
2128                          * We have used up all the sequence numbers,
2129                          * probably, so abort.
2130                          */
2131                         goto out_err;
2132
2133                 ipmi_inc_stat(intf, sent_lan_commands);
2134
2135                 /*
2136                  * Store the sequence number in the message,
2137                  * so that when the send message response
2138                  * comes back we can start the timer.
2139                  */
2140                 format_lan_msg(smi_msg, msg, lan_addr,
2141                                STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
2142                                ipmb_seq, source_lun);
2143
2144                 /*
2145                  * Copy the message into the recv message data, so we
2146                  * can retransmit it later if necessary.
2147                  */
2148                 memcpy(recv_msg->msg_data, smi_msg->data,
2149                        smi_msg->data_size);
2150                 recv_msg->msg.data = recv_msg->msg_data;
2151                 recv_msg->msg.data_len = smi_msg->data_size;
2152
2153                 /*
2154                  * We don't unlock until here, because we need
2155                  * to copy the completed message into the
2156                  * recv_msg before we release the lock.
2157                  * Otherwise, race conditions may bite us.  I
2158                  * know that's pretty paranoid, but I prefer
2159                  * to be correct.
2160                  */
2161 out_err:
2162                 spin_unlock_irqrestore(&intf->seq_lock, flags);
2163         }
2164
2165         return rv;
2166 }
2167
2168 /*
2169  * Separate from ipmi_request so that the user does not have to be
2170  * supplied in certain circumstances (mainly at panic time).  If
2171  * messages are supplied, they will be freed, even if an error
2172  * occurs.
2173  */
2174 static int i_ipmi_request(struct ipmi_user     *user,
2175                           struct ipmi_smi      *intf,
2176                           struct ipmi_addr     *addr,
2177                           long                 msgid,
2178                           struct kernel_ipmi_msg *msg,
2179                           void                 *user_msg_data,
2180                           void                 *supplied_smi,
2181                           struct ipmi_recv_msg *supplied_recv,
2182                           int                  priority,
2183                           unsigned char        source_address,
2184                           unsigned char        source_lun,
2185                           int                  retries,
2186                           unsigned int         retry_time_ms)
2187 {
2188         struct ipmi_smi_msg *smi_msg;
2189         struct ipmi_recv_msg *recv_msg;
2190         int rv = 0;
2191
2192         if (supplied_recv)
2193                 recv_msg = supplied_recv;
2194         else {
2195                 recv_msg = ipmi_alloc_recv_msg();
2196                 if (recv_msg == NULL) {
2197                         rv = -ENOMEM;
2198                         goto out;
2199                 }
2200         }
2201         recv_msg->user_msg_data = user_msg_data;
2202
2203         if (supplied_smi)
2204                 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
2205         else {
2206                 smi_msg = ipmi_alloc_smi_msg();
2207                 if (smi_msg == NULL) {
2208                         ipmi_free_recv_msg(recv_msg);
2209                         rv = -ENOMEM;
2210                         goto out;
2211                 }
2212         }
2213
2214         rcu_read_lock();
2215         if (intf->in_shutdown) {
2216                 rv = -ENODEV;
2217                 goto out_err;
2218         }
2219
2220         recv_msg->user = user;
2221         if (user)
2222                 /* The put happens when the message is freed. */
2223                 kref_get(&user->refcount);
2224         recv_msg->msgid = msgid;
2225         /*
2226          * Store the message to send in the receive message so timeout
2227          * responses can get the proper response data.
2228          */
2229         recv_msg->msg = *msg;
2230
2231         if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
2232                 rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg,
2233                                         recv_msg, retries, retry_time_ms);
2234         } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
2235                 rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
2236                                      source_address, source_lun,
2237                                      retries, retry_time_ms);
2238         } else if (is_lan_addr(addr)) {
2239                 rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
2240                                     source_lun, retries, retry_time_ms);
2241         } else {
2242                 /* Unknown address type. */
2243                 ipmi_inc_stat(intf, sent_invalid_commands);
2244                 rv = -EINVAL;
2245         }
2246
2247         if (rv) {
2248 out_err:
2249                 ipmi_free_smi_msg(smi_msg);
2250                 ipmi_free_recv_msg(recv_msg);
2251         } else {
2252                 ipmi_debug_msg("Send", smi_msg->data, smi_msg->data_size);
2253
2254                 smi_send(intf, intf->handlers, smi_msg, priority);
2255         }
2256         rcu_read_unlock();
2257
2258 out:
2259         return rv;
2260 }
2261
2262 static int check_addr(struct ipmi_smi  *intf,
2263                       struct ipmi_addr *addr,
2264                       unsigned char    *saddr,
2265                       unsigned char    *lun)
2266 {
2267         if (addr->channel >= IPMI_MAX_CHANNELS)
2268                 return -EINVAL;
2269         addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
2270         *lun = intf->addrinfo[addr->channel].lun;
2271         *saddr = intf->addrinfo[addr->channel].address;
2272         return 0;
2273 }
2274
2275 int ipmi_request_settime(struct ipmi_user *user,
2276                          struct ipmi_addr *addr,
2277                          long             msgid,
2278                          struct kernel_ipmi_msg  *msg,
2279                          void             *user_msg_data,
2280                          int              priority,
2281                          int              retries,
2282                          unsigned int     retry_time_ms)
2283 {
2284         unsigned char saddr = 0, lun = 0;
2285         int rv, index;
2286
2287         if (!user)
2288                 return -EINVAL;
2289
2290         user = acquire_ipmi_user(user, &index);
2291         if (!user)
2292                 return -ENODEV;
2293
2294         rv = check_addr(user->intf, addr, &saddr, &lun);
2295         if (!rv)
2296                 rv = i_ipmi_request(user,
2297                                     user->intf,
2298                                     addr,
2299                                     msgid,
2300                                     msg,
2301                                     user_msg_data,
2302                                     NULL, NULL,
2303                                     priority,
2304                                     saddr,
2305                                     lun,
2306                                     retries,
2307                                     retry_time_ms);
2308
2309         release_ipmi_user(user, index);
2310         return rv;
2311 }
2312 EXPORT_SYMBOL(ipmi_request_settime);
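
/*
 * Illustrative in-kernel usage (a hypothetical caller that already
 * holds a registered ipmi_user), mirroring what send_get_device_id_cmd()
 * below does through i_ipmi_request():
 *
 *	struct ipmi_system_interface_addr si = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel = IPMI_BMC_CHANNEL,
 *		.lun = 0,
 *	};
 *	struct kernel_ipmi_msg msg = {
 *		.netfn = IPMI_NETFN_APP_REQUEST,
 *		.cmd = IPMI_GET_DEVICE_ID_CMD,
 *	};
 *	int rv;
 *
 *	rv = ipmi_request_settime(user, (struct ipmi_addr *) &si, 0, &msg,
 *				  NULL, 0, -1, 0);
 *
 * As with ipmi_request_supply_msgs() below, retries of -1 and
 * retry_time_ms of 0 request the driver defaults.
 */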
2313
2314 int ipmi_request_supply_msgs(struct ipmi_user     *user,
2315                              struct ipmi_addr     *addr,
2316                              long                 msgid,
2317                              struct kernel_ipmi_msg *msg,
2318                              void                 *user_msg_data,
2319                              void                 *supplied_smi,
2320                              struct ipmi_recv_msg *supplied_recv,
2321                              int                  priority)
2322 {
2323         unsigned char saddr = 0, lun = 0;
2324         int rv, index;
2325
2326         if (!user)
2327                 return -EINVAL;
2328
2329         user = acquire_ipmi_user(user, &index);
2330         if (!user)
2331                 return -ENODEV;
2332
2333         rv = check_addr(user->intf, addr, &saddr, &lun);
2334         if (!rv)
2335                 rv = i_ipmi_request(user,
2336                                     user->intf,
2337                                     addr,
2338                                     msgid,
2339                                     msg,
2340                                     user_msg_data,
2341                                     supplied_smi,
2342                                     supplied_recv,
2343                                     priority,
2344                                     saddr,
2345                                     lun,
2346                                     -1, 0);
2347
2348         release_ipmi_user(user, index);
2349         return rv;
2350 }
2351 EXPORT_SYMBOL(ipmi_request_supply_msgs);
2352
2353 static void bmc_device_id_handler(struct ipmi_smi *intf,
2354                                   struct ipmi_recv_msg *msg)
2355 {
2356         int rv;
2357
2358         if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2359                         || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2360                         || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
2361                 dev_warn(intf->si_dev,
2362                          "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
2363                          msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
2364                 return;
2365         }
2366
2367         rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
2368                         msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
2369         if (rv) {
2370                 dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv);
2371                 intf->bmc->dyn_id_set = 0;
2372         } else {
2373                 /*
2374                  * Make sure the id data is available before setting
2375                  * dyn_id_set.
2376                  */
2377                 smp_wmb();
2378                 intf->bmc->dyn_id_set = 1;
2379         }
2380
2381         wake_up(&intf->waitq);
2382 }
2383
2384 static int
2385 send_get_device_id_cmd(struct ipmi_smi *intf)
2386 {
2387         struct ipmi_system_interface_addr si;
2388         struct kernel_ipmi_msg msg;
2389
2390         si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2391         si.channel = IPMI_BMC_CHANNEL;
2392         si.lun = 0;
2393
2394         msg.netfn = IPMI_NETFN_APP_REQUEST;
2395         msg.cmd = IPMI_GET_DEVICE_ID_CMD;
2396         msg.data = NULL;
2397         msg.data_len = 0;
2398
2399         return i_ipmi_request(NULL,
2400                               intf,
2401                               (struct ipmi_addr *) &si,
2402                               0,
2403                               &msg,
2404                               intf,
2405                               NULL,
2406                               NULL,
2407                               0,
2408                               intf->addrinfo[0].address,
2409                               intf->addrinfo[0].lun,
2410                               -1, 0);
2411 }
2412
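/*
 * Kick off a Get Device ID and wait for bmc_device_id_handler() to
 * finish it.  dyn_id_set acts as a small state machine: 2 means a fetch
 * is in progress, 1 means fetch_id is valid (published with smp_wmb()
 * in the handler, paired with the smp_rmb() below), and 0 means the
 * fetch failed.
 */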
2413 static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
2414 {
2415         int rv;
2416
2417         bmc->dyn_id_set = 2;
2418
2419         intf->null_user_handler = bmc_device_id_handler;
2420
2421         rv = send_get_device_id_cmd(intf);
2422         if (rv)
2423                 return rv;
2424
2425         wait_event(intf->waitq, bmc->dyn_id_set != 2);
2426
2427         if (!bmc->dyn_id_set)
2428                 rv = -EIO; /* Something went wrong in the fetch. */
2429
2430         /* dyn_id_set makes the id data available. */
2431         smp_rmb();
2432
2433         intf->null_user_handler = NULL;
2434
2435         return rv;
2436 }
2437
2438 /*
2439  * Fetch the device id for the bmc/interface.  You must pass in either
2440  * bmc or intf, this code will get the other one.  If the data has
2441  * bmc or intf; this code will get the other one.  If the data has
2442  * it will run a new fetch.
2443  *
2444  * Except for the first time this is called (in ipmi_register_smi()),
2445  * this will always return good data.
2446  */
2447 static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2448                                struct ipmi_device_id *id,
2449                                bool *guid_set, guid_t *guid, int intf_num)
2450 {
2451         int rv = 0;
2452         int prev_dyn_id_set, prev_guid_set;
2453         bool intf_set = intf != NULL;
2454
2455         if (!intf) {
2456                 mutex_lock(&bmc->dyn_mutex);
2457 retry_bmc_lock:
2458                 if (list_empty(&bmc->intfs)) {
2459                         mutex_unlock(&bmc->dyn_mutex);
2460                         return -ENOENT;
2461                 }
2462                 intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
2463                                         bmc_link);
2464                 kref_get(&intf->refcount);
2465                 mutex_unlock(&bmc->dyn_mutex);
2466                 mutex_lock(&intf->bmc_reg_mutex);
2467                 mutex_lock(&bmc->dyn_mutex);
2468                 if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
2469                                              bmc_link)) {
2470                         mutex_unlock(&intf->bmc_reg_mutex);
2471                         kref_put(&intf->refcount, intf_free);
2472                         goto retry_bmc_lock;
2473                 }
2474         } else {
2475                 mutex_lock(&intf->bmc_reg_mutex);
2476                 bmc = intf->bmc;
2477                 mutex_lock(&bmc->dyn_mutex);
2478                 kref_get(&intf->refcount);
2479         }
2480
2481         /* If we have a valid and current ID, just return that. */
2482         if (intf->in_bmc_register ||
2483             (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
2484                 goto out_noprocessing;
2485
2486         prev_guid_set = bmc->dyn_guid_set;
2487         __get_guid(intf);
2488
2489         prev_dyn_id_set = bmc->dyn_id_set;
2490         rv = __get_device_id(intf, bmc);
2491         if (rv)
2492                 goto out;
2493
2494         /*
2495          * The guid, device id, manufacturer id, and product id should
2496          * not change on a BMC.  If it does we have to do some dancing.
2497          */
2498         if (!intf->bmc_registered
2499             || (!prev_guid_set && bmc->dyn_guid_set)
2500             || (!prev_dyn_id_set && bmc->dyn_id_set)
2501             || (prev_guid_set && bmc->dyn_guid_set
2502                 && !guid_equal(&bmc->guid, &bmc->fetch_guid))
2503             || bmc->id.device_id != bmc->fetch_id.device_id
2504             || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
2505             || bmc->id.product_id != bmc->fetch_id.product_id) {
2506                 struct ipmi_device_id id = bmc->fetch_id;
2507                 int guid_set = bmc->dyn_guid_set;
2508                 guid_t guid;
2509
2510                 guid = bmc->fetch_guid;
2511                 mutex_unlock(&bmc->dyn_mutex);
2512
2513                 __ipmi_bmc_unregister(intf);
2514                 /* Fill in the temporary BMC for good measure. */
2515                 intf->bmc->id = id;
2516                 intf->bmc->dyn_guid_set = guid_set;
2517                 intf->bmc->guid = guid;
2518                 if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
2519                         need_waiter(intf); /* Retry later on an error. */
2520                 else
2521                         __scan_channels(intf, &id);
2522
2523
2524                 if (!intf_set) {
2525                         /*
2526                          * We weren't given the interface on the
2527                          * command line, so restart the operation on
2528                          * the next interface for the BMC.
2529                          */
2530                         mutex_unlock(&intf->bmc_reg_mutex);
2531                         mutex_lock(&bmc->dyn_mutex);
2532                         goto retry_bmc_lock;
2533                 }
2534
2535                 /* We have a new BMC, set it up. */
2536                 bmc = intf->bmc;
2537                 mutex_lock(&bmc->dyn_mutex);
2538                 goto out_noprocessing;
2539         } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
2540                 /* Version info changed, scan the channels again. */
2541                 __scan_channels(intf, &bmc->fetch_id);
2542
2543         bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
2544
2545 out:
2546         if (rv && prev_dyn_id_set) {
2547                 rv = 0; /* Ignore failures if we have previous data. */
2548                 bmc->dyn_id_set = prev_dyn_id_set;
2549         }
2550         if (!rv) {
2551                 bmc->id = bmc->fetch_id;
2552                 if (bmc->dyn_guid_set)
2553                         bmc->guid = bmc->fetch_guid;
2554                 else if (prev_guid_set)
2555                         /*
2556                          * The guid used to be valid and it failed to fetch,
2557                          * just use the cached value.
2558                          */
2559                         bmc->dyn_guid_set = prev_guid_set;
2560         }
2561 out_noprocessing:
2562         if (!rv) {
2563                 if (id)
2564                         *id = bmc->id;
2565
2566                 if (guid_set)
2567                         *guid_set = bmc->dyn_guid_set;
2568
2569                 if (guid && bmc->dyn_guid_set)
2570                         *guid = bmc->guid;
2571         }
2572
2573         mutex_unlock(&bmc->dyn_mutex);
2574         mutex_unlock(&intf->bmc_reg_mutex);
2575
2576         kref_put(&intf->refcount, intf_free);
2577         return rv;
2578 }
2579
2580 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2581                              struct ipmi_device_id *id,
2582                              bool *guid_set, guid_t *guid)
2583 {
2584         return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
2585 }
2586
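/*
 * Read-only sysfs attributes exported on the BMC platform device.  With
 * the standard platform bus layout these appear under something like
 * /sys/devices/platform/ipmi_bmc.<n>/ (path given only as an
 * illustration); each read refreshes the cached device id if it has
 * expired.
 */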
2587 static ssize_t device_id_show(struct device *dev,
2588                               struct device_attribute *attr,
2589                               char *buf)
2590 {
2591         struct bmc_device *bmc = to_bmc_device(dev);
2592         struct ipmi_device_id id;
2593         int rv;
2594
2595         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2596         if (rv)
2597                 return rv;
2598
2599         return snprintf(buf, 10, "%u\n", id.device_id);
2600 }
2601 static DEVICE_ATTR_RO(device_id);
2602
2603 static ssize_t provides_device_sdrs_show(struct device *dev,
2604                                          struct device_attribute *attr,
2605                                          char *buf)
2606 {
2607         struct bmc_device *bmc = to_bmc_device(dev);
2608         struct ipmi_device_id id;
2609         int rv;
2610
2611         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2612         if (rv)
2613                 return rv;
2614
2615         return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7);
2616 }
2617 static DEVICE_ATTR_RO(provides_device_sdrs);
2618
2619 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2620                              char *buf)
2621 {
2622         struct bmc_device *bmc = to_bmc_device(dev);
2623         struct ipmi_device_id id;
2624         int rv;
2625
2626         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2627         if (rv)
2628                 return rv;
2629
2630         return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F);
2631 }
2632 static DEVICE_ATTR_RO(revision);
2633
2634 static ssize_t firmware_revision_show(struct device *dev,
2635                                       struct device_attribute *attr,
2636                                       char *buf)
2637 {
2638         struct bmc_device *bmc = to_bmc_device(dev);
2639         struct ipmi_device_id id;
2640         int rv;
2641
2642         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2643         if (rv)
2644                 return rv;
2645
2646         return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1,
2647                         id.firmware_revision_2);
2648 }
2649 static DEVICE_ATTR_RO(firmware_revision);
2650
2651 static ssize_t ipmi_version_show(struct device *dev,
2652                                  struct device_attribute *attr,
2653                                  char *buf)
2654 {
2655         struct bmc_device *bmc = to_bmc_device(dev);
2656         struct ipmi_device_id id;
2657         int rv;
2658
2659         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2660         if (rv)
2661                 return rv;
2662
2663         return snprintf(buf, 20, "%u.%u\n",
2664                         ipmi_version_major(&id),
2665                         ipmi_version_minor(&id));
2666 }
2667 static DEVICE_ATTR_RO(ipmi_version);
2668
2669 static ssize_t add_dev_support_show(struct device *dev,
2670                                     struct device_attribute *attr,
2671                                     char *buf)
2672 {
2673         struct bmc_device *bmc = to_bmc_device(dev);
2674         struct ipmi_device_id id;
2675         int rv;
2676
2677         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2678         if (rv)
2679                 return rv;
2680
2681         return snprintf(buf, 10, "0x%02x\n", id.additional_device_support);
2682 }
2683 static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
2684                    NULL);
2685
2686 static ssize_t manufacturer_id_show(struct device *dev,
2687                                     struct device_attribute *attr,
2688                                     char *buf)
2689 {
2690         struct bmc_device *bmc = to_bmc_device(dev);
2691         struct ipmi_device_id id;
2692         int rv;
2693
2694         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2695         if (rv)
2696                 return rv;
2697
2698         return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id);
2699 }
2700 static DEVICE_ATTR_RO(manufacturer_id);
2701
2702 static ssize_t product_id_show(struct device *dev,
2703                                struct device_attribute *attr,
2704                                char *buf)
2705 {
2706         struct bmc_device *bmc = to_bmc_device(dev);
2707         struct ipmi_device_id id;
2708         int rv;
2709
2710         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2711         if (rv)
2712                 return rv;
2713
2714         return snprintf(buf, 10, "0x%4.4x\n", id.product_id);
2715 }
2716 static DEVICE_ATTR_RO(product_id);
2717
2718 static ssize_t aux_firmware_rev_show(struct device *dev,
2719                                      struct device_attribute *attr,
2720                                      char *buf)
2721 {
2722         struct bmc_device *bmc = to_bmc_device(dev);
2723         struct ipmi_device_id id;
2724         int rv;
2725
2726         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2727         if (rv)
2728                 return rv;
2729
2730         return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2731                         id.aux_firmware_revision[3],
2732                         id.aux_firmware_revision[2],
2733                         id.aux_firmware_revision[1],
2734                         id.aux_firmware_revision[0]);
2735 }
2736 static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
2737
2738 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2739                          char *buf)
2740 {
2741         struct bmc_device *bmc = to_bmc_device(dev);
2742         bool guid_set;
2743         guid_t guid;
2744         int rv;
2745
2746         rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
2747         if (rv)
2748                 return rv;
2749         if (!guid_set)
2750                 return -ENOENT;
2751
2752         return snprintf(buf, UUID_STRING_LEN + 1 + 1, "%pUl\n", &guid);
2753 }
2754 static DEVICE_ATTR_RO(guid);
2755
2756 static struct attribute *bmc_dev_attrs[] = {
2757         &dev_attr_device_id.attr,
2758         &dev_attr_provides_device_sdrs.attr,
2759         &dev_attr_revision.attr,
2760         &dev_attr_firmware_revision.attr,
2761         &dev_attr_ipmi_version.attr,
2762         &dev_attr_additional_device_support.attr,
2763         &dev_attr_manufacturer_id.attr,
2764         &dev_attr_product_id.attr,
2765         &dev_attr_aux_firmware_revision.attr,
2766         &dev_attr_guid.attr,
2767         NULL
2768 };
2769
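/*
 * Hide attributes the BMC does not actually provide:
 * aux_firmware_revision only if the device id reported it, and guid
 * only if a GUID was successfully fetched.
 */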
2770 static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
2771                                        struct attribute *attr, int idx)
2772 {
2773         struct device *dev = kobj_to_dev(kobj);
2774         struct bmc_device *bmc = to_bmc_device(dev);
2775         umode_t mode = attr->mode;
2776         int rv;
2777
2778         if (attr == &dev_attr_aux_firmware_revision.attr) {
2779                 struct ipmi_device_id id;
2780
2781                 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2782                 return (!rv && id.aux_firmware_revision_set) ? mode : 0;
2783         }
2784         if (attr == &dev_attr_guid.attr) {
2785                 bool guid_set;
2786
2787                 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
2788                 return (!rv && guid_set) ? mode : 0;
2789         }
2790         return mode;
2791 }
2792
2793 static const struct attribute_group bmc_dev_attr_group = {
2794         .attrs          = bmc_dev_attrs,
2795         .is_visible     = bmc_dev_attr_is_visible,
2796 };
2797
2798 static const struct attribute_group *bmc_dev_attr_groups[] = {
2799         &bmc_dev_attr_group,
2800         NULL
2801 };
2802
2803 static const struct device_type bmc_device_type = {
2804         .groups         = bmc_dev_attr_groups,
2805 };
2806
2807 static int __find_bmc_guid(struct device *dev, void *data)
2808 {
2809         guid_t *guid = data;
2810         struct bmc_device *bmc;
2811         int rv;
2812
2813         if (dev->type != &bmc_device_type)
2814                 return 0;
2815
2816         bmc = to_bmc_device(dev);
2817         rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
2818         if (rv)
2819                 rv = kref_get_unless_zero(&bmc->usecount);
2820         return rv;
2821 }
2822
2823 /*
2824  * Returns with the bmc's usecount incremented, if it is non-NULL.
2825  */
2826 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
2827                                              guid_t *guid)
2828 {
2829         struct device *dev;
2830         struct bmc_device *bmc = NULL;
2831
2832         dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
2833         if (dev) {
2834                 bmc = to_bmc_device(dev);
2835                 put_device(dev);
2836         }
2837         return bmc;
2838 }
2839
2840 struct prod_dev_id {
2841         unsigned int  product_id;
2842         unsigned char device_id;
2843 };
2844
2845 static int __find_bmc_prod_dev_id(struct device *dev, void *data)
2846 {
2847         struct prod_dev_id *cid = data;
2848         struct bmc_device *bmc;
2849         int rv;
2850
2851         if (dev->type != &bmc_device_type)
2852                 return 0;
2853
2854         bmc = to_bmc_device(dev);
2855         rv = (bmc->id.product_id == cid->product_id
2856               && bmc->id.device_id == cid->device_id);
2857         if (rv)
2858                 rv = kref_get_unless_zero(&bmc->usecount);
2859         return rv;
2860 }
2861
2862 /*
2863  * Returns with the bmc's usecount incremented, if it is non-NULL.
2864  */
2865 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
2866         struct device_driver *drv,
2867         unsigned int product_id, unsigned char device_id)
2868 {
2869         struct prod_dev_id id = {
2870                 .product_id = product_id,
2871                 .device_id = device_id,
2872         };
2873         struct device *dev;
2874         struct bmc_device *bmc = NULL;
2875
2876         dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
2877         if (dev) {
2878                 bmc = to_bmc_device(dev);
2879                 put_device(dev);
2880         }
2881         return bmc;
2882 }
2883
2884 static DEFINE_IDA(ipmi_bmc_ida);
2885
2886 static void
2887 release_bmc_device(struct device *dev)
2888 {
2889         kfree(to_bmc_device(dev));
2890 }
2891
2892 static void cleanup_bmc_work(struct work_struct *work)
2893 {
2894         struct bmc_device *bmc = container_of(work, struct bmc_device,
2895                                               remove_work);
2896         int id = bmc->pdev.id; /* Unregister overwrites id */
2897
2898         platform_device_unregister(&bmc->pdev);
2899         ida_simple_remove(&ipmi_bmc_ida, id);
2900 }
2901
2902 static void
2903 cleanup_bmc_device(struct kref *ref)
2904 {
2905         struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
2906
2907         /*
2908          * Remove the platform device in a work queue to avoid issues
2909          * with removing the device attributes while reading a device
2910          * attribute.
2911          */
2912         schedule_work(&bmc->remove_work);
2913 }
2914
2915 /*
2916  * Must be called with intf->bmc_reg_mutex held.
2917  */
2918 static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
2919 {
2920         struct bmc_device *bmc = intf->bmc;
2921
2922         if (!intf->bmc_registered)
2923                 return;
2924
2925         sysfs_remove_link(&intf->si_dev->kobj, "bmc");
2926         sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
2927         kfree(intf->my_dev_name);
2928         intf->my_dev_name = NULL;
2929
2930         mutex_lock(&bmc->dyn_mutex);
2931         list_del(&intf->bmc_link);
2932         mutex_unlock(&bmc->dyn_mutex);
2933         intf->bmc = &intf->tmp_bmc;
2934         kref_put(&bmc->usecount, cleanup_bmc_device);
2935         intf->bmc_registered = false;
2936 }
2937
2938 static void ipmi_bmc_unregister(struct ipmi_smi *intf)
2939 {
2940         mutex_lock(&intf->bmc_reg_mutex);
2941         __ipmi_bmc_unregister(intf);
2942         mutex_unlock(&intf->bmc_reg_mutex);
2943 }
2944
2945 /*
2946  * Must be called with intf->bmc_reg_mutex held.
2947  */
2948 static int __ipmi_bmc_register(struct ipmi_smi *intf,
2949                                struct ipmi_device_id *id,
2950                                bool guid_set, guid_t *guid, int intf_num)
2951 {
2952         int               rv;
2953         struct bmc_device *bmc;
2954         struct bmc_device *old_bmc;
2955
2956         /*
2957          * platform_device_register() can cause bmc_reg_mutex to
2958          * be claimed because of the is_visible functions of
2959          * the attributes.  Eliminate possible recursion and
2960          * release the lock.
2961          */
2962         intf->in_bmc_register = true;
2963         mutex_unlock(&intf->bmc_reg_mutex);
2964
2965         /*
2966          * Try to find if there is already a bmc_device struct
2967          * representing the interfaced BMC.
2968          */
2969         mutex_lock(&ipmidriver_mutex);
2970         if (guid_set)
2971                 old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
2972         else
2973                 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
2974                                                     id->product_id,
2975                                                     id->device_id);
2976
2977         /*
2978          * If there is already a bmc_device, reuse it; otherwise
2979          * allocate and register a new BMC device.
2980          */
2981         if (old_bmc) {
2982                 bmc = old_bmc;
2983                 /*
2984                  * Note: old_bmc already has usecount incremented by
2985                  * the BMC find functions.
2986                  */
2987                 intf->bmc = old_bmc;
2988                 mutex_lock(&bmc->dyn_mutex);
2989                 list_add_tail(&intf->bmc_link, &bmc->intfs);
2990                 mutex_unlock(&bmc->dyn_mutex);
2991
2992                 dev_info(intf->si_dev,
2993                          "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2994                          bmc->id.manufacturer_id,
2995                          bmc->id.product_id,
2996                          bmc->id.device_id);
2997         } else {
2998                 bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
2999                 if (!bmc) {
3000                         rv = -ENOMEM;
3001                         goto out;
3002                 }
3003                 INIT_LIST_HEAD(&bmc->intfs);
3004                 mutex_init(&bmc->dyn_mutex);
3005                 INIT_WORK(&bmc->remove_work, cleanup_bmc_work);
3006
3007                 bmc->id = *id;
3008                 bmc->dyn_id_set = 1;
3009                 bmc->dyn_guid_set = guid_set;
3010                 bmc->guid = *guid;
3011                 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
3012
3013                 bmc->pdev.name = "ipmi_bmc";
3014
3015                 rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
3016                 if (rv < 0)
3017                         goto out;
3018                 bmc->pdev.dev.driver = &ipmidriver.driver;
3019                 bmc->pdev.id = rv;
3020                 bmc->pdev.dev.release = release_bmc_device;
3021                 bmc->pdev.dev.type = &bmc_device_type;
3022                 kref_init(&bmc->usecount);
3023
3024                 intf->bmc = bmc;
3025                 mutex_lock(&bmc->dyn_mutex);
3026                 list_add_tail(&intf->bmc_link, &bmc->intfs);
3027                 mutex_unlock(&bmc->dyn_mutex);
3028
3029                 rv = platform_device_register(&bmc->pdev);
3030                 if (rv) {
3031                         dev_err(intf->si_dev,
3032                                 "Unable to register bmc device: %d\n",
3033                                 rv);
3034                         goto out_list_del;
3035                 }
3036
3037                 dev_info(intf->si_dev,
3038                          "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
3039                          bmc->id.manufacturer_id,
3040                          bmc->id.product_id,
3041                          bmc->id.device_id);
3042         }
3043
3044         /*
3045          * create symlink from system interface device to bmc device
3046          * and back.
3047          */
3048         rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
3049         if (rv) {
3050                 dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv);
3051                 goto out_put_bmc;
3052         }
3053
3054         if (intf_num == -1)
3055                 intf_num = intf->intf_num;
3056         intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
3057         if (!intf->my_dev_name) {
3058                 rv = -ENOMEM;
3059                 dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n",
3060                         rv);
3061                 goto out_unlink1;
3062         }
3063
3064         rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
3065                                intf->my_dev_name);
3066         if (rv) {
3067                 kfree(intf->my_dev_name);
3068                 intf->my_dev_name = NULL;
3069                 dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n",
3070                         rv);
3071                 goto out_free_my_dev_name;
3072         }
3073
3074         intf->bmc_registered = true;
3075
3076 out:
3077         mutex_unlock(&ipmidriver_mutex);
3078         mutex_lock(&intf->bmc_reg_mutex);
3079         intf->in_bmc_register = false;
3080         return rv;
3081
3082
3083 out_free_my_dev_name:
3084         kfree(intf->my_dev_name);
3085         intf->my_dev_name = NULL;
3086
3087 out_unlink1:
3088         sysfs_remove_link(&intf->si_dev->kobj, "bmc");
3089
3090 out_put_bmc:
3091         mutex_lock(&bmc->dyn_mutex);
3092         list_del(&intf->bmc_link);
3093         mutex_unlock(&bmc->dyn_mutex);
3094         intf->bmc = &intf->tmp_bmc;
3095         kref_put(&bmc->usecount, cleanup_bmc_device);
3096         goto out;
3097
3098 out_list_del:
3099         mutex_lock(&bmc->dyn_mutex);
3100         list_del(&intf->bmc_link);
3101         mutex_unlock(&bmc->dyn_mutex);
3102         intf->bmc = &intf->tmp_bmc;
3103         put_device(&bmc->pdev.dev);
3104         goto out;
3105 }
3106
3107 static int
3108 send_guid_cmd(struct ipmi_smi *intf, int chan)
3109 {
3110         struct kernel_ipmi_msg            msg;
3111         struct ipmi_system_interface_addr si;
3112
3113         si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3114         si.channel = IPMI_BMC_CHANNEL;
3115         si.lun = 0;
3116
3117         msg.netfn = IPMI_NETFN_APP_REQUEST;
3118         msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
3119         msg.data = NULL;
3120         msg.data_len = 0;
3121         return i_ipmi_request(NULL,
3122                               intf,
3123                               (struct ipmi_addr *) &si,
3124                               0,
3125                               &msg,
3126                               intf,
3127                               NULL,
3128                               NULL,
3129                               0,
3130                               intf->addrinfo[0].address,
3131                               intf->addrinfo[0].lun,
3132                               -1, 0);
3133 }
3134
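/*
 * Response handler for the Get Device GUID command sent by
 * __get_guid().  Runs as the interface's null_user_handler, stores
 * the GUID in bmc->fetch_guid and wakes up the waiter.
 */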
3135 static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3136 {
3137         struct bmc_device *bmc = intf->bmc;
3138
3139         if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3140             || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
3141             || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
3142                 /* Not for me */
3143                 return;
3144
3145         if (msg->msg.data[0] != 0) {
3146                 /* Error from getting the GUID, the BMC doesn't have one. */
3147                 bmc->dyn_guid_set = 0;
3148                 goto out;
3149         }
3150
3151         if (msg->msg.data_len < UUID_SIZE + 1) {
3152                 bmc->dyn_guid_set = 0;
3153                 dev_warn(intf->si_dev,
3154                          "The GUID response from the BMC was too short, it was %d but should have been %d.  Assuming GUID is not available.\n",
3155                          msg->msg.data_len, UUID_SIZE + 1);
3156                 goto out;
3157         }
3158
3159         guid_copy(&bmc->fetch_guid, (guid_t *)(msg->msg.data + 1));
3160         /*
3161          * Make sure the guid data is available before setting
3162          * dyn_guid_set.
3163          */
3164         smp_wmb();
3165         bmc->dyn_guid_set = 1;
3166  out:
3167         wake_up(&intf->waitq);
3168 }
3169
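/*
 * Fetch the device GUID from the BMC.  dyn_guid_set acts as a small
 * state machine here: 2 means a fetch is in progress, 0 means no GUID
 * is available, and 1 means fetch_guid holds a valid GUID.
 */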
3170 static void __get_guid(struct ipmi_smi *intf)
3171 {
3172         int rv;
3173         struct bmc_device *bmc = intf->bmc;
3174
3175         bmc->dyn_guid_set = 2;
3176         intf->null_user_handler = guid_handler;
3177         rv = send_guid_cmd(intf, 0);
3178         if (rv)
3179                 /* Send failed, no GUID available. */
3180                 bmc->dyn_guid_set = 0;
3181
3182         wait_event(intf->waitq, bmc->dyn_guid_set != 2);
3183
3184         /* dyn_guid_set makes the guid data available. */
3185         smp_rmb();
3186
3187         intf->null_user_handler = NULL;
3188 }
3189
3190 static int
3191 send_channel_info_cmd(struct ipmi_smi *intf, int chan)
3192 {
3193         struct kernel_ipmi_msg            msg;
3194         unsigned char                     data[1];
3195         struct ipmi_system_interface_addr si;
3196
3197         si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3198         si.channel = IPMI_BMC_CHANNEL;
3199         si.lun = 0;
3200
3201         msg.netfn = IPMI_NETFN_APP_REQUEST;
3202         msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
3203         msg.data = data;
3204         msg.data_len = 1;
3205         data[0] = chan;
3206         return i_ipmi_request(NULL,
3207                               intf,
3208                               (struct ipmi_addr *) &si,
3209                               0,
3210                               &msg,
3211                               intf,
3212                               NULL,
3213                               NULL,
3214                               0,
3215                               intf->addrinfo[0].address,
3216                               intf->addrinfo[0].lun,
3217                               -1, 0);
3218 }
3219
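/*
 * Handle a Get Channel Info response: record the medium and protocol
 * for the current channel, then either request the next channel or
 * mark the channel list ready and wake up the waiter in
 * __scan_channels().
 */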
3220 static void
3221 channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3222 {
3223         int rv = 0;
3224         int ch;
3225         unsigned int set = intf->curr_working_cset;
3226         struct ipmi_channel *chans;
3227
3228         if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3229             && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3230             && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
3231                 /* It's the one we want */
3232                 if (msg->msg.data[0] != 0) {
3233                         /* Got an error from the channel, just go on. */
3234
3235                         if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
3236                                 /*
3237                                  * If the MC does not support this
3238                                  * command, that is legal.  We just
3239                                  * assume it has one IPMB at channel
3240                                  * zero.
3241                                  */
3242                                 intf->wchannels[set].c[0].medium
3243                                         = IPMI_CHANNEL_MEDIUM_IPMB;
3244                                 intf->wchannels[set].c[0].protocol
3245                                         = IPMI_CHANNEL_PROTOCOL_IPMB;
3246
3247                                 intf->channel_list = intf->wchannels + set;
3248                                 intf->channels_ready = true;
3249                                 wake_up(&intf->waitq);
3250                                 goto out;
3251                         }
3252                         goto next_channel;
3253                 }
3254                 if (msg->msg.data_len < 4) {
3255                         /* Message not big enough, just go on. */
3256                         goto next_channel;
3257                 }
3258                 ch = intf->curr_channel;
3259                 chans = intf->wchannels[set].c;
3260                 chans[ch].medium = msg->msg.data[2] & 0x7f;
3261                 chans[ch].protocol = msg->msg.data[3] & 0x1f;
3262
3263  next_channel:
3264                 intf->curr_channel++;
3265                 if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
3266                         intf->channel_list = intf->wchannels + set;
3267                         intf->channels_ready = true;
3268                         wake_up(&intf->waitq);
3269                 } else {
3270                         intf->channel_list = intf->wchannels + set;
3271                         intf->channels_ready = true;
3272                         rv = send_channel_info_cmd(intf, intf->curr_channel);
3273                 }
3274
3275                 if (rv) {
3276                         /* Got an error somehow, just give up. */
3277                         dev_warn(intf->si_dev,
3278                                  "Error sending channel information for channel %d: %d\n",
3279                                  intf->curr_channel, rv);
3280
3281                         intf->channel_list = intf->wchannels + set;
3282                         intf->channels_ready = true;
3283                         wake_up(&intf->waitq);
3284                 }
3285         }
3286  out:
3287         return;
3288 }
3289
3290 /*
3291  * Must be holding intf->bmc_reg_mutex to call this.
3292  */
3293 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
3294 {
3295         int rv;
3296
3297         if (ipmi_version_major(id) > 1
3298                         || (ipmi_version_major(id) == 1
3299                             && ipmi_version_minor(id) >= 5)) {
3300                 unsigned int set;
3301
3302                 /*
3303                  * Start scanning the channels to see what is
3304                  * available.
3305                  */
3306                 set = !intf->curr_working_cset;
3307                 intf->curr_working_cset = set;
3308                 memset(&intf->wchannels[set], 0,
3309                        sizeof(struct ipmi_channel_set));
3310
3311                 intf->null_user_handler = channel_handler;
3312                 intf->curr_channel = 0;
3313                 rv = send_channel_info_cmd(intf, 0);
3314                 if (rv) {
3315                         dev_warn(intf->si_dev,
3316                                  "Error sending channel information for channel 0, %d\n",
3317                                  rv);
3318                         return -EIO;
3319                 }
3320
3321                 /* Wait for the channel info to be read. */
3322                 wait_event(intf->waitq, intf->channels_ready);
3323                 intf->null_user_handler = NULL;
3324         } else {
3325                 unsigned int set = intf->curr_working_cset;
3326
3327                 /* Assume a single IPMB channel at zero. */
3328                 intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
3329                 intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
3330                 intf->channel_list = intf->wchannels + set;
3331                 intf->channels_ready = true;
3332         }
3333
3334         return 0;
3335 }
3336
3337 static void ipmi_poll(struct ipmi_smi *intf)
3338 {
3339         if (intf->handlers->poll)
3340                 intf->handlers->poll(intf->send_info);
3341         /* In case something came in */
3342         handle_new_recv_msgs(intf);
3343 }
3344
3345 void ipmi_poll_interface(struct ipmi_user *user)
3346 {
3347         ipmi_poll(user->intf);
3348 }
3349 EXPORT_SYMBOL(ipmi_poll_interface);
3350
3351 static void redo_bmc_reg(struct work_struct *work)
3352 {
3353         struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
3354                                              bmc_reg_work);
3355
3356         if (!intf->in_shutdown)
3357                 bmc_get_device_id(intf, NULL, NULL, NULL, NULL);
3358
3359         kref_put(&intf->refcount, intf_free);
3360 }
3361
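/*
 * Register a lower-layer (system) interface with the message handler.
 * handlers supplies the lower-layer operations, send_info is passed
 * back to those operations, si_dev is the device for the interface,
 * and slave_addr is the BMC's IPMB address (0 selects the default
 * IPMI_BMC_SLAVE_ADDR).
 */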
3362 int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
3363                       void                     *send_info,
3364                       struct device            *si_dev,
3365                       unsigned char            slave_addr)
3366 {
3367         int              i, j;
3368         int              rv;
3369         struct ipmi_smi *intf, *tintf;
3370         struct list_head *link;
3371         struct ipmi_device_id id;
3372
3373         /*
3374          * Make sure the driver is actually initialized; this handles
3375          * problems with initialization order.
3376          */
3377         rv = ipmi_init_msghandler();
3378         if (rv)
3379                 return rv;
3380
3381         intf = kzalloc(sizeof(*intf), GFP_KERNEL);
3382         if (!intf)
3383                 return -ENOMEM;
3384
3385         rv = init_srcu_struct(&intf->users_srcu);
3386         if (rv) {
3387                 kfree(intf);
3388                 return rv;
3389         }
3390
3391
3392         intf->bmc = &intf->tmp_bmc;
3393         INIT_LIST_HEAD(&intf->bmc->intfs);
3394         mutex_init(&intf->bmc->dyn_mutex);
3395         INIT_LIST_HEAD(&intf->bmc_link);
3396         mutex_init(&intf->bmc_reg_mutex);
3397         intf->intf_num = -1; /* Mark it invalid for now. */
3398         kref_init(&intf->refcount);
3399         INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
3400         intf->si_dev = si_dev;
3401         for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
3402                 intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
3403                 intf->addrinfo[j].lun = 2;
3404         }
3405         if (slave_addr != 0)
3406                 intf->addrinfo[0].address = slave_addr;
3407         INIT_LIST_HEAD(&intf->users);
3408         intf->handlers = handlers;
3409         intf->send_info = send_info;
3410         spin_lock_init(&intf->seq_lock);
3411         for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
3412                 intf->seq_table[j].inuse = 0;
3413                 intf->seq_table[j].seqid = 0;
3414         }
3415         intf->curr_seq = 0;
3416         spin_lock_init(&intf->waiting_rcv_msgs_lock);
3417         INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
3418         tasklet_init(&intf->recv_tasklet,
3419                      smi_recv_tasklet,
3420                      (unsigned long) intf);
3421         atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
3422         spin_lock_init(&intf->xmit_msgs_lock);
3423         INIT_LIST_HEAD(&intf->xmit_msgs);
3424         INIT_LIST_HEAD(&intf->hp_xmit_msgs);
3425         spin_lock_init(&intf->events_lock);
3426         spin_lock_init(&intf->watch_lock);
3427         atomic_set(&intf->event_waiters, 0);
3428         intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3429         INIT_LIST_HEAD(&intf->waiting_events);
3430         intf->waiting_events_count = 0;
3431         mutex_init(&intf->cmd_rcvrs_mutex);
3432         spin_lock_init(&intf->maintenance_mode_lock);
3433         INIT_LIST_HEAD(&intf->cmd_rcvrs);
3434         init_waitqueue_head(&intf->waitq);
3435         for (i = 0; i < IPMI_NUM_STATS; i++)
3436                 atomic_set(&intf->stats[i], 0);
3437
3438         mutex_lock(&ipmi_interfaces_mutex);
3439         /* Look for a hole in the numbers. */
3440         i = 0;
3441         link = &ipmi_interfaces;
3442         list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
3443                 if (tintf->intf_num != i) {
3444                         link = &tintf->link;
3445                         break;
3446                 }
3447                 i++;
3448         }
3449         /* Add the new interface in numeric order. */
3450         if (i == 0)
3451                 list_add_rcu(&intf->link, &ipmi_interfaces);
3452         else
3453                 list_add_tail_rcu(&intf->link, link);
3454
3455         rv = handlers->start_processing(send_info, intf);
3456         if (rv)
3457                 goto out_err;
3458
3459         rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
3460         if (rv) {
3461                 dev_err(si_dev, "Unable to get the device id: %d\n", rv);
3462                 goto out_err_started;
3463         }
3464
3465         mutex_lock(&intf->bmc_reg_mutex);
3466         rv = __scan_channels(intf, &id);
3467         mutex_unlock(&intf->bmc_reg_mutex);
3468         if (rv)
3469                 goto out_err_bmc_reg;
3470
3471         /*
3472          * Keep memory order straight for RCU readers.  Make
3473          * sure everything else is committed to memory before
3474          * setting intf_num to mark the interface valid.
3475          */
3476         smp_wmb();
3477         intf->intf_num = i;
3478         mutex_unlock(&ipmi_interfaces_mutex);
3479
3480         /* After this point the interface is legal to use. */
3481         call_smi_watchers(i, intf->si_dev);
3482
3483         return 0;
3484
3485  out_err_bmc_reg:
3486         ipmi_bmc_unregister(intf);
3487  out_err_started:
3488         if (intf->handlers->shutdown)
3489                 intf->handlers->shutdown(intf->send_info);
3490  out_err:
3491         list_del_rcu(&intf->link);
3492         mutex_unlock(&ipmi_interfaces_mutex);
3493         synchronize_srcu(&ipmi_interfaces_srcu);
3494         cleanup_srcu_struct(&intf->users_srcu);
3495         kref_put(&intf->refcount, intf_free);
3496
3497         return rv;
3498 }
3499 EXPORT_SYMBOL(ipmi_register_smi);
3500
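/*
 * Build a local error response, with the given completion code, for a
 * message that could not be sent, and feed it back through the normal
 * receive path.  rsp[0] is the request's netfn/LUN byte converted to
 * the response netfn, rsp[1] is the command and rsp[2] the completion
 * code.
 */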
3501 static void deliver_smi_err_response(struct ipmi_smi *intf,
3502                                      struct ipmi_smi_msg *msg,
3503                                      unsigned char err)
3504 {
3505         msg->rsp[0] = msg->data[0] | 4;
3506         msg->rsp[1] = msg->data[1];
3507         msg->rsp[2] = err;
3508         msg->rsp_size = 3;
3509         /* It's an error, so it will never requeue, no need to check return. */
3510         handle_one_recv_msg(intf, msg);
3511 }
3512
3513 static void cleanup_smi_msgs(struct ipmi_smi *intf)
3514 {
3515         int              i;
3516         struct seq_table *ent;
3517         struct ipmi_smi_msg *msg;
3518         struct list_head *entry;
3519         struct list_head tmplist;
3520
3521         /* Clear out our transmit queues and hold the messages. */
3522         INIT_LIST_HEAD(&tmplist);
3523         list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
3524         list_splice_tail(&intf->xmit_msgs, &tmplist);
3525
3526         /* Current message first, to preserve order */
3527         while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
3528                 /* Wait for the message to clear out. */
3529                 schedule_timeout(1);
3530         }
3531
3532         /* No need for locks, the interface is down. */
3533
3534         /*
3535          * Return errors for all pending messages in queue and in the
3536          * tables waiting for remote responses.
3537          */
3538         while (!list_empty(&tmplist)) {
3539                 entry = tmplist.next;
3540                 list_del(entry);
3541                 msg = list_entry(entry, struct ipmi_smi_msg, link);
3542                 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
3543         }
3544
3545         for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
3546                 ent = &intf->seq_table[i];
3547                 if (!ent->inuse)
3548                         continue;
3549                 deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
3550         }
3551 }
3552
3553 void ipmi_unregister_smi(struct ipmi_smi *intf)
3554 {
3555         struct ipmi_smi_watcher *w;
3556         int intf_num = intf->intf_num, index;
3557
3558         mutex_lock(&ipmi_interfaces_mutex);
3559         intf->intf_num = -1;
3560         intf->in_shutdown = true;
3561         list_del_rcu(&intf->link);
3562         mutex_unlock(&ipmi_interfaces_mutex);
3563         synchronize_srcu(&ipmi_interfaces_srcu);
3564
3565         /* At this point no users can be added to the interface. */
3566
3567         /*
3568          * Call all the watcher interfaces to tell them that
3569          * an interface is going away.
3570          */
3571         mutex_lock(&smi_watchers_mutex);
3572         list_for_each_entry(w, &smi_watchers, link)
3573                 w->smi_gone(intf_num);
3574         mutex_unlock(&smi_watchers_mutex);
3575
3576         index = srcu_read_lock(&intf->users_srcu);
3577         while (!list_empty(&intf->users)) {
3578                 struct ipmi_user *user =
3579                         container_of(list_next_rcu(&intf->users),
3580                                      struct ipmi_user, link);
3581
3582                 _ipmi_destroy_user(user);
3583         }
3584         srcu_read_unlock(&intf->users_srcu, index);
3585
3586         if (intf->handlers->shutdown)
3587                 intf->handlers->shutdown(intf->send_info);
3588
3589         cleanup_smi_msgs(intf);
3590
3591         ipmi_bmc_unregister(intf);
3592
3593         cleanup_srcu_struct(&intf->users_srcu);
3594         kref_put(&intf->refcount, intf_free);
3595 }
3596 EXPORT_SYMBOL(ipmi_unregister_smi);
3597
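/*
 * Handle a response embedded in a Get Message reply on an IPMB
 * channel: match it to the pending request by sequence number and
 * deliver it to the original requester.
 */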
3598 static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
3599                                    struct ipmi_smi_msg *msg)
3600 {
3601         struct ipmi_ipmb_addr ipmb_addr;
3602         struct ipmi_recv_msg  *recv_msg;
3603
3604         /*
3605          * This is 11, not 10, because the response must contain a
3606          * completion code.
3607          */
3608         if (msg->rsp_size < 11) {
3609                 /* Message not big enough, just ignore it. */
3610                 ipmi_inc_stat(intf, invalid_ipmb_responses);
3611                 return 0;
3612         }
3613
3614         if (msg->rsp[2] != 0) {
3615                 /* An error getting the response, just ignore it. */
3616                 return 0;
3617         }
3618
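        /*
         * Build the remote IPMB address from the response: rsp[3] holds
         * the channel, rsp[6] the responder's slave address and the low
         * two bits of rsp[7] the responder's LUN.
         */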
3619         ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
3620         ipmb_addr.slave_addr = msg->rsp[6];
3621         ipmb_addr.channel = msg->rsp[3] & 0x0f;
3622         ipmb_addr.lun = msg->rsp[7] & 3;
3623
3624         /*
3625          * It's a response from a remote entity.  Look up the sequence
3626          * number and handle the response.
3627          */
3628         if (intf_find_seq(intf,
3629                           msg->rsp[7] >> 2,
3630                           msg->rsp[3] & 0x0f,
3631                           msg->rsp[8],
3632                           (msg->rsp[4] >> 2) & (~1),
3633                           (struct ipmi_addr *) &ipmb_addr,
3634                           &recv_msg)) {
3635                 /*
3636                  * We were unable to find the sequence number,
3637                  * so just nuke the message.
3638                  */
3639                 ipmi_inc_stat(intf, unhandled_ipmb_responses);
3640                 return 0;
3641         }
3642
3643         memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9);
3644         /*
3645          * The other fields matched, so no need to set them, except
3646          * for netfn, which needs to be the response that was
3647          * returned, not the request value.
3648          */
3649         recv_msg->msg.netfn = msg->rsp[4] >> 2;
3650         recv_msg->msg.data = recv_msg->msg_data;
3651         recv_msg->msg.data_len = msg->rsp_size - 10;
3652         recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3653         if (deliver_response(intf, recv_msg))
3654                 ipmi_inc_stat(intf, unhandled_ipmb_responses);
3655         else
3656                 ipmi_inc_stat(intf, handled_ipmb_responses);
3657
3658         return 0;
3659 }
3660
3661 static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
3662                                    struct ipmi_smi_msg *msg)
3663 {
3664         struct cmd_rcvr          *rcvr;
3665         int                      rv = 0;
3666         unsigned char            netfn;
3667         unsigned char            cmd;
3668         unsigned char            chan;
3669         struct ipmi_user         *user = NULL;
3670         struct ipmi_ipmb_addr    *ipmb_addr;
3671         struct ipmi_recv_msg     *recv_msg;
3672
3673         if (msg->rsp_size < 10) {
3674                 /* Message not big enough, just ignore it. */
3675                 ipmi_inc_stat(intf, invalid_commands);
3676                 return 0;
3677         }
3678
3679         if (msg->rsp[2] != 0) {
3680                 /* An error getting the response, just ignore it. */
3681                 return 0;
3682         }
3683
3684         netfn = msg->rsp[4] >> 2;
3685         cmd = msg->rsp[8];
3686         chan = msg->rsp[3] & 0xf;
3687
3688         rcu_read_lock();
3689         rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3690         if (rcvr) {
3691                 user = rcvr->user;
3692                 kref_get(&user->refcount);
3693         } else
3694                 user = NULL;
3695         rcu_read_unlock();
3696
3697         if (user == NULL) {
3698                 /* We didn't find a user, deliver an error response. */
3699                 ipmi_inc_stat(intf, unhandled_commands);
3700
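                /*
                 * Build a Send Message request that returns an "invalid
                 * command" completion code to the originator over IPMB,
                 * echoing the addressing fields from the incoming request.
                 */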
3701                 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
3702                 msg->data[1] = IPMI_SEND_MSG_CMD;
3703                 msg->data[2] = msg->rsp[3];
3704                 msg->data[3] = msg->rsp[6];
3705                 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
3706                 msg->data[5] = ipmb_checksum(&msg->data[3], 2);
3707                 msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
3708                 /* rqseq/lun */
3709                 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
3710                 msg->data[8] = msg->rsp[8]; /* cmd */
3711                 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
3712                 msg->data[10] = ipmb_checksum(&msg->data[6], 4);
3713                 msg->data_size = 11;
3714
3715                 ipmi_debug_msg("Invalid command:", msg->data, msg->data_size);
3716
3717                 rcu_read_lock();
3718                 if (!intf->in_shutdown) {
3719                         smi_send(intf, intf->handlers, msg, 0);
3720                         /*
3721                          * We used the message, so return the value
3722                          * that causes it to not be freed or
3723                          * queued.
3724                          */
3725                         rv = -1;
3726                 }
3727                 rcu_read_unlock();
3728         } else {
3729                 recv_msg = ipmi_alloc_recv_msg();
3730                 if (!recv_msg) {
3731                         /*
3732                          * We couldn't allocate memory for the
3733                          * message, so requeue it for handling
3734                          * later.
3735                          */
3736                         rv = 1;
3737                         kref_put(&user->refcount, free_user);
3738                 } else {
3739                         /* Extract the source address from the data. */
3740                         ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
3741                         ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
3742                         ipmb_addr->slave_addr = msg->rsp[6];
3743                         ipmb_addr->lun = msg->rsp[7] & 3;
3744                         ipmb_addr->channel = msg->rsp[3] & 0xf;
3745
3746                         /*
3747                          * Extract the rest of the message information
3748                          * from the IPMB header.
3749                          */
3750                         recv_msg->user = user;
3751                         recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3752                         recv_msg->msgid = msg->rsp[7] >> 2;
3753                         recv_msg->msg.netfn = msg->rsp[4] >> 2;
3754                         recv_msg->msg.cmd = msg->rsp[8];
3755                         recv_msg->msg.data = recv_msg->msg_data;
3756
3757                         /*
3758                          * We chop off 10, not 9 bytes because the checksum
3759                          * at the end also needs to be removed.
3760                          */
3761                         recv_msg->msg.data_len = msg->rsp_size - 10;
3762                         memcpy(recv_msg->msg_data, &msg->rsp[9],
3763                                msg->rsp_size - 10);
3764                         if (deliver_response(intf, recv_msg))
3765                                 ipmi_inc_stat(intf, unhandled_commands);
3766                         else
3767                                 ipmi_inc_stat(intf, handled_commands);
3768                 }
3769         }
3770
3771         return rv;
3772 }
3773
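/*
 * Handle a response embedded in a Get Message reply on a LAN channel:
 * match it to the pending request by sequence number and deliver it
 * to the original requester.
 */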
3774 static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
3775                                   struct ipmi_smi_msg *msg)
3776 {
3777         struct ipmi_lan_addr  lan_addr;
3778         struct ipmi_recv_msg  *recv_msg;
3779
3780
3781         /*
3782          * This is 13, not 12, because the response must contain a
3783          * completion code.
3784          */
3785         if (msg->rsp_size < 13) {
3786                 /* Message not big enough, just ignore it. */
3787                 ipmi_inc_stat(intf, invalid_lan_responses);
3788                 return 0;
3789         }
3790
3791         if (msg->rsp[2] != 0) {
3792                 /* An error getting the response, just ignore it. */
3793                 return 0;
3794         }
3795
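        /*
         * Build the remote LAN address from the response: rsp[3] carries
         * the channel and privilege level, rsp[4] the session handle,
         * rsp[5] and rsp[8] the local and remote SWIDs, and the low two
         * bits of rsp[9] the LUN.
         */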
3796         lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
3797         lan_addr.session_handle = msg->rsp[4];
3798         lan_addr.remote_SWID = msg->rsp[8];
3799         lan_addr.local_SWID = msg->rsp[5];
3800         lan_addr.channel = msg->rsp[3] & 0x0f;
3801         lan_addr.privilege = msg->rsp[3] >> 4;
3802         lan_addr.lun = msg->rsp[9] & 3;
3803
3804         /*
3805          * It's a response from a remote entity.  Look up the sequence
3806          * number and handle the response.
3807          */
3808         if (intf_find_seq(intf,
3809                           msg->rsp[9] >> 2,
3810                           msg->rsp[3] & 0x0f,
3811                           msg->rsp[10],
3812                           (msg->rsp[6] >> 2) & (~1),
3813                           (struct ipmi_addr *) &lan_addr,
3814                           &recv_msg)) {
3815                 /*
3816                  * We were unable to find the sequence number,
3817                  * so just nuke the message.
3818                  */
3819                 ipmi_inc_stat(intf, unhandled_lan_responses);
3820                 return 0;
3821         }
3822
3823         memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11);
3824         /*
3825          * The other fields matched, so no need to set them, except
3826          * for netfn, which needs to be the response that was
3827          * returned, not the request value.
3828          */
3829         recv_msg->msg.netfn = msg->rsp[6] >> 2;
3830         recv_msg->msg.data = recv_msg->msg_data;
3831         recv_msg->msg.data_len = msg->rsp_size - 12;
3832         recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3833         if (deliver_response(intf, recv_msg))
3834                 ipmi_inc_stat(intf, unhandled_lan_responses);
3835         else
3836                 ipmi_inc_stat(intf, handled_lan_responses);
3837
3838         return 0;
3839 }
3840
3841 static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
3842                                   struct ipmi_smi_msg *msg)
3843 {
3844         struct cmd_rcvr          *rcvr;
3845         int                      rv = 0;
3846         unsigned char            netfn;
3847         unsigned char            cmd;
3848         unsigned char            chan;
3849         struct ipmi_user         *user = NULL;
3850         struct ipmi_lan_addr     *lan_addr;
3851         struct ipmi_recv_msg     *recv_msg;
3852
3853         if (msg->rsp_size < 12) {
3854                 /* Message not big enough, just ignore it. */
3855                 ipmi_inc_stat(intf, invalid_commands);
3856                 return 0;
3857         }
3858
3859         if (msg->rsp[2] != 0) {
3860                 /* An error getting the response, just ignore it. */
3861                 return 0;
3862         }
3863
3864         netfn = msg->rsp[6] >> 2;
3865         cmd = msg->rsp[10];
3866         chan = msg->rsp[3] & 0xf;
3867
3868         rcu_read_lock();
3869         rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3870         if (rcvr) {
3871                 user = rcvr->user;
3872                 kref_get(&user->refcount);
3873         } else
3874                 user = NULL;
3875         rcu_read_unlock();
3876
3877         if (user == NULL) {
3878                 /* We didn't find a user, just give up. */
3879                 ipmi_inc_stat(intf, unhandled_commands);
3880
3881                 /*
3882                  * Don't do anything with these messages, just allow
3883                  * them to be freed.
3884                  */
3885                 rv = 0;
3886         } else {
3887                 recv_msg = ipmi_alloc_recv_msg();
3888                 if (!recv_msg) {
3889                         /*
3890                          * We couldn't allocate memory for the
3891                          * message, so requeue it for handling later.
3892                          */
3893                         rv = 1;
3894                         kref_put(&user->refcount, free_user);
3895                 } else {
3896                         /* Extract the source address from the data. */
3897                         lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
3898                         lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
3899                         lan_addr->session_handle = msg->rsp[4];
3900                         lan_addr->remote_SWID = msg->rsp[8];
3901                         lan_addr->local_SWID = msg->rsp[5];
3902                         lan_addr->lun = msg->rsp[9] & 3;
3903                         lan_addr->channel = msg->rsp[3] & 0xf;
3904                         lan_addr->privilege = msg->rsp[3] >> 4;
3905
3906                         /*
3907                          * Extract the rest of the message information
3908                          * from the IPMB header.
3909                          */
3910                         recv_msg->user = user;
3911                         recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3912                         recv_msg->msgid = msg->rsp[9] >> 2;
3913                         recv_msg->msg.netfn = msg->rsp[6] >> 2;
3914                         recv_msg->msg.cmd = msg->rsp[10];
3915                         recv_msg->msg.data = recv_msg->msg_data;
3916
3917                         /*
3918                          * We chop off 12, not 11 bytes because the checksum
3919                          * at the end also needs to be removed.
3920                          */
3921                         recv_msg->msg.data_len = msg->rsp_size - 12;
3922                         memcpy(recv_msg->msg_data, &msg->rsp[11],
3923                                msg->rsp_size - 12);
3924                         if (deliver_response(intf, recv_msg))
3925                                 ipmi_inc_stat(intf, unhandled_commands);
3926                         else
3927                                 ipmi_inc_stat(intf, handled_commands);
3928                 }
3929         }
3930
3931         return rv;
3932 }
3933
3934 /*
3935  * This routine will handle "Get Message" command responses with
3936  * channels that use an OEM Medium. The message format belongs to
3937  * the OEM.  See IPMI 2.0 specification, Chapter 6 and
3938  * Chapter 22, sections 22.6 and 22.24 for more details.
3939  */
3940 static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
3941                                   struct ipmi_smi_msg *msg)
3942 {
3943         struct cmd_rcvr       *rcvr;
3944         int                   rv = 0;
3945         unsigned char         netfn;
3946         unsigned char         cmd;
3947         unsigned char         chan;
3948         struct ipmi_user *user = NULL;
3949         struct ipmi_system_interface_addr *smi_addr;
3950         struct ipmi_recv_msg  *recv_msg;
3951
3952         /*
3953          * We expect the OEM SW to perform error checking
3954          * so we just do some basic sanity checks
3955          */
3956         if (msg->rsp_size < 4) {
3957                 /* Message not big enough, just ignore it. */
3958                 ipmi_inc_stat(intf, invalid_commands);
3959                 return 0;
3960         }
3961
3962         if (msg->rsp[2] != 0) {
3963                 /* An error getting the response, just ignore it. */
3964                 return 0;
3965         }
3966
3967         /*
3968          * This is an OEM Message so the OEM needs to know how to
3969          * handle the message.  We do no interpretation.
3970          */
3971         netfn = msg->rsp[0] >> 2;
3972         cmd = msg->rsp[1];
3973         chan = msg->rsp[3] & 0xf;
3974
3975         rcu_read_lock();
3976         rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3977         if (rcvr) {
3978                 user = rcvr->user;
3979                 kref_get(&user->refcount);
3980         } else
3981                 user = NULL;
3982         rcu_read_unlock();
3983
3984         if (user == NULL) {
3985                 /* We didn't find a user, just give up. */
3986                 ipmi_inc_stat(intf, unhandled_commands);
3987
3988                 /*
3989                  * Don't do anything with these messages, just allow
3990                  * them to be freed.
3991                  */
3992
3993                 rv = 0;
3994         } else {
3995                 recv_msg = ipmi_alloc_recv_msg();
3996                 if (!recv_msg) {
3997                         /*
3998                          * We couldn't allocate memory for the
3999                          * message, so requeue it for handling
4000                          * later.
4001                          */
4002                         rv = 1;
4003                         kref_put(&user->refcount, free_user);
4004                 } else {
4005                         /*
4006                          * OEM Messages are expected to be delivered via
4007                          * the system interface to SMS software.  We might
4008                          * need to revisit this depending on OEM
4009                          * requirements.
4010                          */
4011                         smi_addr = ((struct ipmi_system_interface_addr *)
4012                                     &recv_msg->addr);
4013                         smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4014                         smi_addr->channel = IPMI_BMC_CHANNEL;
4015                         smi_addr->lun = msg->rsp[0] & 3;
4016
4017                         recv_msg->user = user;
4018                         recv_msg->user_msg_data = NULL;
4019                         recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
4020                         recv_msg->msg.netfn = msg->rsp[0] >> 2;
4021                         recv_msg->msg.cmd = msg->rsp[1];
4022                         recv_msg->msg.data = recv_msg->msg_data;
4023
4024                         /*
4025                          * The message starts at byte 4, which follows the
4026                          * Channel Byte in the "GET MESSAGE" command.
4027                          */
4028                         recv_msg->msg.data_len = msg->rsp_size - 4;
4029                         memcpy(recv_msg->msg_data, &msg->rsp[4],
4030                                msg->rsp_size - 4);
4031                         if (deliver_response(intf, recv_msg))
4032                                 ipmi_inc_stat(intf, unhandled_commands);
4033                         else
4034                                 ipmi_inc_stat(intf, handled_commands);
4035                 }
4036         }
4037
4038         return rv;
4039 }
4040
4041 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
4042                                      struct ipmi_smi_msg  *msg)
4043 {
4044         struct ipmi_system_interface_addr *smi_addr;
4045
4046         recv_msg->msgid = 0;
4047         smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
4048         smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4049         smi_addr->channel = IPMI_BMC_CHANNEL;
4050         smi_addr->lun = msg->rsp[0] & 3;
4051         recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
4052         recv_msg->msg.netfn = msg->rsp[0] >> 2;
4053         recv_msg->msg.cmd = msg->rsp[1];
4054         memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
4055         recv_msg->msg.data = recv_msg->msg_data;
4056         recv_msg->msg.data_len = msg->rsp_size - 3;
4057 }
4058
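/*
 * Handle an asynchronous event read from the event message buffer.  A
 * copy of the event is delivered to every user that has asked for
 * events; if no user wants it, the event is queued (up to
 * MAX_EVENTS_IN_QUEUE) for later delivery to a user that enables
 * events.
 */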
4059 static int handle_read_event_rsp(struct ipmi_smi *intf,
4060                                  struct ipmi_smi_msg *msg)
4061 {
4062         struct ipmi_recv_msg *recv_msg, *recv_msg2;
4063         struct list_head     msgs;
4064         struct ipmi_user     *user;
4065         int rv = 0, deliver_count = 0, index;
4066         unsigned long        flags;
4067
4068         if (msg->rsp_size < 19) {
4069                 /* Message is too small to be an IPMB event. */
4070                 ipmi_inc_stat(intf, invalid_events);
4071                 return 0;
4072         }
4073
4074         if (msg->rsp[2] != 0) {
4075                 /* An error getting the event, just ignore it. */
4076                 return 0;
4077         }
4078
4079         INIT_LIST_HEAD(&msgs);
4080
4081         spin_lock_irqsave(&intf->events_lock, flags);
4082
4083         ipmi_inc_stat(intf, events);
4084
4085         /*
4086          * Allocate and fill in one message for every user that is
4087          * getting events.
4088          */
4089         index = srcu_read_lock(&intf->users_srcu);
4090         list_for_each_entry_rcu(user, &intf->users, link) {
4091                 if (!user->gets_events)
4092                         continue;
4093
4094                 recv_msg = ipmi_alloc_recv_msg();
4095                 if (!recv_msg) {
4096                         rcu_read_unlock();
4097                         list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
4098                                                  link) {
4099                                 list_del(&recv_msg->link);
4100                                 ipmi_free_recv_msg(recv_msg);
4101                         }
4102                         /*
4103                          * We couldn't allocate memory for the
4104                          * message, so requeue it for handling
4105                          * later.
4106                          */
4107                         rv = 1;
4108                         goto out;
4109                 }
4110
4111                 deliver_count++;
4112
4113                 copy_event_into_recv_msg(recv_msg, msg);
4114                 recv_msg->user = user;
4115                 kref_get(&user->refcount);
4116                 list_add_tail(&recv_msg->link, &msgs);
4117         }
4118         srcu_read_unlock(&intf->users_srcu, index);
4119
4120         if (deliver_count) {
4121                 /* Now deliver all the messages. */
4122                 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
4123                         list_del(&recv_msg->link);
4124                         deliver_local_response(intf, recv_msg);
4125                 }
4126         } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
4127                 /*
4128                  * No one to receive the message; put it in the queue if
4129                  * there are not already too many things in it.
4130                  */
4131                 recv_msg = ipmi_alloc_recv_msg();
4132                 if (!recv_msg) {
4133                         /*
4134                          * We couldn't allocate memory for the
4135                          * message, so requeue it for handling
4136                          * later.
4137                          */
4138                         rv = 1;
4139                         goto out;
4140                 }
4141
4142                 copy_event_into_recv_msg(recv_msg, msg);
4143                 list_add_tail(&recv_msg->link, &intf->waiting_events);
4144                 intf->waiting_events_count++;
4145         } else if (!intf->event_msg_printed) {
4146                 /*
4147                  * There are too many things in the queue; discard this
4148                  * message.
4149                  */
4150                 dev_warn(intf->si_dev,
4151                          "Event queue full, discarding incoming events\n");
4152                 intf->event_msg_printed = 1;
4153         }
4154
4155  out:
4156         spin_unlock_irqrestore(&intf->events_lock, flags);
4157
4158         return rv;
4159 }
4160
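/*
 * Handle a response from the local BMC (a command sent directly over
 * the system interface).  The original ipmi_recv_msg is recovered from
 * msg->user_data and completed with the response data.
 */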
4161 static int handle_bmc_rsp(struct ipmi_smi *intf,
4162                           struct ipmi_smi_msg *msg)
4163 {
4164         struct ipmi_recv_msg *recv_msg;
4165         struct ipmi_system_interface_addr *smi_addr;
4166
4167         recv_msg = (struct ipmi_recv_msg *) msg->user_data;
4168         if (recv_msg == NULL) {
4169                 dev_warn(intf->si_dev,
4170                          "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error.  Contact your hardware vendor for assistance.\n");
4171                 return 0;
4172         }
4173
4174         recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4175         recv_msg->msgid = msg->msgid;
4176         smi_addr = ((struct ipmi_system_interface_addr *)
4177                     &recv_msg->addr);
4178         smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4179         smi_addr->channel = IPMI_BMC_CHANNEL;
4180         smi_addr->lun = msg->rsp[0] & 3;
4181         recv_msg->msg.netfn = msg->rsp[0] >> 2;
4182         recv_msg->msg.cmd = msg->rsp[1];
4183         memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
4184         recv_msg->msg.data = recv_msg->msg_data;
4185         recv_msg->msg.data_len = msg->rsp_size - 2;
4186         deliver_local_response(intf, recv_msg);
4187
4188         return 0;
4189 }
4190
4191 /*
4192  * Handle a received message.  Return 1 if the message should be requeued,
4193  * 0 if the message should be freed, or -1 if the message should not
4194  * be freed or requeued.
4195  */
4196 static int handle_one_recv_msg(struct ipmi_smi *intf,
4197                                struct ipmi_smi_msg *msg)
4198 {
4199         int requeue;
4200         int chan;
4201
4202         ipmi_debug_msg("Recv:", msg->rsp, msg->rsp_size);
4203         if (msg->rsp_size < 2) {
4204                 /* Message is too small to be correct. */
4205                 dev_warn(intf->si_dev,
4206                          "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
4207                          (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
4208
4209                 /* Generate an error response for the message. */
4210                 msg->rsp[0] = msg->data[0] | (1 << 2);
4211                 msg->rsp[1] = msg->data[1];
4212                 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4213                 msg->rsp_size = 3;
4214         } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
4215                    || (msg->rsp[1] != msg->data[1])) {
4216                 /*
4217                  * The NetFN and Command in the response are not even
4218                  * marginally correct.
4219                  */
4220                 dev_warn(intf->si_dev,
4221                          "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
4222                          (msg->data[0] >> 2) | 1, msg->data[1],
4223                          msg->rsp[0] >> 2, msg->rsp[1]);
4224
4225                 /* Generate an error response for the message. */
4226                 msg->rsp[0] = msg->data[0] | (1 << 2);
4227                 msg->rsp[1] = msg->data[1];
4228                 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4229                 msg->rsp_size = 3;
4230         }
4231
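        /*
         * Dispatch on the response type: a Send Message response, a Get
         * Message response (routed by the channel's medium), an event
         * from the event message buffer, or a plain response from the
         * local BMC.
         */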
4232         if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4233             && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
4234             && (msg->user_data != NULL)) {
4235                 /*
4236                  * It's a response to a response we sent.  For this we
4237                  * deliver a send message response to the user.
4238                  */
4239                 struct ipmi_recv_msg *recv_msg = msg->user_data;
4240
4241                 requeue = 0;
4242                 if (msg->rsp_size < 2)
4243                         /* Message is too small to be correct. */
4244                         goto out;
4245
4246                 chan = msg->data[2] & 0x0f;
4247                 if (chan >= IPMI_MAX_CHANNELS)
4248                         /* Invalid channel number */
4249                         goto out;
4250
4251                 if (!recv_msg)
4252                         goto out;
4253
4254                 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
4255                 recv_msg->msg.data = recv_msg->msg_data;
4256                 recv_msg->msg.data_len = 1;
4257                 recv_msg->msg_data[0] = msg->rsp[2];
4258                 deliver_local_response(intf, recv_msg);
4259         } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4260                    && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
4261                 struct ipmi_channel   *chans;
4262
4263                 /* It's from the receive queue. */
4264                 chan = msg->rsp[3] & 0xf;
4265                 if (chan >= IPMI_MAX_CHANNELS) {
4266                         /* Invalid channel number */
4267                         requeue = 0;
4268                         goto out;
4269                 }
4270
4271                 /*
4272                  * We need to make sure the channels have been initialized.
4273                  * The channel_handler routine will set the "curr_channel"
4274                  * equal to or greater than IPMI_MAX_CHANNELS when all the
4275                  * channels for this interface have been initialized.
4276                  */
4277                 if (!intf->channels_ready) {
4278                         requeue = 0; /* Throw the message away */
4279                         goto out;
4280                 }
4281
4282                 chans = READ_ONCE(intf->channel_list)->c;
4283
4284                 switch (chans[chan].medium) {
4285                 case IPMI_CHANNEL_MEDIUM_IPMB:
4286                         if (msg->rsp[4] & 0x04) {
4287                                 /*
4288                                  * It's a response, so find the
4289                                  * requesting message and send it up.
4290                                  */
4291                                 requeue = handle_ipmb_get_msg_rsp(intf, msg);
4292                         } else {
4293                                 /*
4294                                  * It's a command to the SMS from some other
4295                                  * entity.  Handle that.
4296                                  */
4297                                 requeue = handle_ipmb_get_msg_cmd(intf, msg);
4298                         }
4299                         break;
4300
4301                 case IPMI_CHANNEL_MEDIUM_8023LAN:
4302                 case IPMI_CHANNEL_MEDIUM_ASYNC:
4303                         if (msg->rsp[6] & 0x04) {
4304                                 /*
4305                                  * It's a response, so find the
4306                                  * requesting message and send it up.
4307                                  */
4308                                 requeue = handle_lan_get_msg_rsp(intf, msg);
4309                         } else {
4310                                 /*
4311                                  * It's a command to the SMS from some other
4312                                  * entity.  Handle that.
4313                                  */
4314                                 requeue = handle_lan_get_msg_cmd(intf, msg);
4315                         }
4316                         break;
4317
4318                 default:
4319                         /* Check for OEM Channels.  Clients had better
4320                            register for these commands. */
4321                         if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
4322                             && (chans[chan].medium
4323                                 <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
4324                                 requeue = handle_oem_get_msg_cmd(intf, msg);
4325                         } else {
4326                                 /*
4327                                  * We don't handle the channel type, so just
4328                                  * free the message.
4329                                  */
4330                                 requeue = 0;
4331                         }
4332                 }
4333
4334         } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4335                    && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
4336                 /* It's an asynchronous event. */
4337                 requeue = handle_read_event_rsp(intf, msg);
4338         } else {
4339                 /* It's a response from the local BMC. */
4340                 requeue = handle_bmc_rsp(intf, msg);
4341         }
4342
4343  out:
4344         return requeue;
4345 }
4346
4347 /*
4348  * If there are messages in the queue or pretimeouts, handle them.
4349  */
4350 static void handle_new_recv_msgs(struct ipmi_smi *intf)
4351 {
4352         struct ipmi_smi_msg  *smi_msg;
4353         unsigned long        flags = 0;
4354         int                  rv;
4355         int                  run_to_completion = intf->run_to_completion;
4356
4357         /* See if any waiting messages need to be processed. */
4358         if (!run_to_completion)
4359                 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4360         while (!list_empty(&intf->waiting_rcv_msgs)) {
4361                 smi_msg = list_entry(intf->waiting_rcv_msgs.next,
4362                                      struct ipmi_smi_msg, link);
4363                 list_del(&smi_msg->link);
4364                 if (!run_to_completion)
4365                         spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4366                                                flags);
4367                 rv = handle_one_recv_msg(intf, smi_msg);
4368                 if (!run_to_completion)
4369                         spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4370                 if (rv > 0) {
4371                         /*
4372                          * To preserve message order, quit if we
4373                          * can't handle a message.  Add the message
4374                          * back at the head; this is safe because this
4375                          * tasklet is the only thing that pulls the
4376                          * messages.
4377                          */
4378                         list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
4379                         break;
4380                 } else {
4381                         if (rv == 0)
4382                                 /* Message handled */
4383                                 ipmi_free_smi_msg(smi_msg);
4384                         /* If rv < 0, fatal error; leave it off the list but don't free it. */
4385                 }
4386         }
4387         if (!run_to_completion)
4388                 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
4389
4390         /*
4391          * If the pretimeout count is non-zero, decrement one from it and
4392          * deliver pretimeouts to all the users.
4393          */
4394         if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
4395                 struct ipmi_user *user;
4396                 int index;
4397
4398                 index = srcu_read_lock(&intf->users_srcu);
4399                 list_for_each_entry_rcu(user, &intf->users, link) {
4400                         if (user->handler->ipmi_watchdog_pretimeout)
4401                                 user->handler->ipmi_watchdog_pretimeout(
4402                                         user->handler_data);
4403                 }
4404                 srcu_read_unlock(&intf->users_srcu, index);
4405         }
4406 }
4407
4408 static void smi_recv_tasklet(unsigned long val)
4409 {
4410         unsigned long flags = 0; /* keep us warning-free. */
4411         struct ipmi_smi *intf = (struct ipmi_smi *) val;
4412         int run_to_completion = intf->run_to_completion;
4413         struct ipmi_smi_msg *newmsg = NULL;
4414
4415         /*
4416          * Start the next message if available.
4417          *
4418          * Do this here, not in the actual receiver, because the lower layer
4419          * is allowed to hold locks while calling message delivery, and doing
4420          * it there could deadlock.
4421          */
4422
4423         rcu_read_lock();
4424
4425         if (!run_to_completion)
4426                 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4427         if (intf->curr_msg == NULL && !intf->in_shutdown) {
4428                 struct list_head *entry = NULL;
4429
4430                 /* Pick the high priority queue first. */
4431                 if (!list_empty(&intf->hp_xmit_msgs))
4432                         entry = intf->hp_xmit_msgs.next;
4433                 else if (!list_empty(&intf->xmit_msgs))
4434                         entry = intf->xmit_msgs.next;
4435
4436                 if (entry) {
4437                         list_del(entry);
4438                         newmsg = list_entry(entry, struct ipmi_smi_msg, link);
4439                         intf->curr_msg = newmsg;
4440                 }
4441         }
4442
4443         if (!run_to_completion)
4444                 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4445         if (newmsg)
4446                 intf->handlers->sender(intf->send_info, newmsg);
4447
4448         rcu_read_unlock();
4449
4450         handle_new_recv_msgs(intf);
4451 }
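
/*
 * Illustrative sketch, not compiled: roughly how a receive tasklet like
 * intf->recv_tasklet is presumably bound to its interface when the
 * interface is registered (that setup lives elsewhere in this file).
 * It only shows the (unsigned long) data convention that
 * smi_recv_tasklet() above relies on when it casts its argument back to
 * a struct ipmi_smi pointer.  The function name is hypothetical.
 */
#if 0
static void example_bind_recv_tasklet(struct ipmi_smi *intf)
{
        /* Pass the interface pointer through the tasklet data word. */
        tasklet_init(&intf->recv_tasklet, smi_recv_tasklet,
                     (unsigned long) intf);

        /* Completion paths later kick it with tasklet_schedule(). */
        tasklet_schedule(&intf->recv_tasklet);
}
#endif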
4452
4453 /* Handle a new message from the lower layer. */
4454 void ipmi_smi_msg_received(struct ipmi_smi *intf,
4455                            struct ipmi_smi_msg *msg)
4456 {
4457         unsigned long flags = 0; /* keep us warning-free. */
4458         int run_to_completion = intf->run_to_completion;
4459
4460         if ((msg->data_size >= 2)
4461             && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
4462             && (msg->data[1] == IPMI_SEND_MSG_CMD)
4463             && (msg->user_data == NULL)) {
4464
4465                 if (intf->in_shutdown)
4466                         goto free_msg;
4467
4468                 /*
4469                  * This is the local response to a command send; start
4470                  * the timer for these.  The user_data will not be
4471                  * NULL if this is a response send, and we will let
4472                  * response sends just go through.
4473                  */
4474
4475                 /*
4476                  * Check for errors.  If we get certain errors (ones
4477                  * that basically mean we can try again later), we
4478                  * ignore them and start the timer.  Otherwise we
4479                  * report the error immediately.
4480                  */
4481                 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
4482                     && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
4483                     && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
4484                     && (msg->rsp[2] != IPMI_BUS_ERR)
4485                     && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
4486                         int ch = msg->rsp[3] & 0xf;
4487                         struct ipmi_channel *chans;
4488
4489                         /* Got an error sending the message, handle it. */
4490
4491                         chans = READ_ONCE(intf->channel_list)->c;
4492                         if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
4493                             || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
4494                                 ipmi_inc_stat(intf, sent_lan_command_errs);
4495                         else
4496                                 ipmi_inc_stat(intf, sent_ipmb_command_errs);
4497                         intf_err_seq(intf, msg->msgid, msg->rsp[2]);
4498                 } else
4499                         /* The message was sent; start the timer. */
4500                         intf_start_seq_timer(intf, msg->msgid);
4501
4502 free_msg:
4503                 ipmi_free_smi_msg(msg);
4504         } else {
4505                 /*
4506                  * To preserve message order, we keep a queue and deliver from
4507                  * a tasklet.
4508                  */
4509                 if (!run_to_completion)
4510                         spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4511                 list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
4512                 if (!run_to_completion)
4513                         spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4514                                                flags);
4515         }
4516
4517         if (!run_to_completion)
4518                 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4519         /*
4520          * We can get an asynchronous event or receive message in addition
4521          * to commands we send.
4522          */
4523         if (msg == intf->curr_msg)
4524                 intf->curr_msg = NULL;
4525         if (!run_to_completion)
4526                 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4527
4528         if (run_to_completion)
4529                 smi_recv_tasklet((unsigned long) intf);
4530         else
4531                 tasklet_schedule(&intf->recv_tasklet);
4532 }
4533 EXPORT_SYMBOL(ipmi_smi_msg_received);
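
/*
 * Illustrative sketch, not compiled: the calling convention a lower-layer
 * (SMI) driver follows when a transaction finishes.  It fills in the
 * response bytes of the ipmi_smi_msg it was handed through the sender()
 * handler and passes the message back up here; ownership transfers to the
 * message handler, which frees it or queues it for the receive tasklet.
 * The function and parameter names below are hypothetical.
 */
#if 0
static void example_smi_transaction_done(struct ipmi_smi *intf,
                                         struct ipmi_smi_msg *msg,
                                         const unsigned char *rsp,
                                         unsigned int rsp_len)
{
        /* Copy the raw response from the hardware into the message. */
        memcpy(msg->rsp, rsp, rsp_len);
        msg->rsp_size = rsp_len;

        /* Hand it to the message handler; do not touch msg afterwards. */
        ipmi_smi_msg_received(intf, msg);
}
#endif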
4534
4535 void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
4536 {
4537         if (intf->in_shutdown)
4538                 return;
4539
4540         atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
4541         tasklet_schedule(&intf->recv_tasklet);
4542 }
4543 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
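
/*
 * Illustrative sketch, not compiled: how a client such as a watchdog
 * driver consumes the pretimeout delivered above.  It registers an
 * ipmi_user_hndl whose ipmi_watchdog_pretimeout hook is the one invoked
 * from handle_new_recv_msgs().  The example names are hypothetical;
 * ipmi_create_user() and struct ipmi_user_hndl come from
 * include/linux/ipmi.h.
 */
#if 0
static void example_pretimeout(void *handler_data)
{
        pr_warn("watchdog pretimeout received\n");
}

static void example_recv(struct ipmi_recv_msg *msg, void *user_msg_data)
{
        ipmi_free_recv_msg(msg);
}

static const struct ipmi_user_hndl example_hndl = {
        .ipmi_recv_hndl           = example_recv,
        .ipmi_watchdog_pretimeout = example_pretimeout,
};

static int example_register(unsigned int if_num, struct ipmi_user **user)
{
        return ipmi_create_user(if_num, &example_hndl, NULL, user);
}
#endif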
4544
4545 static struct ipmi_smi_msg *
4546 smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
4547                   unsigned char seq, long seqid)
4548 {
4549         struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
4550         if (!smi_msg)
4551                 /*
4552                  * If we can't allocate the message, just return; we
4553                  * get 4 retries, so this should be OK.
4554                  */
4555                 return NULL;
4556
4557         memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
4558         smi_msg->data_size = recv_msg->msg.data_len;
4559         smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
4560
4561         ipmi_debug_msg("Resend: ", smi_msg->data, smi_msg->data_size);
4562
4563         return smi_msg;
4564 }
4565
4566 static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
4567                               struct list_head *timeouts,
4568                               unsigned long timeout_period,
4569                               int slot, unsigned long *flags,
4570                               bool *need_timer)
4571 {
4572         struct ipmi_recv_msg *msg;
4573
4574         if (intf->in_shutdown)
4575                 return;
4576
4577         if (!ent->inuse)
4578                 return;
4579
4580         if (timeout_period < ent->timeout) {
4581                 ent->timeout -= timeout_period;
4582                 *need_timer = true;
4583                 return;
4584         }
4585
4586         if (ent->retries_left == 0) {
4587                 /* The message has used all its retries. */
4588                 ent->inuse = 0;
4589                 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
4590                 msg = ent->recv_msg;
4591                 list_add_tail(&msg->link, timeouts);
4592                 if (ent->broadcast)
4593                         ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
4594                 else if (is_lan_addr(&ent->recv_msg->addr))
4595                         ipmi_inc_stat(intf, timed_out_lan_commands);
4596                 else
4597                         ipmi_inc_stat(intf, timed_out_ipmb_commands);
4598         } else {
4599                 struct ipmi_smi_msg *smi_msg;
4600                 /* More retries left; send again. */
4601
4602                 *need_timer = true;
4603
4604                 /*
4605                  * Start with the max timer; set to the normal timer after
4606                  * the message is sent.
4607                  */
4608                 ent->timeout = MAX_MSG_TIMEOUT;
4609                 ent->retries_left--;
4610                 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
4611                                             ent->seqid);
4612                 if (!smi_msg) {
4613                         if (is_lan_addr(&ent->recv_msg->addr))
4614                                 ipmi_inc_stat(intf,
4615                                               dropped_rexmit_lan_commands);
4616                         else
4617                                 ipmi_inc_stat(intf,
4618                                               dropped_rexmit_ipmb_commands);
4619                         return;
4620                 }
4621
4622                 spin_unlock_irqrestore(&intf->seq_lock, *flags);
4623
4624                 /*
4625                  * Send the new message.  We send with a zero
4626                  * priority.  It timed out; I doubt time is that
4627                  * critical now, and high priority messages are really
4628                  * only for messages to the local MC, which don't get
4629                  * resent.
4630                  */
4631                 if (intf->handlers) {
4632                         if (is_lan_addr(&ent->recv_msg->addr))
4633                                 ipmi_inc_stat(intf,
4634                                               retransmitted_lan_commands);
4635                         else
4636                                 ipmi_inc_stat(intf,
4637                                               retransmitted_ipmb_commands);
4638
4639                         smi_send(intf, intf->handlers, smi_msg, 0);
4640                 } else
4641                         ipmi_free_smi_msg(smi_msg);
4642
4643                 spin_lock_irqsave(&intf->seq_lock, *flags);
4644         }
4645 }
4646
4647 static bool ipmi_timeout_handler(struct ipmi_smi *intf,
4648                                  unsigned long timeout_period)
4649 {
4650         struct list_head     timeouts;
4651         struct ipmi_recv_msg *msg, *msg2;
4652         unsigned long        flags;
4653         int                  i;
4654         bool                 need_timer = false;
4655
4656         if (!intf->bmc_registered) {
4657                 kref_get(&intf->refcount);
4658                 if (!schedule_work(&intf->bmc_reg_work)) {
4659                         kref_put(&intf->refcount, intf_free);
4660                         need_timer = true;
4661                 }
4662         }
4663
4664         /*
4665          * Go through the seq table and find any messages that
4666          * have timed out, putting them in the timeouts
4667          * list.
4668          */
4669         INIT_LIST_HEAD(&timeouts);
4670         spin_lock_irqsave(&intf->seq_lock, flags);
4671         if (intf->ipmb_maintenance_mode_timeout) {
4672                 if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
4673                         intf->ipmb_maintenance_mode_timeout = 0;
4674                 else
4675                         intf->ipmb_maintenance_mode_timeout -= timeout_period;
4676         }
4677         for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
4678                 check_msg_timeout(intf, &intf->seq_table[i],
4679                                   &timeouts, timeout_period, i,
4680                                   &flags, &need_timer);
4681         spin_unlock_irqrestore(&intf->seq_lock, flags);
4682
4683         list_for_each_entry_safe(msg, msg2, &timeouts, link)
4684                 deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
4685
4686         /*
4687          * Maintenance mode handling.  Check the timeout
4688          * optimistically before we claim the lock.  It may
4689          * mean a timeout gets missed occasionally, but that
4690          * only means the timeout gets extended by one period.
4691          * No big deal, and it avoids the lock
4692          * most of the time.
4693          */
4694         if (intf->auto_maintenance_timeout > 0) {
4695                 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
4696                 if (intf->auto_maintenance_timeout > 0) {
4697                         intf->auto_maintenance_timeout
4698                                 -= timeout_period;
4699                         if (!intf->maintenance_mode
4700                             && (intf->auto_maintenance_timeout <= 0)) {
4701                                 intf->maintenance_mode_enable = false;
4702                                 maintenance_mode_update(intf);
4703                         }
4704                 }
4705                 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
4706                                        flags);
4707         }
4708
4709         tasklet_schedule(&intf->recv_tasklet);
4710
4711         return need_timer;
4712 }
4713
4714 static void ipmi_request_event(struct ipmi_smi *intf)
4715 {
4716         /* No event requests when in maintenance mode. */
4717         if (intf->maintenance_mode_enable)
4718                 return;
4719
4720         if (!intf->in_shutdown)
4721                 intf->handlers->request_events(intf->send_info);
4722 }
4723
4724 static struct timer_list ipmi_timer;
4725
4726 static atomic_t stop_operation;
4727
4728 static void ipmi_timeout(struct timer_list *unused)
4729 {
4730         struct ipmi_smi *intf;
4731         bool need_timer = false;
4732         int index;
4733
4734         if (atomic_read(&stop_operation))
4735                 return;
4736
4737         index = srcu_read_lock(&ipmi_interfaces_srcu);
4738         list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4739                 if (atomic_read(&intf->event_waiters)) {
4740                         intf->ticks_to_req_ev--;
4741                         if (intf->ticks_to_req_ev == 0) {
4742                                 ipmi_request_event(intf);
4743                                 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
4744                         }
4745                         need_timer = true;
4746                 }
4747
4748                 need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
4749         }
4750         srcu_read_unlock(&ipmi_interfaces_srcu, index);
4751
4752         if (need_timer)
4753                 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4754 }
4755
4756 static void need_waiter(struct ipmi_smi *intf)
4757 {
4758         /* Racy, but worst case we start the timer twice. */
4759         if (!timer_pending(&ipmi_timer))
4760                 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4761 }
4762
4763 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
4764 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
4765
4766 static void free_smi_msg(struct ipmi_smi_msg *msg)
4767 {
4768         atomic_dec(&smi_msg_inuse_count);
4769         kfree(msg);
4770 }
4771
4772 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
4773 {
4774         struct ipmi_smi_msg *rv;
4775         rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
4776         if (rv) {
4777                 rv->done = free_smi_msg;
4778                 rv->user_data = NULL;
4779                 atomic_inc(&smi_msg_inuse_count);
4780         }
4781         return rv;
4782 }
4783 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
4784
4785 static void free_recv_msg(struct ipmi_recv_msg *msg)
4786 {
4787         atomic_dec(&recv_msg_inuse_count);
4788         kfree(msg);
4789 }
4790
4791 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
4792 {
4793         struct ipmi_recv_msg *rv;
4794
4795         rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
4796         if (rv) {
4797                 rv->user = NULL;
4798                 rv->done = free_recv_msg;
4799                 atomic_inc(&recv_msg_inuse_count);
4800         }
4801         return rv;
4802 }
4803
4804 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
4805 {
4806         if (msg->user)
4807                 kref_put(&msg->user->refcount, free_user);
4808         msg->done(msg);
4809 }
4810 EXPORT_SYMBOL(ipmi_free_recv_msg);
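
/*
 * Illustrative sketch, not compiled: the done() convention behind the
 * allocation helpers above.  Freeing always goes through msg->done(msg),
 * so a message that is not heap-allocated (for example the on-stack
 * messages used by the panic path below) can supply its own handler
 * instead of the kfree()-based one.  Names here are hypothetical.
 */
#if 0
static void example_embedded_done(struct ipmi_smi_msg *msg)
{
        /* Message is embedded in a larger object; nothing to kfree(). */
}

static void example_use_embedded_msg(struct ipmi_smi_msg *msg)
{
        msg->done = example_embedded_done;
        /* ... hand msg to the send path; done() runs when it completes. */
}
#endif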
4811
4812 static atomic_t panic_done_count = ATOMIC_INIT(0);
4813
4814 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
4815 {
4816         atomic_dec(&panic_done_count);
4817 }
4818
4819 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
4820 {
4821         atomic_dec(&panic_done_count);
4822 }
4823
4824 /*
4825  * Inside a panic, send a message and wait for a response.
4826  */
4827 static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
4828                                         struct ipmi_addr *addr,
4829                                         struct kernel_ipmi_msg *msg)
4830 {
4831         struct ipmi_smi_msg  smi_msg;
4832         struct ipmi_recv_msg recv_msg;
4833         int rv;
4834
4835         smi_msg.done = dummy_smi_done_handler;
4836         recv_msg.done = dummy_recv_done_handler;
4837         atomic_add(2, &panic_done_count);
4838         rv = i_ipmi_request(NULL,
4839                             intf,
4840                             addr,
4841                             0,
4842                             msg,
4843                             intf,
4844                             &smi_msg,
4845                             &recv_msg,
4846                             0,
4847                             intf->addrinfo[0].address,
4848                             intf->addrinfo[0].lun,
4849                             0, 1); /* Don't retry, and don't wait. */
4850         if (rv)
4851                 atomic_sub(2, &panic_done_count);
4852         else if (intf->handlers->flush_messages)
4853                 intf->handlers->flush_messages(intf->send_info);
4854
4855         while (atomic_read(&panic_done_count) != 0)
4856                 ipmi_poll(intf);
4857 }
4858
4859 static void event_receiver_fetcher(struct ipmi_smi *intf,
4860                                    struct ipmi_recv_msg *msg)
4861 {
4862         if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
4863             && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
4864             && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
4865             && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
4866                 /* A get event receiver command, save it. */
4867                 intf->event_receiver = msg->msg.data[1];
4868                 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
4869         }
4870 }
4871
4872 static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
4873 {
4874         if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
4875             && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
4876             && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
4877             && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
4878                 /*
4879                  * A get device id command, save if we are an event
4880                  * receiver or generator.
4881                  */
4882                 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
4883                 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
4884         }
4885 }
4886
4887 static void send_panic_events(struct ipmi_smi *intf, char *str)
4888 {
4889         struct kernel_ipmi_msg msg;
4890         unsigned char data[16];
4891         struct ipmi_system_interface_addr *si;
4892         struct ipmi_addr addr;
4893         char *p = str;
4894         struct ipmi_ipmb_addr *ipmb;
4895         int j;
4896
4897         if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
4898                 return;
4899
4900         si = (struct ipmi_system_interface_addr *) &addr;
4901         si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4902         si->channel = IPMI_BMC_CHANNEL;
4903         si->lun = 0;
4904
4905         /* Fill in an event reporting that we have failed. */
4906         msg.netfn = 0x04; /* Sensor or Event. */
4907         msg.cmd = 2; /* Platform event command. */
4908         msg.data = data;
4909         msg.data_len = 8;
4910         data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
4911         data[1] = 0x03; /* This is for IPMI 1.0. */
4912         data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
4913         data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
4914         data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
4915
4916         /*
4917          * Put a few breadcrumbs in.  Hopefully later we can add more things
4918          * to make the panic events more useful.
4919          */
4920         if (str) {
4921                 data[3] = str[0];
4922                 data[6] = str[1];
4923                 data[7] = str[2];
4924         }
4925
4926         /* Send the event announcing the panic. */
4927         ipmi_panic_request_and_wait(intf, &addr, &msg);
4928
4929         /*
4930          * On every interface, dump a bunch of OEM events holding the
4931          * string.
4932          */
4933         if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
4934                 return;
4935
4936         /*
4937          * intf_num is used as a marker to tell if the
4938          * interface is valid.  Thus we need a read barrier to
4939          * make sure data fetched before checking intf_num
4940          * won't be used.
4941          */
4942         smp_rmb();
4943
4944         /*
4945          * First job here is to figure out where to send the
4946          * OEM events.  There's no way in IPMI to send OEM
4947          * events using an event send command, so we have to
4948          * find the SEL to put them in and stick them in
4949          * there.
4950          */
4951
4952         /* Get capabilities from the get device id. */
4953         intf->local_sel_device = 0;
4954         intf->local_event_generator = 0;
4955         intf->event_receiver = 0;
4956
4957         /* Request the device info from the local MC. */
4958         msg.netfn = IPMI_NETFN_APP_REQUEST;
4959         msg.cmd = IPMI_GET_DEVICE_ID_CMD;
4960         msg.data = NULL;
4961         msg.data_len = 0;
4962         intf->null_user_handler = device_id_fetcher;
4963         ipmi_panic_request_and_wait(intf, &addr, &msg);
4964
4965         if (intf->local_event_generator) {
4966                 /* Request the event receiver from the local MC. */
4967                 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
4968                 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
4969                 msg.data = NULL;
4970                 msg.data_len = 0;
4971                 intf->null_user_handler = event_receiver_fetcher;
4972                 ipmi_panic_request_and_wait(intf, &addr, &msg);
4973         }
4974         intf->null_user_handler = NULL;
4975
4976         /*
4977          * Validate the event receiver.  The low bit must not
4978          * be 1 (it must be a valid IPMB address), it cannot
4979          * be zero, and it must not be my address.
4980          */
4981         if (((intf->event_receiver & 1) == 0)
4982             && (intf->event_receiver != 0)
4983             && (intf->event_receiver != intf->addrinfo[0].address)) {
4984                 /*
4985                  * The event receiver is valid, send an IPMB
4986                  * message.
4987                  */
4988                 ipmb = (struct ipmi_ipmb_addr *) &addr;
4989                 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
4990                 ipmb->channel = 0; /* FIXME - is this right? */
4991                 ipmb->lun = intf->event_receiver_lun;
4992                 ipmb->slave_addr = intf->event_receiver;
4993         } else if (intf->local_sel_device) {
4994                 /*
4995                  * The event receiver was not valid (or was
4996                  * me), but I am an SEL device; just dump it
4997                  * in my SEL.
4998                  */
4999                 si = (struct ipmi_system_interface_addr *) &addr;
5000                 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
5001                 si->channel = IPMI_BMC_CHANNEL;
5002                 si->lun = 0;
5003         } else
5004                 return; /* Nowhere to send the event. */
5005
5006         msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
5007         msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
5008         msg.data = data;
5009         msg.data_len = 16;
5010
5011         j = 0;
5012         while (*p) {
5013                 int size = strlen(p);
5014
5015                 if (size > 11)
5016                         size = 11;
5017                 data[0] = 0;
5018                 data[1] = 0;
5019                 data[2] = 0xf0; /* OEM event without timestamp. */
5020                 data[3] = intf->addrinfo[0].address;
5021                 data[4] = j++; /* sequence # */
5022                 /*
5023                  * Always give 11 bytes, so strncpy will fill
5024                  * it with zeroes for me.
5025                  */
5026                 strncpy(data+5, p, 11);
5027                 p += size;
5028
5029                 ipmi_panic_request_and_wait(intf, &addr, &msg);
5030         }
5031 }
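
/*
 * Illustrative sketch, not compiled: the 16-byte OEM SEL record built in
 * the loop above, written out as a struct for clarity.  The field comments
 * restate what send_panic_events() places in each byte; the struct itself
 * is hypothetical and is not used by this driver.
 */
#if 0
struct example_panic_sel_record {
        unsigned char record_id[2];     /* Sent as zero by this code. */
        unsigned char record_type;      /* 0xf0: OEM record, no timestamp. */
        unsigned char slave_addr;       /* Our own address (addrinfo[0]). */
        unsigned char seq;              /* Sequence number of this chunk. */
        unsigned char text[11];         /* Up to 11 bytes of the panic string. */
};
#endif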
5032
5033 static int has_panicked;
5034
5035 static int panic_event(struct notifier_block *this,
5036                        unsigned long         event,
5037                        void                  *ptr)
5038 {
5039         struct ipmi_smi *intf;
5040         struct ipmi_user *user;
5041
5042         if (has_panicked)
5043                 return NOTIFY_DONE;
5044         has_panicked = 1;
5045
5046         /* For every registered interface, set it to run to completion. */
5047         list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
5048                 if (!intf->handlers || intf->intf_num == -1)
5049                         /* Interface is not ready. */
5050                         continue;
5051
5052                 if (!intf->handlers->poll)
5053                         continue;
5054
5055                 /*
5056                  * If we were interrupted while locking xmit_msgs_lock or
5057                  * waiting_rcv_msgs_lock, the corresponding list may be
5058                  * corrupted.  In this case, drop items on the list for
5059                  * corrupted.  In this case, drop the items on the list
5060                  * for safety.
5061                 if (!spin_trylock(&intf->xmit_msgs_lock)) {
5062                         INIT_LIST_HEAD(&intf->xmit_msgs);
5063                         INIT_LIST_HEAD(&intf->hp_xmit_msgs);
5064                 } else
5065                         spin_unlock(&intf->xmit_msgs_lock);
5066
5067                 if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
5068                         INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
5069                 else
5070                         spin_unlock(&intf->waiting_rcv_msgs_lock);
5071
5072                 intf->run_to_completion = 1;
5073                 if (intf->handlers->set_run_to_completion)
5074                         intf->handlers->set_run_to_completion(intf->send_info,
5075                                                               1);
5076
5077                 list_for_each_entry_rcu(user, &intf->users, link) {
5078                         if (user->handler->ipmi_panic_handler)
5079                                 user->handler->ipmi_panic_handler(
5080                                         user->handler_data);
5081                 }
5082
5083                 send_panic_events(intf, ptr);
5084         }
5085
5086         return NOTIFY_DONE;
5087 }
5088
5089 /* Must be called with ipmi_interfaces_mutex held. */
5090 static int ipmi_register_driver(void)
5091 {
5092         int rv;
5093
5094         if (drvregistered)
5095                 return 0;
5096
5097         rv = driver_register(&ipmidriver.driver);
5098         if (rv)
5099                 pr_err("Could not register IPMI driver\n");
5100         else
5101                 drvregistered = true;
5102         return rv;
5103 }
5104
5105 static struct notifier_block panic_block = {
5106         .notifier_call  = panic_event,
5107         .next           = NULL,
5108         .priority       = 200   /* priority: INT_MAX >= x >= 0 */
5109 };
5110
5111 static int ipmi_init_msghandler(void)
5112 {
5113         int rv;
5114
5115         mutex_lock(&ipmi_interfaces_mutex);
5116         rv = ipmi_register_driver();
5117         if (rv)
5118                 goto out;
5119         if (initialized)
5120                 goto out;
5121
5122         init_srcu_struct(&ipmi_interfaces_srcu);
5123
5124         timer_setup(&ipmi_timer, ipmi_timeout, 0);
5125         mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
5126
5127         atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
5128
5129         initialized = true;
5130
5131 out:
5132         mutex_unlock(&ipmi_interfaces_mutex);
5133         return rv;
5134 }
5135
5136 static int __init ipmi_init_msghandler_mod(void)
5137 {
5138         int rv;
5139
5140         pr_info("version " IPMI_DRIVER_VERSION "\n");
5141
5142         mutex_lock(&ipmi_interfaces_mutex);
5143         rv = ipmi_register_driver();
5144         mutex_unlock(&ipmi_interfaces_mutex);
5145
5146         return rv;
5147 }
5148
5149 static void __exit cleanup_ipmi(void)
5150 {
5151         int count;
5152
5153         if (initialized) {
5154                 atomic_notifier_chain_unregister(&panic_notifier_list,
5155                                                  &panic_block);
5156
5157                 /*
5158                  * This can't be called if any interfaces exist, so no worry
5159                  * about shutting down the interfaces.
5160                  */
5161
5162                 /*
5163                  * Tell the timer to stop, then wait for it to stop.  This
5164                  * avoids problems with race conditions removing the timer
5165                  * here.
5166                  */
5167                 atomic_inc(&stop_operation);
5168                 del_timer_sync(&ipmi_timer);
5169
5170                 initialized = false;
5171
5172                 /* Check for buffer leaks. */
5173                 count = atomic_read(&smi_msg_inuse_count);
5174                 if (count != 0)
5175                         pr_warn("SMI message count %d at exit\n", count);
5176                 count = atomic_read(&recv_msg_inuse_count);
5177                 if (count != 0)
5178                         pr_warn("recv message count %d at exit\n", count);
5179
5180                 cleanup_srcu_struct(&ipmi_interfaces_srcu);
5181         }
5182         if (drvregistered)
5183                 driver_unregister(&ipmidriver.driver);
5184 }
5185 module_exit(cleanup_ipmi);
5186
5187 module_init(ipmi_init_msghandler_mod);
5188 MODULE_LICENSE("GPL");
5189 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
5190 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
5191                    " interface.");
5192 MODULE_VERSION(IPMI_DRIVER_VERSION);
5193 MODULE_SOFTDEP("post: ipmi_devintf");