GNU Linux-libre 4.19.207-gnu1
drivers/char/ipmi/ipmi_msghandler.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * ipmi_msghandler.c
4  *
5  * Incoming and outgoing message routing for an IPMI interface.
6  *
7  * Author: MontaVista Software, Inc.
8  *         Corey Minyard <minyard@mvista.com>
9  *         source@mvista.com
10  *
11  * Copyright 2002 MontaVista Software Inc.
12  */
13
14 #include <linux/module.h>
15 #include <linux/errno.h>
16 #include <linux/poll.h>
17 #include <linux/sched.h>
18 #include <linux/seq_file.h>
19 #include <linux/spinlock.h>
20 #include <linux/mutex.h>
21 #include <linux/slab.h>
22 #include <linux/ipmi.h>
23 #include <linux/ipmi_smi.h>
24 #include <linux/notifier.h>
25 #include <linux/init.h>
26 #include <linux/proc_fs.h>
27 #include <linux/rcupdate.h>
28 #include <linux/interrupt.h>
29 #include <linux/moduleparam.h>
30 #include <linux/workqueue.h>
31 #include <linux/uuid.h>
32 #include <linux/nospec.h>
33 #include <linux/vmalloc.h>
34
35 #define PFX "IPMI message handler: "
36
37 #define IPMI_DRIVER_VERSION "39.2"
38
39 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
40 static int ipmi_init_msghandler(void);
41 static void smi_recv_tasklet(unsigned long);
42 static void handle_new_recv_msgs(struct ipmi_smi *intf);
43 static void need_waiter(struct ipmi_smi *intf);
44 static int handle_one_recv_msg(struct ipmi_smi *intf,
45                                struct ipmi_smi_msg *msg);
46
47 #ifdef DEBUG
48 static void ipmi_debug_msg(const char *title, unsigned char *data,
49                            unsigned int len)
50 {
51         int i, pos;
52         char buf[100];
53
54         pos = scnprintf(buf, sizeof(buf), "%s: ", title);
55         for (i = 0; i < len; i++)
56                 pos += scnprintf(buf + pos, sizeof(buf) - pos,
57                                 " %2.2x", data[i]);
58         pr_debug("%s\n", buf);
59 }
60 #else
61 static void ipmi_debug_msg(const char *title, unsigned char *data,
62                            unsigned int len)
63 { }
64 #endif
65
66 static bool initialized;
67 static bool drvregistered;
68
69 enum ipmi_panic_event_op {
70         IPMI_SEND_PANIC_EVENT_NONE,
71         IPMI_SEND_PANIC_EVENT,
72         IPMI_SEND_PANIC_EVENT_STRING
73 };
74 #ifdef CONFIG_IPMI_PANIC_STRING
75 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
76 #elif defined(CONFIG_IPMI_PANIC_EVENT)
77 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
78 #else
79 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
80 #endif
81 static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;
82
83 static int panic_op_write_handler(const char *val,
84                                   const struct kernel_param *kp)
85 {
86         char valcp[16];
87         char *s;
88
89         strncpy(valcp, val, 15);
90         valcp[15] = '\0';
91
92         s = strstrip(valcp);
93
94         if (strcmp(s, "none") == 0)
95                 ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_NONE;
96         else if (strcmp(s, "event") == 0)
97                 ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT;
98         else if (strcmp(s, "string") == 0)
99                 ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_STRING;
100         else
101                 return -EINVAL;
102
103         return 0;
104 }
105
106 static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
107 {
108         switch (ipmi_send_panic_event) {
109         case IPMI_SEND_PANIC_EVENT_NONE:
110                 strcpy(buffer, "none");
111                 break;
112
113         case IPMI_SEND_PANIC_EVENT:
114                 strcpy(buffer, "event");
115                 break;
116
117         case IPMI_SEND_PANIC_EVENT_STRING:
118                 strcpy(buffer, "string");
119                 break;
120
121         default:
122                 strcpy(buffer, "???");
123                 break;
124         }
125
126         return strlen(buffer);
127 }
128
129 static const struct kernel_param_ops panic_op_ops = {
130         .set = panic_op_write_handler,
131         .get = panic_op_read_handler
132 };
133 module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
134 MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic.  Set to 'none' to disable, 'event' for a single event, or 'string' for a generic event plus the panic string in IPMI OEM events.");
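/*
 * Illustrative usage (a sketch, assuming the usual module name
 * "ipmi_msghandler" and that module parameter sysfs is available):
 *
 *	modprobe ipmi_msghandler panic_op=string
 *	echo event > /sys/module/ipmi_msghandler/parameters/panic_op
 *
 * Any value other than "none", "event" or "string" is rejected with
 * -EINVAL by panic_op_write_handler() above.
 */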
135
136
137 #define MAX_EVENTS_IN_QUEUE     25
138
139 /* Remain in auto-maintenance mode for this amount of time (in ms). */
140 static unsigned long maintenance_mode_timeout_ms = 30000;
141 module_param(maintenance_mode_timeout_ms, ulong, 0644);
142 MODULE_PARM_DESC(maintenance_mode_timeout_ms,
143                  "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");
144
145 /*
146  * Don't let a message sit in a queue forever, always time it with at least
147  * the max message timer.  This is in milliseconds.
148  */
149 #define MAX_MSG_TIMEOUT         60000
150
151 /*
152  * Timeout times below are in milliseconds, and are done off a 1
153  * second timer.  So setting the value to 1000 would mean anything
154  * between 0 and 1000ms.  So really the only reasonable minimum
155  * setting is 2000ms, which is between 1 and 2 seconds.
156  */
157
158 /* The default timeout for message retries. */
159 static unsigned long default_retry_ms = 2000;
160 module_param(default_retry_ms, ulong, 0644);
161 MODULE_PARM_DESC(default_retry_ms,
162                  "The time (milliseconds) between retry sends");
163
164 /* The default timeout for maintenance mode message retries. */
165 static unsigned long default_maintenance_retry_ms = 3000;
166 module_param(default_maintenance_retry_ms, ulong, 0644);
167 MODULE_PARM_DESC(default_maintenance_retry_ms,
168                  "The time (milliseconds) between retry sends in maintenance mode");
169
170 /* The default maximum number of retries */
171 static unsigned int default_max_retries = 4;
172 module_param(default_max_retries, uint, 0644);
173 MODULE_PARM_DESC(default_max_retries,
174                  "The default maximum number of times a send is retried before the message is failed");
175
176 /* Call every ~1000 ms. */
177 #define IPMI_TIMEOUT_TIME       1000
178
179 /* How many jiffies does it take to get to the timeout time. */
180 #define IPMI_TIMEOUT_JIFFIES    ((IPMI_TIMEOUT_TIME * HZ) / 1000)
181
182 /*
183  * Request events from the queue every second (this is the number of
184  * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
185  * future, IPMI will add a way to know immediately if an event is in
186  * the queue and this silliness can go away.
187  */
188 #define IPMI_REQUEST_EV_TIME    (1000 / (IPMI_TIMEOUT_TIME))
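/*
 * Worked example of the arithmetic above (illustrative): with
 * IPMI_TIMEOUT_TIME == 1000 and, for example, HZ == 250,
 * IPMI_TIMEOUT_JIFFIES is (1000 * 250) / 1000 == 250 jiffies (one
 * second), and IPMI_REQUEST_EV_TIME is 1000 / 1000 == 1, i.e. an
 * event request is issued on every timeout tick.
 */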
189
190 /* How long should we cache dynamic device IDs? */
191 #define IPMI_DYN_DEV_ID_EXPIRY  (10 * HZ)
192
193 /*
194  * The main "user" data structure.
195  */
196 struct ipmi_user {
197         struct list_head link;
198
199         /*
200          * Set to NULL when the user is destroyed, a pointer to myself
201          * so srcu_dereference can be used on it.
202          */
203         struct ipmi_user *self;
204         struct srcu_struct release_barrier;
205
206         struct kref refcount;
207
208         /* The upper layer that handles receive messages. */
209         const struct ipmi_user_hndl *handler;
210         void             *handler_data;
211
212         /* The interface this user is bound to. */
213         struct ipmi_smi *intf;
214
215         /* Does this interface receive IPMI events? */
216         bool gets_events;
217
218         /* Free must run in process context for RCU cleanup. */
219         struct work_struct remove_work;
220 };
221
222 static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
223         __acquires(user->release_barrier)
224 {
225         struct ipmi_user *ruser;
226
227         *index = srcu_read_lock(&user->release_barrier);
228         ruser = srcu_dereference(user->self, &user->release_barrier);
229         if (!ruser)
230                 srcu_read_unlock(&user->release_barrier, *index);
231         return ruser;
232 }
233
234 static void release_ipmi_user(struct ipmi_user *user, int index)
235 {
236         srcu_read_unlock(&user->release_barrier, index);
237 }
238
239 struct cmd_rcvr {
240         struct list_head link;
241
242         struct ipmi_user *user;
243         unsigned char netfn;
244         unsigned char cmd;
245         unsigned int  chans;
246
247         /*
248          * This is used to form a linked list during mass deletion.
249          * Since this is in an RCU list, we cannot use the link above
250          * or change any data until the RCU period completes.  So we
251          * use this next variable during mass deletion so we can have
252          * a list and don't have to wait and restart the search on
253          * every individual deletion of a command.
254          */
255         struct cmd_rcvr *next;
256 };
257
258 struct seq_table {
259         unsigned int         inuse : 1;
260         unsigned int         broadcast : 1;
261
262         unsigned long        timeout;
263         unsigned long        orig_timeout;
264         unsigned int         retries_left;
265
266         /*
267          * To verify on an incoming send message response that this is
268          * the message that the response is for, we keep a sequence id
269          * and increment it every time we send a message.
270          */
271         long                 seqid;
272
273         /*
274          * This is held so we can properly respond to the message on a
275          * timeout, and it is used to hold the temporary data for
276          * retransmission, too.
277          */
278         struct ipmi_recv_msg *recv_msg;
279 };
280
281 /*
282  * Store the information in a msgid (long) to allow us to find a
283  * sequence table entry from the msgid.
284  */
285 #define STORE_SEQ_IN_MSGID(seq, seqid) \
286         ((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
287
288 #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
289         do {                                                            \
290                 seq = (((msgid) >> 26) & 0x3f);                         \
291                 seqid = ((msgid) & 0x3ffffff);                          \
292         } while (0)
293
294 #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
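/*
 * Worked example of the msgid packing above (illustrative values):
 * with seq = 5 and seqid = 0x123456, STORE_SEQ_IN_MSGID() yields
 * (5 << 26) | 0x123456 == 0x14123456, and GET_SEQ_FROM_MSGID() on
 * that msgid recovers seq == 5 and seqid == 0x123456.  The sequence
 * number occupies the top 6 of the 32 used bits (0..63, matching
 * IPMI_IPMB_NUM_SEQ below) and the seqid wraps at 26 bits, so
 * NEXT_SEQID(0x3ffffff) == 0.
 */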
295
296 #define IPMI_MAX_CHANNELS       16
297 struct ipmi_channel {
298         unsigned char medium;
299         unsigned char protocol;
300 };
301
302 struct ipmi_channel_set {
303         struct ipmi_channel c[IPMI_MAX_CHANNELS];
304 };
305
306 struct ipmi_my_addrinfo {
307         /*
308          * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
309          * but may be changed by the user.
310          */
311         unsigned char address;
312
313         /*
314          * My LUN.  This should generally stay the SMS LUN, but just in
315          * case...
316          */
317         unsigned char lun;
318 };
319
320 /*
321  * Note that the product id, manufacturer id, guid, and device id are
322  * immutable in this structure, so dyn_mutex is not required for
323  * accessing those.  If those change on a BMC, a new BMC is allocated.
324  */
325 struct bmc_device {
326         struct platform_device pdev;
327         struct list_head       intfs; /* Interfaces on this BMC. */
328         struct ipmi_device_id  id;
329         struct ipmi_device_id  fetch_id;
330         int                    dyn_id_set;
331         unsigned long          dyn_id_expiry;
332         struct mutex           dyn_mutex; /* Protects id, intfs, & dyn* */
333         guid_t                 guid;
334         guid_t                 fetch_guid;
335         int                    dyn_guid_set;
336         struct kref            usecount;
337         struct work_struct     remove_work;
338 };
339 #define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
340
341 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
342                              struct ipmi_device_id *id,
343                              bool *guid_set, guid_t *guid);
344
345 /*
346  * Various statistics for IPMI, these index stats[] in the ipmi_smi
347  * structure.
348  */
349 enum ipmi_stat_indexes {
350         /* Commands we got from the user that were invalid. */
351         IPMI_STAT_sent_invalid_commands = 0,
352
353         /* Commands we sent to the MC. */
354         IPMI_STAT_sent_local_commands,
355
356         /* Responses from the MC that were delivered to a user. */
357         IPMI_STAT_handled_local_responses,
358
359         /* Responses from the MC that were not delivered to a user. */
360         IPMI_STAT_unhandled_local_responses,
361
362         /* Commands we sent out to the IPMB bus. */
363         IPMI_STAT_sent_ipmb_commands,
364
365         /* Commands sent on the IPMB that had errors on the SEND CMD */
366         IPMI_STAT_sent_ipmb_command_errs,
367
368         /* Each retransmit increments this count. */
369         IPMI_STAT_retransmitted_ipmb_commands,
370
371         /*
372          * When a message times out (runs out of retransmits) this is
373          * incremented.
374          */
375         IPMI_STAT_timed_out_ipmb_commands,
376
377         /*
378          * This is like above, but for broadcasts.  Broadcasts are
379          * *not* included in the above count (they are expected to
380          * time out).
381          */
382         IPMI_STAT_timed_out_ipmb_broadcasts,
383
384         /* Responses I have sent to the IPMB bus. */
385         IPMI_STAT_sent_ipmb_responses,
386
387         /* The response was delivered to the user. */
388         IPMI_STAT_handled_ipmb_responses,
389
390         /* The response had invalid data in it. */
391         IPMI_STAT_invalid_ipmb_responses,
392
393         /* The response didn't have anyone waiting for it. */
394         IPMI_STAT_unhandled_ipmb_responses,
395
396         /* Commands we sent out over the LAN. */
397         IPMI_STAT_sent_lan_commands,
398
399         /* Commands sent over the LAN that had errors on the SEND CMD */
400         IPMI_STAT_sent_lan_command_errs,
401
402         /* Each retransmit increments this count. */
403         IPMI_STAT_retransmitted_lan_commands,
404
405         /*
406          * When a message times out (runs out of retransmits) this is
407          * incremented.
408          */
409         IPMI_STAT_timed_out_lan_commands,
410
411         /* Responses I have sent over the LAN. */
412         IPMI_STAT_sent_lan_responses,
413
414         /* The response was delivered to the user. */
415         IPMI_STAT_handled_lan_responses,
416
417         /* The response had invalid data in it. */
418         IPMI_STAT_invalid_lan_responses,
419
420         /* The response didn't have anyone waiting for it. */
421         IPMI_STAT_unhandled_lan_responses,
422
423         /* The command was delivered to the user. */
424         IPMI_STAT_handled_commands,
425
426         /* The command had invalid data in it. */
427         IPMI_STAT_invalid_commands,
428
429         /* The command didn't have anyone waiting for it. */
430         IPMI_STAT_unhandled_commands,
431
432         /* Invalid data in an event. */
433         IPMI_STAT_invalid_events,
434
435         /* Events that were received with the proper format. */
436         IPMI_STAT_events,
437
438         /* Retransmissions on IPMB that failed. */
439         IPMI_STAT_dropped_rexmit_ipmb_commands,
440
441         /* Retransmissions on LAN that failed. */
442         IPMI_STAT_dropped_rexmit_lan_commands,
443
444         /* This *must* remain last, add new values above this. */
445         IPMI_NUM_STATS
446 };
447
448
449 #define IPMI_IPMB_NUM_SEQ       64
450 struct ipmi_smi {
451         struct module *owner;
452
453         /* What interface number are we? */
454         int intf_num;
455
456         struct kref refcount;
457
458         /* Set when the interface is being unregistered. */
459         bool in_shutdown;
460
461         /* Used for a list of interfaces. */
462         struct list_head link;
463
464         /*
465          * The list of upper layers that are using me.  seq_lock write
466          * protects this.  Read protection is with srcu.
467          */
468         struct list_head users;
469         struct srcu_struct users_srcu;
470
471         /* Used for wake ups at startup. */
472         wait_queue_head_t waitq;
473
474         /*
475          * Prevents the interface from being unregistered when the
476          * interface is used by being looked up through the BMC
477          * structure.
478          */
479         struct mutex bmc_reg_mutex;
480
481         struct bmc_device tmp_bmc;
482         struct bmc_device *bmc;
483         bool bmc_registered;
484         struct list_head bmc_link;
485         char *my_dev_name;
486         bool in_bmc_register;  /* Handle recursive situations.  Yuck. */
487         struct work_struct bmc_reg_work;
488
489         const struct ipmi_smi_handlers *handlers;
490         void                     *send_info;
491
492         /* Driver-model device for the system interface. */
493         struct device          *si_dev;
494
495         /*
496          * A table of sequence numbers for this interface.  We use the
497          * sequence numbers for IPMB messages that go out of the
498          * interface to match them up with their responses.  A routine
499          * is called periodically to time the items in this list.
500          */
501         spinlock_t       seq_lock;
502         struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
503         int curr_seq;
504
505         /*
506          * Messages queued for delivery.  If delivery fails (out of memory
507          * for instance), they will stay in here to be processed later in a
508          * periodic timer interrupt.  The tasklet is for handling received
509          * messages directly from the handler.
510          */
511         spinlock_t       waiting_rcv_msgs_lock;
512         struct list_head waiting_rcv_msgs;
513         atomic_t         watchdog_pretimeouts_to_deliver;
514         struct tasklet_struct recv_tasklet;
515
516         spinlock_t             xmit_msgs_lock;
517         struct list_head       xmit_msgs;
518         struct ipmi_smi_msg    *curr_msg;
519         struct list_head       hp_xmit_msgs;
520
521         /*
522          * The list of command receivers that are registered for commands
523          * on this interface.
524          */
525         struct mutex     cmd_rcvrs_mutex;
526         struct list_head cmd_rcvrs;
527
528         /*
529          * Events that were queued because no one was there to receive
530          * them.
531          */
532         spinlock_t       events_lock; /* For dealing with event stuff. */
533         struct list_head waiting_events;
534         unsigned int     waiting_events_count; /* How many events in queue? */
535         char             delivering_events;
536         char             event_msg_printed;
537         atomic_t         event_waiters;
538         unsigned int     ticks_to_req_ev;
539         int              last_needs_timer;
540
541         /*
542          * The event receiver for my BMC, only really used at panic
543          * shutdown as a place to store this.
544          */
545         unsigned char event_receiver;
546         unsigned char event_receiver_lun;
547         unsigned char local_sel_device;
548         unsigned char local_event_generator;
549
550         /* For handling of maintenance mode. */
551         int maintenance_mode;
552         bool maintenance_mode_enable;
553         int auto_maintenance_timeout;
554         spinlock_t maintenance_mode_lock; /* Used in a timer... */
555
556         /*
557          * If we are doing maintenance on something on IPMB, extend
558          * the timeout time to avoid timeouts writing firmware and
559          * such.
560          */
561         int ipmb_maintenance_mode_timeout;
562
563         /*
564          * A cheap hack, if this is non-null and a message to an
565          * interface comes in with a NULL user, call this routine with
566          * it.  Note that the message will still be freed by the
567          * caller.  This only works on the system interface.
568          *
569          * Protected by bmc_reg_mutex.
570          */
571         void (*null_user_handler)(struct ipmi_smi *intf,
572                                   struct ipmi_recv_msg *msg);
573
574         /*
575          * When we are scanning the channels for an SMI, this will
576          * tell which channel we are scanning.
577          */
578         int curr_channel;
579
580         /* Channel information */
581         struct ipmi_channel_set *channel_list;
582         unsigned int curr_working_cset; /* First index into the following. */
583         struct ipmi_channel_set wchannels[2];
584         struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
585         bool channels_ready;
586
587         atomic_t stats[IPMI_NUM_STATS];
588
589         /*
590          * run_to_completion duplicate of smb_info, smi_info
591          * and ipmi_serial_info structures. Used to decrease numbers of
592          * parameters passed by "low" level IPMI code.
593          */
594         int run_to_completion;
595 };
596 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
597
598 static void __get_guid(struct ipmi_smi *intf);
599 static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
600 static int __ipmi_bmc_register(struct ipmi_smi *intf,
601                                struct ipmi_device_id *id,
602                                bool guid_set, guid_t *guid, int intf_num);
603 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
604
605
606 /*
607  * The driver model view of the IPMI messaging driver.
608  */
609 static struct platform_driver ipmidriver = {
610         .driver = {
611                 .name = "ipmi",
612                 .bus = &platform_bus_type
613         }
614 };
615 /*
616  * This mutex keeps us from adding the same BMC twice.
617  */
618 static DEFINE_MUTEX(ipmidriver_mutex);
619
620 static LIST_HEAD(ipmi_interfaces);
621 static DEFINE_MUTEX(ipmi_interfaces_mutex);
622 struct srcu_struct ipmi_interfaces_srcu;
623
624 /*
625  * List of watchers that want to know when smi's are added and deleted.
626  */
627 static LIST_HEAD(smi_watchers);
628 static DEFINE_MUTEX(smi_watchers_mutex);
629
630 #define ipmi_inc_stat(intf, stat) \
631         atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
632 #define ipmi_get_stat(intf, stat) \
633         ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
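/*
 * A minimal expansion example (illustrative): ipmi_inc_stat(intf,
 * sent_ipmb_commands) token-pastes to
 * atomic_inc(&(intf)->stats[IPMI_STAT_sent_ipmb_commands]), so the
 * short stat names used throughout this file map directly onto the
 * enum ipmi_stat_indexes entries above.
 */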
634
635 static const char * const addr_src_to_str[] = {
636         "invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
637         "device-tree", "platform"
638 };
639
640 const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
641 {
642         if (src >= SI_LAST)
643                 src = 0; /* Invalid */
644         return addr_src_to_str[src];
645 }
646 EXPORT_SYMBOL(ipmi_addr_src_to_str);
647
648 static int is_lan_addr(struct ipmi_addr *addr)
649 {
650         return addr->addr_type == IPMI_LAN_ADDR_TYPE;
651 }
652
653 static int is_ipmb_addr(struct ipmi_addr *addr)
654 {
655         return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
656 }
657
658 static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
659 {
660         return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
661 }
662
663 static void free_recv_msg_list(struct list_head *q)
664 {
665         struct ipmi_recv_msg *msg, *msg2;
666
667         list_for_each_entry_safe(msg, msg2, q, link) {
668                 list_del(&msg->link);
669                 ipmi_free_recv_msg(msg);
670         }
671 }
672
673 static void free_smi_msg_list(struct list_head *q)
674 {
675         struct ipmi_smi_msg *msg, *msg2;
676
677         list_for_each_entry_safe(msg, msg2, q, link) {
678                 list_del(&msg->link);
679                 ipmi_free_smi_msg(msg);
680         }
681 }
682
683 static void clean_up_interface_data(struct ipmi_smi *intf)
684 {
685         int              i;
686         struct cmd_rcvr  *rcvr, *rcvr2;
687         struct list_head list;
688
689         tasklet_kill(&intf->recv_tasklet);
690
691         free_smi_msg_list(&intf->waiting_rcv_msgs);
692         free_recv_msg_list(&intf->waiting_events);
693
694         /*
695          * Wholesale remove all the entries from the list in the
696          * interface and wait for RCU to know that none are in use.
697          */
698         mutex_lock(&intf->cmd_rcvrs_mutex);
699         INIT_LIST_HEAD(&list);
700         list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
701         mutex_unlock(&intf->cmd_rcvrs_mutex);
702
703         list_for_each_entry_safe(rcvr, rcvr2, &list, link)
704                 kfree(rcvr);
705
706         for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
707                 if ((intf->seq_table[i].inuse)
708                                         && (intf->seq_table[i].recv_msg))
709                         ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
710         }
711 }
712
713 static void intf_free(struct kref *ref)
714 {
715         struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
716
717         clean_up_interface_data(intf);
718         kfree(intf);
719 }
720
721 struct watcher_entry {
722         int              intf_num;
723         struct ipmi_smi  *intf;
724         struct list_head link;
725 };
726
727 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
728 {
729         struct ipmi_smi *intf;
730         int index, rv;
731
732         /*
733          * Make sure the driver is actually initialized, this handles
734          * problems with initialization order.
735          */
736         rv = ipmi_init_msghandler();
737         if (rv)
738                 return rv;
739
740         mutex_lock(&smi_watchers_mutex);
741
742         list_add(&watcher->link, &smi_watchers);
743
744         index = srcu_read_lock(&ipmi_interfaces_srcu);
745         list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
746                 int intf_num = READ_ONCE(intf->intf_num);
747
748                 if (intf_num == -1)
749                         continue;
750                 watcher->new_smi(intf_num, intf->si_dev);
751         }
752         srcu_read_unlock(&ipmi_interfaces_srcu, index);
753
754         mutex_unlock(&smi_watchers_mutex);
755
756         return 0;
757 }
758 EXPORT_SYMBOL(ipmi_smi_watcher_register);
759
760 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
761 {
762         mutex_lock(&smi_watchers_mutex);
763         list_del(&watcher->link);
764         mutex_unlock(&smi_watchers_mutex);
765         return 0;
766 }
767 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
768
769 /*
770  * Must be called with smi_watchers_mutex held.
771  */
772 static void
773 call_smi_watchers(int i, struct device *dev)
774 {
775         struct ipmi_smi_watcher *w;
776
777         mutex_lock(&smi_watchers_mutex);
778         list_for_each_entry(w, &smi_watchers, link) {
779                 if (try_module_get(w->owner)) {
780                         w->new_smi(i, dev);
781                         module_put(w->owner);
782                 }
783         }
784         mutex_unlock(&smi_watchers_mutex);
785 }
786
787 static int
788 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
789 {
790         if (addr1->addr_type != addr2->addr_type)
791                 return 0;
792
793         if (addr1->channel != addr2->channel)
794                 return 0;
795
796         if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
797                 struct ipmi_system_interface_addr *smi_addr1
798                     = (struct ipmi_system_interface_addr *) addr1;
799                 struct ipmi_system_interface_addr *smi_addr2
800                     = (struct ipmi_system_interface_addr *) addr2;
801                 return (smi_addr1->lun == smi_addr2->lun);
802         }
803
804         if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
805                 struct ipmi_ipmb_addr *ipmb_addr1
806                     = (struct ipmi_ipmb_addr *) addr1;
807                 struct ipmi_ipmb_addr *ipmb_addr2
808                     = (struct ipmi_ipmb_addr *) addr2;
809
810                 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
811                         && (ipmb_addr1->lun == ipmb_addr2->lun));
812         }
813
814         if (is_lan_addr(addr1)) {
815                 struct ipmi_lan_addr *lan_addr1
816                         = (struct ipmi_lan_addr *) addr1;
817                 struct ipmi_lan_addr *lan_addr2
818                     = (struct ipmi_lan_addr *) addr2;
819
820                 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
821                         && (lan_addr1->local_SWID == lan_addr2->local_SWID)
822                         && (lan_addr1->session_handle
823                             == lan_addr2->session_handle)
824                         && (lan_addr1->lun == lan_addr2->lun));
825         }
826
827         return 1;
828 }
829
830 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
831 {
832         if (len < sizeof(struct ipmi_system_interface_addr))
833                 return -EINVAL;
834
835         if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
836                 if (addr->channel != IPMI_BMC_CHANNEL)
837                         return -EINVAL;
838                 return 0;
839         }
840
841         if ((addr->channel == IPMI_BMC_CHANNEL)
842             || (addr->channel >= IPMI_MAX_CHANNELS)
843             || (addr->channel < 0))
844                 return -EINVAL;
845
846         if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
847                 if (len < sizeof(struct ipmi_ipmb_addr))
848                         return -EINVAL;
849                 return 0;
850         }
851
852         if (is_lan_addr(addr)) {
853                 if (len < sizeof(struct ipmi_lan_addr))
854                         return -EINVAL;
855                 return 0;
856         }
857
858         return -EINVAL;
859 }
860 EXPORT_SYMBOL(ipmi_validate_addr);
861
862 unsigned int ipmi_addr_length(int addr_type)
863 {
864         if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
865                 return sizeof(struct ipmi_system_interface_addr);
866
867         if ((addr_type == IPMI_IPMB_ADDR_TYPE)
868                         || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
869                 return sizeof(struct ipmi_ipmb_addr);
870
871         if (addr_type == IPMI_LAN_ADDR_TYPE)
872                 return sizeof(struct ipmi_lan_addr);
873
874         return 0;
875 }
876 EXPORT_SYMBOL(ipmi_addr_length);
877
878 static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
879 {
880         int rv = 0;
881
882         if (!msg->user) {
883                 /* Special handling for NULL users. */
884                 if (intf->null_user_handler) {
885                         intf->null_user_handler(intf, msg);
886                 } else {
887                         /* No handler, so give up. */
888                         rv = -EINVAL;
889                 }
890                 ipmi_free_recv_msg(msg);
891         } else if (!oops_in_progress) {
892                 /*
893                  * If we are running in the panic context, calling the
894                  * receive handler doesn't make much sense and has a deadlock
895                  * risk.  At this moment, simply skip it in that case.
896                  */
897                 int index;
898                 struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
899
900                 if (user) {
901                         user->handler->ipmi_recv_hndl(msg, user->handler_data);
902                         release_ipmi_user(user, index);
903                 } else {
904                         /* User went away, give up. */
905                         ipmi_free_recv_msg(msg);
906                         rv = -EINVAL;
907                 }
908         }
909
910         return rv;
911 }
912
913 static void deliver_local_response(struct ipmi_smi *intf,
914                                    struct ipmi_recv_msg *msg)
915 {
916         if (deliver_response(intf, msg))
917                 ipmi_inc_stat(intf, unhandled_local_responses);
918         else
919                 ipmi_inc_stat(intf, handled_local_responses);
920 }
921
922 static void deliver_err_response(struct ipmi_smi *intf,
923                                  struct ipmi_recv_msg *msg, int err)
924 {
925         msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
926         msg->msg_data[0] = err;
927         msg->msg.netfn |= 1; /* Convert to a response. */
928         msg->msg.data_len = 1;
929         msg->msg.data = msg->msg_data;
930         deliver_local_response(intf, msg);
931 }
932
933 /*
934  * Find the next sequence number not being used and add the given
935  * message with the given timeout to the sequence table.  This must be
936  * called with the interface's seq_lock held.
937  */
938 static int intf_next_seq(struct ipmi_smi      *intf,
939                          struct ipmi_recv_msg *recv_msg,
940                          unsigned long        timeout,
941                          int                  retries,
942                          int                  broadcast,
943                          unsigned char        *seq,
944                          long                 *seqid)
945 {
946         int          rv = 0;
947         unsigned int i;
948
949         if (timeout == 0)
950                 timeout = default_retry_ms;
951         if (retries < 0)
952                 retries = default_max_retries;
953
954         for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
955                                         i = (i+1)%IPMI_IPMB_NUM_SEQ) {
956                 if (!intf->seq_table[i].inuse)
957                         break;
958         }
959
960         if (!intf->seq_table[i].inuse) {
961                 intf->seq_table[i].recv_msg = recv_msg;
962
963                 /*
964                  * Start with the maximum timeout, when the send response
965                  * comes in we will start the real timer.
966                  */
967                 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
968                 intf->seq_table[i].orig_timeout = timeout;
969                 intf->seq_table[i].retries_left = retries;
970                 intf->seq_table[i].broadcast = broadcast;
971                 intf->seq_table[i].inuse = 1;
972                 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
973                 *seq = i;
974                 *seqid = intf->seq_table[i].seqid;
975                 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
976                 need_waiter(intf);
977         } else {
978                 rv = -EAGAIN;
979         }
980
981         return rv;
982 }
983
984 /*
985  * Return the receive message for the given sequence number and
986  * release the sequence number so it can be reused.  Some other data
987  * is passed in to be sure the message matches up correctly (to help
989  * guard against messages coming in after their timeout and the
989  * sequence number being reused).
990  */
991 static int intf_find_seq(struct ipmi_smi      *intf,
992                          unsigned char        seq,
993                          short                channel,
994                          unsigned char        cmd,
995                          unsigned char        netfn,
996                          struct ipmi_addr     *addr,
997                          struct ipmi_recv_msg **recv_msg)
998 {
999         int           rv = -ENODEV;
1000         unsigned long flags;
1001
1002         if (seq >= IPMI_IPMB_NUM_SEQ)
1003                 return -EINVAL;
1004
1005         spin_lock_irqsave(&intf->seq_lock, flags);
1006         if (intf->seq_table[seq].inuse) {
1007                 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
1008
1009                 if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
1010                                 && (msg->msg.netfn == netfn)
1011                                 && (ipmi_addr_equal(addr, &msg->addr))) {
1012                         *recv_msg = msg;
1013                         intf->seq_table[seq].inuse = 0;
1014                         rv = 0;
1015                 }
1016         }
1017         spin_unlock_irqrestore(&intf->seq_lock, flags);
1018
1019         return rv;
1020 }
1021
1022
1023 /* Start the timer for a specific sequence table entry. */
1024 static int intf_start_seq_timer(struct ipmi_smi *intf,
1025                                 long       msgid)
1026 {
1027         int           rv = -ENODEV;
1028         unsigned long flags;
1029         unsigned char seq;
1030         unsigned long seqid;
1031
1032
1033         GET_SEQ_FROM_MSGID(msgid, seq, seqid);
1034
1035         spin_lock_irqsave(&intf->seq_lock, flags);
1036         /*
1037          * We do this verification because the user can be deleted
1038          * while a message is outstanding.
1039          */
1040         if ((intf->seq_table[seq].inuse)
1041                                 && (intf->seq_table[seq].seqid == seqid)) {
1042                 struct seq_table *ent = &intf->seq_table[seq];
1043                 ent->timeout = ent->orig_timeout;
1044                 rv = 0;
1045         }
1046         spin_unlock_irqrestore(&intf->seq_lock, flags);
1047
1048         return rv;
1049 }
1050
1051 /* Got an error for the send message for a specific sequence number. */
1052 static int intf_err_seq(struct ipmi_smi *intf,
1053                         long         msgid,
1054                         unsigned int err)
1055 {
1056         int                  rv = -ENODEV;
1057         unsigned long        flags;
1058         unsigned char        seq;
1059         unsigned long        seqid;
1060         struct ipmi_recv_msg *msg = NULL;
1061
1062
1063         GET_SEQ_FROM_MSGID(msgid, seq, seqid);
1064
1065         spin_lock_irqsave(&intf->seq_lock, flags);
1066         /*
1067          * We do this verification because the user can be deleted
1068          * while a message is outstanding.
1069          */
1070         if ((intf->seq_table[seq].inuse)
1071                                 && (intf->seq_table[seq].seqid == seqid)) {
1072                 struct seq_table *ent = &intf->seq_table[seq];
1073
1074                 ent->inuse = 0;
1075                 msg = ent->recv_msg;
1076                 rv = 0;
1077         }
1078         spin_unlock_irqrestore(&intf->seq_lock, flags);
1079
1080         if (msg)
1081                 deliver_err_response(intf, msg, err);
1082
1083         return rv;
1084 }
1085
1086
1087 static void free_user_work(struct work_struct *work)
1088 {
1089         struct ipmi_user *user = container_of(work, struct ipmi_user,
1090                                               remove_work);
1091
1092         cleanup_srcu_struct(&user->release_barrier);
1093         vfree(user);
1094 }
1095
1096 int ipmi_create_user(unsigned int          if_num,
1097                      const struct ipmi_user_hndl *handler,
1098                      void                  *handler_data,
1099                      struct ipmi_user      **user)
1100 {
1101         unsigned long flags;
1102         struct ipmi_user *new_user;
1103         int           rv, index;
1104         struct ipmi_smi *intf;
1105
1106         /*
1107          * There is no module usecount here, because it's not
1108          * required.  Since this can only be used by and called from
1109          * other modules, they will implicitly use this module, and
1110          * thus this can't be removed unless the other modules are
1111          * removed.
1112          */
1113
1114         if (handler == NULL)
1115                 return -EINVAL;
1116
1117         /*
1118          * Make sure the driver is actually initialized, this handles
1119          * problems with initialization order.
1120          */
1121         rv = ipmi_init_msghandler();
1122         if (rv)
1123                 return rv;
1124
1125         new_user = vzalloc(sizeof(*new_user));
1126         if (!new_user)
1127                 return -ENOMEM;
1128
1129         index = srcu_read_lock(&ipmi_interfaces_srcu);
1130         list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1131                 if (intf->intf_num == if_num)
1132                         goto found;
1133         }
1134         /* Not found, return an error */
1135         rv = -EINVAL;
1136         goto out_kfree;
1137
1138  found:
1139         INIT_WORK(&new_user->remove_work, free_user_work);
1140
1141         rv = init_srcu_struct(&new_user->release_barrier);
1142         if (rv)
1143                 goto out_kfree;
1144
1145         if (!try_module_get(intf->owner)) {
1146                 rv = -ENODEV;
1147                 goto out_kfree;
1148         }
1149
1150         /* Note that each existing user holds a refcount to the interface. */
1151         kref_get(&intf->refcount);
1152
1153         kref_init(&new_user->refcount);
1154         new_user->handler = handler;
1155         new_user->handler_data = handler_data;
1156         new_user->intf = intf;
1157         new_user->gets_events = false;
1158
1159         rcu_assign_pointer(new_user->self, new_user);
1160         spin_lock_irqsave(&intf->seq_lock, flags);
1161         list_add_rcu(&new_user->link, &intf->users);
1162         spin_unlock_irqrestore(&intf->seq_lock, flags);
1163         if (handler->ipmi_watchdog_pretimeout) {
1164                 /* User wants pretimeouts, so make sure to watch for them. */
1165                 if (atomic_inc_return(&intf->event_waiters) == 1)
1166                         need_waiter(intf);
1167         }
1168         srcu_read_unlock(&ipmi_interfaces_srcu, index);
1169         *user = new_user;
1170         return 0;
1171
1172 out_kfree:
1173         srcu_read_unlock(&ipmi_interfaces_srcu, index);
1174         vfree(new_user);
1175         return rv;
1176 }
1177 EXPORT_SYMBOL(ipmi_create_user);
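/*
 * A minimal usage sketch (illustrative; the names example_recv,
 * example_hndl and example_user are hypothetical and not part of
 * this driver):
 *
 *	static void example_recv(struct ipmi_recv_msg *msg, void *data)
 *	{
 *		... inspect msg->msg / msg->msg_data ...
 *		ipmi_free_recv_msg(msg);
 *	}
 *
 *	static const struct ipmi_user_hndl example_hndl = {
 *		.ipmi_recv_hndl = example_recv,
 *	};
 *	static struct ipmi_user *example_user;
 *
 *	rv = ipmi_create_user(0, &example_hndl, NULL, &example_user);
 *	if (rv)
 *		return rv;
 *	...
 *	ipmi_destroy_user(example_user);
 *
 * The handler must eventually free each delivered message with
 * ipmi_free_recv_msg(); interface number 0 is only an example.
 */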
1178
1179 int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
1180 {
1181         int rv, index;
1182         struct ipmi_smi *intf;
1183
1184         index = srcu_read_lock(&ipmi_interfaces_srcu);
1185         list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1186                 if (intf->intf_num == if_num)
1187                         goto found;
1188         }
1189         srcu_read_unlock(&ipmi_interfaces_srcu, index);
1190
1191         /* Not found, return an error */
1192         return -EINVAL;
1193
1194 found:
1195         if (!intf->handlers->get_smi_info)
1196                 rv = -ENOTTY;
1197         else
1198                 rv = intf->handlers->get_smi_info(intf->send_info, data);
1199         srcu_read_unlock(&ipmi_interfaces_srcu, index);
1200
1201         return rv;
1202 }
1203 EXPORT_SYMBOL(ipmi_get_smi_info);
1204
1205 static void free_user(struct kref *ref)
1206 {
1207         struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
1208
1209         /* SRCU cleanup must happen in task context. */
1210         schedule_work(&user->remove_work);
1211 }
1212
1213 static void _ipmi_destroy_user(struct ipmi_user *user)
1214 {
1215         struct ipmi_smi  *intf = user->intf;
1216         int              i;
1217         unsigned long    flags;
1218         struct cmd_rcvr  *rcvr;
1219         struct cmd_rcvr  *rcvrs = NULL;
1220
1221         if (!acquire_ipmi_user(user, &i)) {
1222                 /*
1223                  * The user has already been cleaned up, just make sure
1224                  * nothing is using it and return.
1225                  */
1226                 synchronize_srcu(&user->release_barrier);
1227                 return;
1228         }
1229
1230         rcu_assign_pointer(user->self, NULL);
1231         release_ipmi_user(user, i);
1232
1233         synchronize_srcu(&user->release_barrier);
1234
1235         if (user->handler->shutdown)
1236                 user->handler->shutdown(user->handler_data);
1237
1238         if (user->handler->ipmi_watchdog_pretimeout)
1239                 atomic_dec(&intf->event_waiters);
1240
1241         if (user->gets_events)
1242                 atomic_dec(&intf->event_waiters);
1243
1244         /* Remove the user from the interface's sequence table. */
1245         spin_lock_irqsave(&intf->seq_lock, flags);
1246         list_del_rcu(&user->link);
1247
1248         for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
1249                 if (intf->seq_table[i].inuse
1250                     && (intf->seq_table[i].recv_msg->user == user)) {
1251                         intf->seq_table[i].inuse = 0;
1252                         ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
1253                 }
1254         }
1255         spin_unlock_irqrestore(&intf->seq_lock, flags);
1256
1257         /*
1258          * Remove the user from the command receiver's table.  First
1259          * we build a list of everything (not using the standard link,
1260          * since other things may be using it till we do
1261          * synchronize_srcu()) then free everything in that list.
1262          */
1263         mutex_lock(&intf->cmd_rcvrs_mutex);
1264         list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1265                 if (rcvr->user == user) {
1266                         list_del_rcu(&rcvr->link);
1267                         rcvr->next = rcvrs;
1268                         rcvrs = rcvr;
1269                 }
1270         }
1271         mutex_unlock(&intf->cmd_rcvrs_mutex);
1272         synchronize_rcu();
1273         while (rcvrs) {
1274                 rcvr = rcvrs;
1275                 rcvrs = rcvr->next;
1276                 kfree(rcvr);
1277         }
1278
1279         kref_put(&intf->refcount, intf_free);
1280         module_put(intf->owner);
1281 }
1282
1283 int ipmi_destroy_user(struct ipmi_user *user)
1284 {
1285         _ipmi_destroy_user(user);
1286
1287         kref_put(&user->refcount, free_user);
1288
1289         return 0;
1290 }
1291 EXPORT_SYMBOL(ipmi_destroy_user);
1292
1293 int ipmi_get_version(struct ipmi_user *user,
1294                      unsigned char *major,
1295                      unsigned char *minor)
1296 {
1297         struct ipmi_device_id id;
1298         int rv, index;
1299
1300         user = acquire_ipmi_user(user, &index);
1301         if (!user)
1302                 return -ENODEV;
1303
1304         rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
1305         if (!rv) {
1306                 *major = ipmi_version_major(&id);
1307                 *minor = ipmi_version_minor(&id);
1308         }
1309         release_ipmi_user(user, index);
1310
1311         return rv;
1312 }
1313 EXPORT_SYMBOL(ipmi_get_version);
1314
1315 int ipmi_set_my_address(struct ipmi_user *user,
1316                         unsigned int  channel,
1317                         unsigned char address)
1318 {
1319         int index, rv = 0;
1320
1321         user = acquire_ipmi_user(user, &index);
1322         if (!user)
1323                 return -ENODEV;
1324
1325         if (channel >= IPMI_MAX_CHANNELS) {
1326                 rv = -EINVAL;
1327         } else {
1328                 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1329                 user->intf->addrinfo[channel].address = address;
1330         }
1331         release_ipmi_user(user, index);
1332
1333         return rv;
1334 }
1335 EXPORT_SYMBOL(ipmi_set_my_address);
1336
1337 int ipmi_get_my_address(struct ipmi_user *user,
1338                         unsigned int  channel,
1339                         unsigned char *address)
1340 {
1341         int index, rv = 0;
1342
1343         user = acquire_ipmi_user(user, &index);
1344         if (!user)
1345                 return -ENODEV;
1346
1347         if (channel >= IPMI_MAX_CHANNELS) {
1348                 rv = -EINVAL;
1349         } else {
1350                 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1351                 *address = user->intf->addrinfo[channel].address;
1352         }
1353         release_ipmi_user(user, index);
1354
1355         return rv;
1356 }
1357 EXPORT_SYMBOL(ipmi_get_my_address);
1358
1359 int ipmi_set_my_LUN(struct ipmi_user *user,
1360                     unsigned int  channel,
1361                     unsigned char LUN)
1362 {
1363         int index, rv = 0;
1364
1365         user = acquire_ipmi_user(user, &index);
1366         if (!user)
1367                 return -ENODEV;
1368
1369         if (channel >= IPMI_MAX_CHANNELS) {
1370                 rv = -EINVAL;
1371         } else {
1372                 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1373                 user->intf->addrinfo[channel].lun = LUN & 0x3;
1374         }
1375         release_ipmi_user(user, index);
1376
1377         return rv;
1378 }
1379 EXPORT_SYMBOL(ipmi_set_my_LUN);
1380
1381 int ipmi_get_my_LUN(struct ipmi_user *user,
1382                     unsigned int  channel,
1383                     unsigned char *address)
1384 {
1385         int index, rv = 0;
1386
1387         user = acquire_ipmi_user(user, &index);
1388         if (!user)
1389                 return -ENODEV;
1390
1391         if (channel >= IPMI_MAX_CHANNELS) {
1392                 rv = -EINVAL;
1393         } else {
1394                 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1395                 *address = user->intf->addrinfo[channel].lun;
1396         }
1397         release_ipmi_user(user, index);
1398
1399         return rv;
1400 }
1401 EXPORT_SYMBOL(ipmi_get_my_LUN);
1402
1403 int ipmi_get_maintenance_mode(struct ipmi_user *user)
1404 {
1405         int mode, index;
1406         unsigned long flags;
1407
1408         user = acquire_ipmi_user(user, &index);
1409         if (!user)
1410                 return -ENODEV;
1411
1412         spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
1413         mode = user->intf->maintenance_mode;
1414         spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1415         release_ipmi_user(user, index);
1416
1417         return mode;
1418 }
1419 EXPORT_SYMBOL(ipmi_get_maintenance_mode);
1420
1421 static void maintenance_mode_update(struct ipmi_smi *intf)
1422 {
1423         if (intf->handlers->set_maintenance_mode)
1424                 intf->handlers->set_maintenance_mode(
1425                         intf->send_info, intf->maintenance_mode_enable);
1426 }
1427
1428 int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
1429 {
1430         int rv = 0, index;
1431         unsigned long flags;
1432         struct ipmi_smi *intf = user->intf;
1433
1434         user = acquire_ipmi_user(user, &index);
1435         if (!user)
1436                 return -ENODEV;
1437
1438         spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1439         if (intf->maintenance_mode != mode) {
1440                 switch (mode) {
1441                 case IPMI_MAINTENANCE_MODE_AUTO:
1442                         intf->maintenance_mode_enable
1443                                 = (intf->auto_maintenance_timeout > 0);
1444                         break;
1445
1446                 case IPMI_MAINTENANCE_MODE_OFF:
1447                         intf->maintenance_mode_enable = false;
1448                         break;
1449
1450                 case IPMI_MAINTENANCE_MODE_ON:
1451                         intf->maintenance_mode_enable = true;
1452                         break;
1453
1454                 default:
1455                         rv = -EINVAL;
1456                         goto out_unlock;
1457                 }
1458                 intf->maintenance_mode = mode;
1459
1460                 maintenance_mode_update(intf);
1461         }
1462  out_unlock:
1463         spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
1464         release_ipmi_user(user, index);
1465
1466         return rv;
1467 }
1468 EXPORT_SYMBOL(ipmi_set_maintenance_mode);
1469
1470 int ipmi_set_gets_events(struct ipmi_user *user, bool val)
1471 {
1472         unsigned long        flags;
1473         struct ipmi_smi      *intf = user->intf;
1474         struct ipmi_recv_msg *msg, *msg2;
1475         struct list_head     msgs;
1476         int index;
1477
1478         user = acquire_ipmi_user(user, &index);
1479         if (!user)
1480                 return -ENODEV;
1481
1482         INIT_LIST_HEAD(&msgs);
1483
1484         spin_lock_irqsave(&intf->events_lock, flags);
1485         if (user->gets_events == val)
1486                 goto out;
1487
1488         user->gets_events = val;
1489
1490         if (val) {
1491                 if (atomic_inc_return(&intf->event_waiters) == 1)
1492                         need_waiter(intf);
1493         } else {
1494                 atomic_dec(&intf->event_waiters);
1495         }
1496
1497         if (intf->delivering_events)
1498                 /*
1499                  * Another thread is delivering events for this, so
1500                  * let it handle any new events.
1501                  */
1502                 goto out;
1503
1504         /* Deliver any queued events. */
1505         while (user->gets_events && !list_empty(&intf->waiting_events)) {
1506                 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
1507                         list_move_tail(&msg->link, &msgs);
1508                 intf->waiting_events_count = 0;
1509                 if (intf->event_msg_printed) {
1510                         dev_warn(intf->si_dev,
1511                                  PFX "Event queue no longer full\n");
1512                         intf->event_msg_printed = 0;
1513                 }
1514
1515                 intf->delivering_events = 1;
1516                 spin_unlock_irqrestore(&intf->events_lock, flags);
1517
1518                 list_for_each_entry_safe(msg, msg2, &msgs, link) {
1519                         msg->user = user;
1520                         kref_get(&user->refcount);
1521                         deliver_local_response(intf, msg);
1522                 }
1523
1524                 spin_lock_irqsave(&intf->events_lock, flags);
1525                 intf->delivering_events = 0;
1526         }
1527
1528  out:
1529         spin_unlock_irqrestore(&intf->events_lock, flags);
1530         release_ipmi_user(user, index);
1531
1532         return 0;
1533 }
1534 EXPORT_SYMBOL(ipmi_set_gets_events);
1535
1536 static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
1537                                       unsigned char netfn,
1538                                       unsigned char cmd,
1539                                       unsigned char chan)
1540 {
1541         struct cmd_rcvr *rcvr;
1542
1543         list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1544                 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1545                                         && (rcvr->chans & (1 << chan)))
1546                         return rcvr;
1547         }
1548         return NULL;
1549 }
1550
1551 static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
1552                                  unsigned char netfn,
1553                                  unsigned char cmd,
1554                                  unsigned int  chans)
1555 {
1556         struct cmd_rcvr *rcvr;
1557
1558         list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1559                 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1560                                         && (rcvr->chans & chans))
1561                         return 0;
1562         }
1563         return 1;
1564 }
1565
1566 int ipmi_register_for_cmd(struct ipmi_user *user,
1567                           unsigned char netfn,
1568                           unsigned char cmd,
1569                           unsigned int  chans)
1570 {
1571         struct ipmi_smi *intf = user->intf;
1572         struct cmd_rcvr *rcvr;
1573         int rv = 0, index;
1574
1575         user = acquire_ipmi_user(user, &index);
1576         if (!user)
1577                 return -ENODEV;
1578
1579         rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1580         if (!rcvr) {
1581                 rv = -ENOMEM;
1582                 goto out_release;
1583         }
1584         rcvr->cmd = cmd;
1585         rcvr->netfn = netfn;
1586         rcvr->chans = chans;
1587         rcvr->user = user;
1588
1589         mutex_lock(&intf->cmd_rcvrs_mutex);
1590         /* Make sure the command/netfn is not already registered. */
1591         if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1592                 rv = -EBUSY;
1593                 goto out_unlock;
1594         }
1595
1596         if (atomic_inc_return(&intf->event_waiters) == 1)
1597                 need_waiter(intf);
1598
1599         list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1600
1601 out_unlock:
1602         mutex_unlock(&intf->cmd_rcvrs_mutex);
1603         if (rv)
1604                 kfree(rcvr);
1605 out_release:
1606         release_ipmi_user(user, index);
1607
1608         return rv;
1609 }
1610 EXPORT_SYMBOL(ipmi_register_for_cmd);
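
/*
 * Illustrative sketch (not part of the driver): a client that wants to
 * receive a particular incoming command registers its netfn/cmd pair on a
 * channel bitmask and must unregister the same tuple when done.  Matched
 * commands are handed to the user's receive handler.  The netfn/cmd
 * values below are arbitrary placeholders; the block is excluded from the
 * build.
 */
#if 0
static int example_claim_cmd(struct ipmi_user *user)
{
        /* Claim netfn 0x30, cmd 0x01 on channel 0 only. */
        return ipmi_register_for_cmd(user, 0x30, 0x01, 1 << 0);
}

static void example_release_cmd(struct ipmi_user *user)
{
        ipmi_unregister_for_cmd(user, 0x30, 0x01, 1 << 0);
}
#endif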
1611
1612 int ipmi_unregister_for_cmd(struct ipmi_user *user,
1613                             unsigned char netfn,
1614                             unsigned char cmd,
1615                             unsigned int  chans)
1616 {
1617         struct ipmi_smi *intf = user->intf;
1618         struct cmd_rcvr *rcvr;
1619         struct cmd_rcvr *rcvrs = NULL;
1620         int i, rv = -ENOENT, index;
1621
1622         user = acquire_ipmi_user(user, &index);
1623         if (!user)
1624                 return -ENODEV;
1625
1626         mutex_lock(&intf->cmd_rcvrs_mutex);
1627         for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
1628                 if (((1 << i) & chans) == 0)
1629                         continue;
1630                 rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
1631                 if (rcvr == NULL)
1632                         continue;
1633                 if (rcvr->user == user) {
1634                         rv = 0;
1635                         rcvr->chans &= ~chans;
1636                         if (rcvr->chans == 0) {
1637                                 list_del_rcu(&rcvr->link);
1638                                 rcvr->next = rcvrs;
1639                                 rcvrs = rcvr;
1640                         }
1641                 }
1642         }
1643         mutex_unlock(&intf->cmd_rcvrs_mutex);
1644         synchronize_rcu();
1645         release_ipmi_user(user, index);
1646         while (rcvrs) {
1647                 atomic_dec(&intf->event_waiters);
1648                 rcvr = rcvrs;
1649                 rcvrs = rcvr->next;
1650                 kfree(rcvr);
1651         }
1652
1653         return rv;
1654 }
1655 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
1656
1657 static unsigned char
1658 ipmb_checksum(unsigned char *data, int size)
1659 {
1660         unsigned char csum = 0;
1661
1662         for (; size > 0; size--, data++)
1663                 csum += *data;
1664
1665         return -csum;
1666 }
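
/*
 * Worked example (informational): ipmb_checksum() returns the two's
 * complement of the byte sum, so the covered bytes plus the checksum sum
 * to zero modulo 256.  For data = { 0x20, 0x18 } the sum is 0x38 and the
 * returned checksum is 0xc8, since 0x38 + 0xc8 == 0x100 == 0 (mod 256).
 * A receiver verifies a protected field by summing it, checksum byte
 * included, and checking that the result is zero.
 */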
1667
1668 static inline void format_ipmb_msg(struct ipmi_smi_msg   *smi_msg,
1669                                    struct kernel_ipmi_msg *msg,
1670                                    struct ipmi_ipmb_addr *ipmb_addr,
1671                                    long                  msgid,
1672                                    unsigned char         ipmb_seq,
1673                                    int                   broadcast,
1674                                    unsigned char         source_address,
1675                                    unsigned char         source_lun)
1676 {
1677         int i = broadcast;
1678
1679         /* Format the IPMB header data. */
1680         smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1681         smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1682         smi_msg->data[2] = ipmb_addr->channel;
1683         if (broadcast)
1684                 smi_msg->data[3] = 0;
1685         smi_msg->data[i+3] = ipmb_addr->slave_addr;
1686         smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1687         smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
1688         smi_msg->data[i+6] = source_address;
1689         smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1690         smi_msg->data[i+8] = msg->cmd;
1691
1692         /* Now tack on the data to the message. */
1693         if (msg->data_len > 0)
1694                 memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
1695         smi_msg->data_size = msg->data_len + 9;
1696
1697         /* Now calculate the checksum and tack it on. */
1698         smi_msg->data[i+smi_msg->data_size]
1699                 = ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);
1700
1701         /*
1702          * Add on the checksum size and the offset from the
1703          * broadcast.
1704          */
1705         smi_msg->data_size += 1 + i;
1706
1707         smi_msg->msgid = msgid;
1708 }
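
/*
 * Resulting buffer layout for a non-broadcast message (broadcast inserts
 * a 0x00 at data[3] and shifts everything below it down by one):
 *
 *   data[0]   IPMI_NETFN_APP_REQUEST << 2
 *   data[1]   IPMI_SEND_MSG_CMD
 *   data[2]   channel
 *   data[3]   responder slave address
 *   data[4]   (netfn << 2) | responder LUN
 *   data[5]   checksum over data[3..4]
 *   data[6]   requester (source) address
 *   data[7]   (ipmb_seq << 2) | source LUN
 *   data[8]   command
 *   data[9..] payload, followed by a checksum over data[6] onward
 */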
1709
1710 static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
1711                                   struct kernel_ipmi_msg *msg,
1712                                   struct ipmi_lan_addr  *lan_addr,
1713                                   long                  msgid,
1714                                   unsigned char         ipmb_seq,
1715                                   unsigned char         source_lun)
1716 {
1717         /* Format the IPMB header data. */
1718         smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1719         smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1720         smi_msg->data[2] = lan_addr->channel;
1721         smi_msg->data[3] = lan_addr->session_handle;
1722         smi_msg->data[4] = lan_addr->remote_SWID;
1723         smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1724         smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
1725         smi_msg->data[7] = lan_addr->local_SWID;
1726         smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1727         smi_msg->data[9] = msg->cmd;
1728
1729         /* Now tack on the data to the message. */
1730         if (msg->data_len > 0)
1731                 memcpy(&smi_msg->data[10], msg->data, msg->data_len);
1732         smi_msg->data_size = msg->data_len + 10;
1733
1734         /* Now calculate the checksum and tack it on. */
1735         smi_msg->data[smi_msg->data_size]
1736                 = ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);
1737
1738         /*
1739          * Add on the checksum size.  (Unlike the IPMB path above,
1740          * there is no broadcast offset for LAN messages.)
1741          */
1742         smi_msg->data_size += 1;
1743
1744         smi_msg->msgid = msgid;
1745 }
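
/*
 * Resulting buffer layout for the LAN case: data[0..2] carry the same
 * Send Message framing as above, then
 *
 *   data[3]    session handle
 *   data[4]    remote SWID
 *   data[5]    (netfn << 2) | LUN
 *   data[6]    checksum over data[4..5]
 *   data[7]    local SWID
 *   data[8]    (ipmb_seq << 2) | source LUN
 *   data[9]    command
 *   data[10..] payload, followed by a checksum over data[7] onward
 */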
1746
1747 static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
1748                                              struct ipmi_smi_msg *smi_msg,
1749                                              int priority)
1750 {
1751         if (intf->curr_msg) {
1752                 if (priority > 0)
1753                         list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
1754                 else
1755                         list_add_tail(&smi_msg->link, &intf->xmit_msgs);
1756                 smi_msg = NULL;
1757         } else {
1758                 intf->curr_msg = smi_msg;
1759         }
1760
1761         return smi_msg;
1762 }
1763
1765 static void smi_send(struct ipmi_smi *intf,
1766                      const struct ipmi_smi_handlers *handlers,
1767                      struct ipmi_smi_msg *smi_msg, int priority)
1768 {
1769         int run_to_completion = intf->run_to_completion;
1770
1771         if (run_to_completion) {
1772                 smi_msg = smi_add_send_msg(intf, smi_msg, priority);
1773         } else {
1774                 unsigned long flags;
1775
1776                 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
1777                 smi_msg = smi_add_send_msg(intf, smi_msg, priority);
1778                 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
1779         }
1780
1781         if (smi_msg)
1782                 handlers->sender(intf->send_info, smi_msg);
1783 }
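
/*
 * Queueing note: when the interface is idle (intf->curr_msg was NULL),
 * smi_add_send_msg() claims the slot and hands the message back, and
 * smi_send() passes it straight to the lower layer's sender.  Otherwise
 * the message is queued behind curr_msg (hp_xmit_msgs for priority > 0,
 * xmit_msgs otherwise) and NULL is returned, so nothing is sent
 * immediately.
 */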
1784
1785 static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
1786 {
1787         return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
1788                  && ((msg->cmd == IPMI_COLD_RESET_CMD)
1789                      || (msg->cmd == IPMI_WARM_RESET_CMD)))
1790                 || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
1791 }
1792
1793 static int i_ipmi_req_sysintf(struct ipmi_smi        *intf,
1794                               struct ipmi_addr       *addr,
1795                               long                   msgid,
1796                               struct kernel_ipmi_msg *msg,
1797                               struct ipmi_smi_msg    *smi_msg,
1798                               struct ipmi_recv_msg   *recv_msg,
1799                               int                    retries,
1800                               unsigned int           retry_time_ms)
1801 {
1802         struct ipmi_system_interface_addr *smi_addr;
1803
1804         if (msg->netfn & 1)
1805                 /* Responses are not allowed to the SMI. */
1806                 return -EINVAL;
1807
1808         smi_addr = (struct ipmi_system_interface_addr *) addr;
1809         if (smi_addr->lun > 3) {
1810                 ipmi_inc_stat(intf, sent_invalid_commands);
1811                 return -EINVAL;
1812         }
1813
1814         memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1815
1816         if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1817             && ((msg->cmd == IPMI_SEND_MSG_CMD)
1818                 || (msg->cmd == IPMI_GET_MSG_CMD)
1819                 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
1820                 /*
1821                  * We don't let the user do these, since we manage
1822                  * the sequence numbers.
1823                  */
1824                 ipmi_inc_stat(intf, sent_invalid_commands);
1825                 return -EINVAL;
1826         }
1827
1828         if (is_maintenance_mode_cmd(msg)) {
1829                 unsigned long flags;
1830
1831                 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1832                 intf->auto_maintenance_timeout
1833                         = maintenance_mode_timeout_ms;
1834                 if (!intf->maintenance_mode
1835                     && !intf->maintenance_mode_enable) {
1836                         intf->maintenance_mode_enable = true;
1837                         maintenance_mode_update(intf);
1838                 }
1839                 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
1840                                        flags);
1841         }
1842
1843         if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
1844                 ipmi_inc_stat(intf, sent_invalid_commands);
1845                 return -EMSGSIZE;
1846         }
1847
1848         smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1849         smi_msg->data[1] = msg->cmd;
1850         smi_msg->msgid = msgid;
1851         smi_msg->user_data = recv_msg;
1852         if (msg->data_len > 0)
1853                 memcpy(&smi_msg->data[2], msg->data, msg->data_len);
1854         smi_msg->data_size = msg->data_len + 2;
1855         ipmi_inc_stat(intf, sent_local_commands);
1856
1857         return 0;
1858 }
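
/*
 * For the system interface case the message goes to the SMI as-is:
 * data[0] is (netfn << 2) | LUN, data[1] is the command and the payload
 * follows.  No IPMB framing or sequence number is involved; the receive
 * message is matched back up through smi_msg->user_data.
 */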
1859
1860 static int i_ipmi_req_ipmb(struct ipmi_smi        *intf,
1861                            struct ipmi_addr       *addr,
1862                            long                   msgid,
1863                            struct kernel_ipmi_msg *msg,
1864                            struct ipmi_smi_msg    *smi_msg,
1865                            struct ipmi_recv_msg   *recv_msg,
1866                            unsigned char          source_address,
1867                            unsigned char          source_lun,
1868                            int                    retries,
1869                            unsigned int           retry_time_ms)
1870 {
1871         struct ipmi_ipmb_addr *ipmb_addr;
1872         unsigned char ipmb_seq;
1873         long seqid;
1874         int broadcast = 0;
1875         struct ipmi_channel *chans;
1876         int rv = 0;
1877
1878         if (addr->channel >= IPMI_MAX_CHANNELS) {
1879                 ipmi_inc_stat(intf, sent_invalid_commands);
1880                 return -EINVAL;
1881         }
1882
1883         chans = READ_ONCE(intf->channel_list)->c;
1884
1885         if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
1886                 ipmi_inc_stat(intf, sent_invalid_commands);
1887                 return -EINVAL;
1888         }
1889
1890         if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1891                 /*
1892                  * Broadcasts add a zero at the beginning of the
1893                  * message, but are otherwise the same as an IPMB
1894                  * address.
1895                  */
1896                 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1897                 broadcast = 1;
1898                 retries = 0; /* Don't retry broadcasts. */
1899         }
1900
1901         /*
1902          * 9 for the header and 1 for the checksum, plus
1903          * possibly one for the broadcast.
1904          */
1905         if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1906                 ipmi_inc_stat(intf, sent_invalid_commands);
1907                 return -EMSGSIZE;
1908         }
1909
1910         ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1911         if (ipmb_addr->lun > 3) {
1912                 ipmi_inc_stat(intf, sent_invalid_commands);
1913                 return -EINVAL;
1914         }
1915
1916         memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1917
1918         if (recv_msg->msg.netfn & 0x1) {
1919                 /*
1920                  * It's a response, so use the user's sequence
1921                  * from msgid.
1922                  */
1923                 ipmi_inc_stat(intf, sent_ipmb_responses);
1924                 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1925                                 msgid, broadcast,
1926                                 source_address, source_lun);
1927
1928                 /*
1929                  * Save the receive message so we can use it
1930                  * to deliver the response.
1931                  */
1932                 smi_msg->user_data = recv_msg;
1933         } else {
1934                 /* It's a command, so get a sequence for it. */
1935                 unsigned long flags;
1936
1937                 spin_lock_irqsave(&intf->seq_lock, flags);
1938
1939                 if (is_maintenance_mode_cmd(msg))
1940                         intf->ipmb_maintenance_mode_timeout =
1941                                 maintenance_mode_timeout_ms;
1942
1943                 if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
1944                         /* Different default in maintenance mode */
1945                         retry_time_ms = default_maintenance_retry_ms;
1946
1947                 /*
1948                  * Create a sequence number, using the supplied
1949                  * retries and timeout (or the driver defaults).
1950                  */
1951                 rv = intf_next_seq(intf,
1952                                    recv_msg,
1953                                    retry_time_ms,
1954                                    retries,
1955                                    broadcast,
1956                                    &ipmb_seq,
1957                                    &seqid);
1958                 if (rv)
1959                         /*
1960                          * We have used up all the sequence numbers,
1961                          * probably, so abort.
1962                          */
1963                         goto out_err;
1964
1965                 ipmi_inc_stat(intf, sent_ipmb_commands);
1966
1967                 /*
1968                  * Store the sequence number in the message,
1969                  * so that when the send message response
1970                  * comes back we can start the timer.
1971                  */
1972                 format_ipmb_msg(smi_msg, msg, ipmb_addr,
1973                                 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1974                                 ipmb_seq, broadcast,
1975                                 source_address, source_lun);
1976
1977                 /*
1978                  * Copy the message into the recv message data, so we
1979                  * can retransmit it later if necessary.
1980                  */
1981                 memcpy(recv_msg->msg_data, smi_msg->data,
1982                        smi_msg->data_size);
1983                 recv_msg->msg.data = recv_msg->msg_data;
1984                 recv_msg->msg.data_len = smi_msg->data_size;
1985
1986                 /*
1987                  * We don't unlock until here, because we need
1988                  * to copy the completed message into the
1989                  * recv_msg before we release the lock.
1990                  * Otherwise, race conditions may bite us.  I
1991                  * know that's pretty paranoid, but I prefer
1992                  * to be correct.
1993                  */
1994 out_err:
1995                 spin_unlock_irqrestore(&intf->seq_lock, flags);
1996         }
1997
1998         return rv;
1999 }
2000
2001 static int i_ipmi_req_lan(struct ipmi_smi        *intf,
2002                           struct ipmi_addr       *addr,
2003                           long                   msgid,
2004                           struct kernel_ipmi_msg *msg,
2005                           struct ipmi_smi_msg    *smi_msg,
2006                           struct ipmi_recv_msg   *recv_msg,
2007                           unsigned char          source_lun,
2008                           int                    retries,
2009                           unsigned int           retry_time_ms)
2010 {
2011         struct ipmi_lan_addr  *lan_addr;
2012         unsigned char ipmb_seq;
2013         long seqid;
2014         struct ipmi_channel *chans;
2015         int rv = 0;
2016
2017         if (addr->channel >= IPMI_MAX_CHANNELS) {
2018                 ipmi_inc_stat(intf, sent_invalid_commands);
2019                 return -EINVAL;
2020         }
2021
2022         chans = READ_ONCE(intf->channel_list)->c;
2023
2024         if ((chans[addr->channel].medium
2025                                 != IPMI_CHANNEL_MEDIUM_8023LAN)
2026                         && (chans[addr->channel].medium
2027                             != IPMI_CHANNEL_MEDIUM_ASYNC)) {
2028                 ipmi_inc_stat(intf, sent_invalid_commands);
2029                 return -EINVAL;
2030         }
2031
2032         /* 11 for the header and 1 for the checksum. */
2033         if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
2034                 ipmi_inc_stat(intf, sent_invalid_commands);
2035                 return -EMSGSIZE;
2036         }
2037
2038         lan_addr = (struct ipmi_lan_addr *) addr;
2039         if (lan_addr->lun > 3) {
2040                 ipmi_inc_stat(intf, sent_invalid_commands);
2041                 return -EINVAL;
2042         }
2043
2044         memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
2045
2046         if (recv_msg->msg.netfn & 0x1) {
2047                 /*
2048                  * It's a response, so use the user's sequence
2049                  * from msgid.
2050                  */
2051                 ipmi_inc_stat(intf, sent_lan_responses);
2052                 format_lan_msg(smi_msg, msg, lan_addr, msgid,
2053                                msgid, source_lun);
2054
2055                 /*
2056                  * Save the receive message so we can use it
2057                  * to deliver the response.
2058                  */
2059                 smi_msg->user_data = recv_msg;
2060         } else {
2061                 /* It's a command, so get a sequence for it. */
2062                 unsigned long flags;
2063
2064                 spin_lock_irqsave(&intf->seq_lock, flags);
2065
2066                 /*
2067                  * Create a sequence number, using the supplied
2068                  * retries and timeout (or the driver defaults).
2069                  */
2070                 rv = intf_next_seq(intf,
2071                                    recv_msg,
2072                                    retry_time_ms,
2073                                    retries,
2074                                    0,
2075                                    &ipmb_seq,
2076                                    &seqid);
2077                 if (rv)
2078                         /*
2079                          * We have used up all the sequence numbers,
2080                          * probably, so abort.
2081                          */
2082                         goto out_err;
2083
2084                 ipmi_inc_stat(intf, sent_lan_commands);
2085
2086                 /*
2087                  * Store the sequence number in the message,
2088                  * so that when the send message response
2089                  * comes back we can start the timer.
2090                  */
2091                 format_lan_msg(smi_msg, msg, lan_addr,
2092                                STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
2093                                ipmb_seq, source_lun);
2094
2095                 /*
2096                  * Copy the message into the recv message data, so we
2097                  * can retransmit it later if necessary.
2098                  */
2099                 memcpy(recv_msg->msg_data, smi_msg->data,
2100                        smi_msg->data_size);
2101                 recv_msg->msg.data = recv_msg->msg_data;
2102                 recv_msg->msg.data_len = smi_msg->data_size;
2103
2104                 /*
2105                  * We don't unlock until here, because we need
2106                  * to copy the completed message into the
2107                  * recv_msg before we release the lock.
2108                  * Otherwise, race conditions may bite us.  I
2109                  * know that's pretty paranoid, but I prefer
2110                  * to be correct.
2111                  */
2112 out_err:
2113                 spin_unlock_irqrestore(&intf->seq_lock, flags);
2114         }
2115
2116         return rv;
2117 }
2118
2119 /*
2120  * Separate from ipmi_request so that the user does not have to be
2121  * supplied in certain circumstances (mainly at panic time).  If
2122  * messages are supplied, they will be freed, even if an error
2123  * occurs.
2124  */
2125 static int i_ipmi_request(struct ipmi_user     *user,
2126                           struct ipmi_smi      *intf,
2127                           struct ipmi_addr     *addr,
2128                           long                 msgid,
2129                           struct kernel_ipmi_msg *msg,
2130                           void                 *user_msg_data,
2131                           void                 *supplied_smi,
2132                           struct ipmi_recv_msg *supplied_recv,
2133                           int                  priority,
2134                           unsigned char        source_address,
2135                           unsigned char        source_lun,
2136                           int                  retries,
2137                           unsigned int         retry_time_ms)
2138 {
2139         struct ipmi_smi_msg *smi_msg;
2140         struct ipmi_recv_msg *recv_msg;
2141         int rv = 0;
2142
2143         if (supplied_recv)
2144                 recv_msg = supplied_recv;
2145         else {
2146                 recv_msg = ipmi_alloc_recv_msg();
2147                 if (recv_msg == NULL) {
2148                         rv = -ENOMEM;
2149                         goto out;
2150                 }
2151         }
2152         recv_msg->user_msg_data = user_msg_data;
2153
2154         if (supplied_smi)
2155                 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
2156         else {
2157                 smi_msg = ipmi_alloc_smi_msg();
2158                 if (smi_msg == NULL) {
2159                         ipmi_free_recv_msg(recv_msg);
2160                         rv = -ENOMEM;
2161                         goto out;
2162                 }
2163         }
2164
2165         rcu_read_lock();
2166         if (intf->in_shutdown) {
2167                 rv = -ENODEV;
2168                 goto out_err;
2169         }
2170
2171         recv_msg->user = user;
2172         if (user)
2173                 /* The put happens when the message is freed. */
2174                 kref_get(&user->refcount);
2175         recv_msg->msgid = msgid;
2176         /*
2177          * Store the message to send in the receive message so timeout
2178          * responses can get the proper response data.
2179          */
2180         recv_msg->msg = *msg;
2181
2182         if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
2183                 rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg,
2184                                         recv_msg, retries, retry_time_ms);
2185         } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
2186                 rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
2187                                      source_address, source_lun,
2188                                      retries, retry_time_ms);
2189         } else if (is_lan_addr(addr)) {
2190                 rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
2191                                     source_lun, retries, retry_time_ms);
2192         } else {
2193                 /* Unknown address type. */
2194                 ipmi_inc_stat(intf, sent_invalid_commands);
2195                 rv = -EINVAL;
2196         }
2197
2198         if (rv) {
2199 out_err:
2200                 ipmi_free_smi_msg(smi_msg);
2201                 ipmi_free_recv_msg(recv_msg);
2202         } else {
2203                 ipmi_debug_msg("Send", smi_msg->data, smi_msg->data_size);
2204
2205                 smi_send(intf, intf->handlers, smi_msg, priority);
2206         }
2207         rcu_read_unlock();
2208
2209 out:
2210         return rv;
2211 }
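
/*
 * Ownership note: on any failure above both messages are freed here, as
 * promised in the comment before this function.  On success the smi
 * message is handed to the lower layer, and the receive message (which
 * holds a reference on the user, if one was given) is released later,
 * once a response or a timeout error has been delivered.
 */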
2212
2213 static int check_addr(struct ipmi_smi  *intf,
2214                       struct ipmi_addr *addr,
2215                       unsigned char    *saddr,
2216                       unsigned char    *lun)
2217 {
2218         if (addr->channel >= IPMI_MAX_CHANNELS)
2219                 return -EINVAL;
2220         addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
2221         *lun = intf->addrinfo[addr->channel].lun;
2222         *saddr = intf->addrinfo[addr->channel].address;
2223         return 0;
2224 }
2225
2226 int ipmi_request_settime(struct ipmi_user *user,
2227                          struct ipmi_addr *addr,
2228                          long             msgid,
2229                          struct kernel_ipmi_msg  *msg,
2230                          void             *user_msg_data,
2231                          int              priority,
2232                          int              retries,
2233                          unsigned int     retry_time_ms)
2234 {
2235         unsigned char saddr = 0, lun = 0;
2236         int rv, index;
2237
2238         if (!user)
2239                 return -EINVAL;
2240
2241         user = acquire_ipmi_user(user, &index);
2242         if (!user)
2243                 return -ENODEV;
2244
2245         rv = check_addr(user->intf, addr, &saddr, &lun);
2246         if (!rv)
2247                 rv = i_ipmi_request(user,
2248                                     user->intf,
2249                                     addr,
2250                                     msgid,
2251                                     msg,
2252                                     user_msg_data,
2253                                     NULL, NULL,
2254                                     priority,
2255                                     saddr,
2256                                     lun,
2257                                     retries,
2258                                     retry_time_ms);
2259
2260         release_ipmi_user(user, index);
2261         return rv;
2262 }
2263 EXPORT_SYMBOL(ipmi_request_settime);
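
/*
 * Illustrative sketch (not part of the driver): sending a Get Device ID
 * request to the local BMC over the system interface.  The response is
 * delivered asynchronously to the user's ipmi_recv_hndl callback and can
 * be matched against the msgid passed here.  Retries of -1 and a retry
 * time of 0 are the "use the defaults" values used elsewhere in this
 * file (see ipmi_request_supply_msgs() below).  The block is excluded
 * from the build.
 */
#if 0
static int example_request_device_id(struct ipmi_user *user)
{
        struct ipmi_system_interface_addr si;
        struct kernel_ipmi_msg msg;

        si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
        si.channel = IPMI_BMC_CHANNEL;
        si.lun = 0;

        msg.netfn = IPMI_NETFN_APP_REQUEST;
        msg.cmd = IPMI_GET_DEVICE_ID_CMD;
        msg.data = NULL;
        msg.data_len = 0;

        /* msgid 0x1234 is an arbitrary cookie echoed back in the reply. */
        return ipmi_request_settime(user, (struct ipmi_addr *) &si, 0x1234,
                                    &msg, NULL, 0, -1, 0);
}
#endif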
2264
2265 int ipmi_request_supply_msgs(struct ipmi_user     *user,
2266                              struct ipmi_addr     *addr,
2267                              long                 msgid,
2268                              struct kernel_ipmi_msg *msg,
2269                              void                 *user_msg_data,
2270                              void                 *supplied_smi,
2271                              struct ipmi_recv_msg *supplied_recv,
2272                              int                  priority)
2273 {
2274         unsigned char saddr = 0, lun = 0;
2275         int rv, index;
2276
2277         if (!user)
2278                 return -EINVAL;
2279
2280         user = acquire_ipmi_user(user, &index);
2281         if (!user)
2282                 return -ENODEV;
2283
2284         rv = check_addr(user->intf, addr, &saddr, &lun);
2285         if (!rv)
2286                 rv = i_ipmi_request(user,
2287                                     user->intf,
2288                                     addr,
2289                                     msgid,
2290                                     msg,
2291                                     user_msg_data,
2292                                     supplied_smi,
2293                                     supplied_recv,
2294                                     priority,
2295                                     saddr,
2296                                     lun,
2297                                     -1, 0);
2298
2299         release_ipmi_user(user, index);
2300         return rv;
2301 }
2302 EXPORT_SYMBOL(ipmi_request_supply_msgs);
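
/*
 * ipmi_request_supply_msgs() is the variant for callers that must not
 * allocate on the send path: the caller hands in preallocated smi and
 * receive messages, and the retry parameters are left at the driver
 * defaults (-1, 0), exactly as passed to i_ipmi_request() above.
 */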
2303
2304 static void bmc_device_id_handler(struct ipmi_smi *intf,
2305                                   struct ipmi_recv_msg *msg)
2306 {
2307         int rv;
2308
2309         if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2310                         || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2311                         || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
2312                 dev_warn(intf->si_dev,
2313                          PFX "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
2314                         msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
2315                 return;
2316         }
2317
2318         rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
2319                         msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
2320         if (rv) {
2321                 dev_warn(intf->si_dev,
2322                          PFX "device id demangle failed: %d\n", rv);
2323                 intf->bmc->dyn_id_set = 0;
2324         } else {
2325                 /*
2326                  * Make sure the id data is available before setting
2327                  * dyn_id_set.
2328                  */
2329                 smp_wmb();
2330                 intf->bmc->dyn_id_set = 1;
2331         }
2332
2333         wake_up(&intf->waitq);
2334 }
2335
2336 static int
2337 send_get_device_id_cmd(struct ipmi_smi *intf)
2338 {
2339         struct ipmi_system_interface_addr si;
2340         struct kernel_ipmi_msg msg;
2341
2342         si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2343         si.channel = IPMI_BMC_CHANNEL;
2344         si.lun = 0;
2345
2346         msg.netfn = IPMI_NETFN_APP_REQUEST;
2347         msg.cmd = IPMI_GET_DEVICE_ID_CMD;
2348         msg.data = NULL;
2349         msg.data_len = 0;
2350
2351         return i_ipmi_request(NULL,
2352                               intf,
2353                               (struct ipmi_addr *) &si,
2354                               0,
2355                               &msg,
2356                               intf,
2357                               NULL,
2358                               NULL,
2359                               0,
2360                               intf->addrinfo[0].address,
2361                               intf->addrinfo[0].lun,
2362                               -1, 0);
2363 }
2364
2365 static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
2366 {
2367         int rv;
2368
2369         bmc->dyn_id_set = 2;
2370
2371         intf->null_user_handler = bmc_device_id_handler;
2372
2373         rv = send_get_device_id_cmd(intf);
2374         if (rv)
2375                 return rv;
2376
2377         wait_event(intf->waitq, bmc->dyn_id_set != 2);
2378
2379         if (!bmc->dyn_id_set)
2380                 rv = -EIO; /* Something went wrong in the fetch. */
2381
2382         /* dyn_id_set makes the id data available. */
2383         smp_rmb();
2384
2385         intf->null_user_handler = NULL;
2386
2387         return rv;
2388 }
2389
2390 /*
2391  * Fetch the device id for the bmc/interface.  You must pass in either
2392  * bmc or intf; this code will get the other one.  If the data has
2393  * been recently fetched, this will just use the cached data.  Otherwise
2394  * it will run a new fetch.
2395  *
2396  * Except for the first time this is called (in ipmi_add_smi()),
2397  * this will always return good data.
2398  */
2399 static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2400                                struct ipmi_device_id *id,
2401                                bool *guid_set, guid_t *guid, int intf_num)
2402 {
2403         int rv = 0;
2404         int prev_dyn_id_set, prev_guid_set;
2405         bool intf_set = intf != NULL;
2406
2407         if (!intf) {
2408                 mutex_lock(&bmc->dyn_mutex);
2409 retry_bmc_lock:
2410                 if (list_empty(&bmc->intfs)) {
2411                         mutex_unlock(&bmc->dyn_mutex);
2412                         return -ENOENT;
2413                 }
2414                 intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
2415                                         bmc_link);
2416                 kref_get(&intf->refcount);
2417                 mutex_unlock(&bmc->dyn_mutex);
2418                 mutex_lock(&intf->bmc_reg_mutex);
2419                 mutex_lock(&bmc->dyn_mutex);
2420                 if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
2421                                              bmc_link)) {
2422                         mutex_unlock(&intf->bmc_reg_mutex);
2423                         kref_put(&intf->refcount, intf_free);
2424                         goto retry_bmc_lock;
2425                 }
2426         } else {
2427                 mutex_lock(&intf->bmc_reg_mutex);
2428                 bmc = intf->bmc;
2429                 mutex_lock(&bmc->dyn_mutex);
2430                 kref_get(&intf->refcount);
2431         }
2432
2433         /* If we have a valid and current ID, just return that. */
2434         if (intf->in_bmc_register ||
2435             (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
2436                 goto out_noprocessing;
2437
2438         prev_guid_set = bmc->dyn_guid_set;
2439         __get_guid(intf);
2440
2441         prev_dyn_id_set = bmc->dyn_id_set;
2442         rv = __get_device_id(intf, bmc);
2443         if (rv)
2444                 goto out;
2445
2446         /*
2447          * The guid, device id, manufacturer id, and product id should
2448          * not change on a BMC.  If it does we have to do some dancing.
2449          */
2450         if (!intf->bmc_registered
2451             || (!prev_guid_set && bmc->dyn_guid_set)
2452             || (!prev_dyn_id_set && bmc->dyn_id_set)
2453             || (prev_guid_set && bmc->dyn_guid_set
2454                 && !guid_equal(&bmc->guid, &bmc->fetch_guid))
2455             || bmc->id.device_id != bmc->fetch_id.device_id
2456             || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
2457             || bmc->id.product_id != bmc->fetch_id.product_id) {
2458                 struct ipmi_device_id id = bmc->fetch_id;
2459                 int guid_set = bmc->dyn_guid_set;
2460                 guid_t guid;
2461
2462                 guid = bmc->fetch_guid;
2463                 mutex_unlock(&bmc->dyn_mutex);
2464
2465                 __ipmi_bmc_unregister(intf);
2466                 /* Fill in the temporary BMC for good measure. */
2467                 intf->bmc->id = id;
2468                 intf->bmc->dyn_guid_set = guid_set;
2469                 intf->bmc->guid = guid;
2470                 if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
2471                         need_waiter(intf); /* Retry later on an error. */
2472                 else
2473                         __scan_channels(intf, &id);
2474
2476                 if (!intf_set) {
2477                         /*
2478                          * We weren't given the interface on the
2479                          * command line, so restart the operation on
2480                          * the next interface for the BMC.
2481                          */
2482                         mutex_unlock(&intf->bmc_reg_mutex);
2483                         mutex_lock(&bmc->dyn_mutex);
2484                         goto retry_bmc_lock;
2485                 }
2486
2487                 /* We have a new BMC, set it up. */
2488                 bmc = intf->bmc;
2489                 mutex_lock(&bmc->dyn_mutex);
2490                 goto out_noprocessing;
2491         } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
2492                 /* Version info changed, scan the channels again. */
2493                 __scan_channels(intf, &bmc->fetch_id);
2494
2495         bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
2496
2497 out:
2498         if (rv && prev_dyn_id_set) {
2499                 rv = 0; /* Ignore failures if we have previous data. */
2500                 bmc->dyn_id_set = prev_dyn_id_set;
2501         }
2502         if (!rv) {
2503                 bmc->id = bmc->fetch_id;
2504                 if (bmc->dyn_guid_set)
2505                         bmc->guid = bmc->fetch_guid;
2506                 else if (prev_guid_set)
2507                         /*
2508                          * The GUID used to be valid but this fetch
2509                          * failed, so keep using the cached value.
2510                          */
2511                         bmc->dyn_guid_set = prev_guid_set;
2512         }
2513 out_noprocessing:
2514         if (!rv) {
2515                 if (id)
2516                         *id = bmc->id;
2517
2518                 if (guid_set)
2519                         *guid_set = bmc->dyn_guid_set;
2520
2521                 if (guid && bmc->dyn_guid_set)
2522                         *guid =  bmc->guid;
2523         }
2524
2525         mutex_unlock(&bmc->dyn_mutex);
2526         mutex_unlock(&intf->bmc_reg_mutex);
2527
2528         kref_put(&intf->refcount, intf_free);
2529         return rv;
2530 }
2531
2532 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2533                              struct ipmi_device_id *id,
2534                              bool *guid_set, guid_t *guid)
2535 {
2536         return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
2537 }
2538
2539 static ssize_t device_id_show(struct device *dev,
2540                               struct device_attribute *attr,
2541                               char *buf)
2542 {
2543         struct bmc_device *bmc = to_bmc_device(dev);
2544         struct ipmi_device_id id;
2545         int rv;
2546
2547         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2548         if (rv)
2549                 return rv;
2550
2551         return snprintf(buf, 10, "%u\n", id.device_id);
2552 }
2553 static DEVICE_ATTR_RO(device_id);
2554
2555 static ssize_t provides_device_sdrs_show(struct device *dev,
2556                                          struct device_attribute *attr,
2557                                          char *buf)
2558 {
2559         struct bmc_device *bmc = to_bmc_device(dev);
2560         struct ipmi_device_id id;
2561         int rv;
2562
2563         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2564         if (rv)
2565                 return rv;
2566
2567         return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7);
2568 }
2569 static DEVICE_ATTR_RO(provides_device_sdrs);
2570
2571 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2572                              char *buf)
2573 {
2574         struct bmc_device *bmc = to_bmc_device(dev);
2575         struct ipmi_device_id id;
2576         int rv;
2577
2578         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2579         if (rv)
2580                 return rv;
2581
2582         return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F);
2583 }
2584 static DEVICE_ATTR_RO(revision);
2585
2586 static ssize_t firmware_revision_show(struct device *dev,
2587                                       struct device_attribute *attr,
2588                                       char *buf)
2589 {
2590         struct bmc_device *bmc = to_bmc_device(dev);
2591         struct ipmi_device_id id;
2592         int rv;
2593
2594         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2595         if (rv)
2596                 return rv;
2597
2598         return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1,
2599                         id.firmware_revision_2);
2600 }
2601 static DEVICE_ATTR_RO(firmware_revision);
2602
2603 static ssize_t ipmi_version_show(struct device *dev,
2604                                  struct device_attribute *attr,
2605                                  char *buf)
2606 {
2607         struct bmc_device *bmc = to_bmc_device(dev);
2608         struct ipmi_device_id id;
2609         int rv;
2610
2611         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2612         if (rv)
2613                 return rv;
2614
2615         return snprintf(buf, 20, "%u.%u\n",
2616                         ipmi_version_major(&id),
2617                         ipmi_version_minor(&id));
2618 }
2619 static DEVICE_ATTR_RO(ipmi_version);
2620
2621 static ssize_t add_dev_support_show(struct device *dev,
2622                                     struct device_attribute *attr,
2623                                     char *buf)
2624 {
2625         struct bmc_device *bmc = to_bmc_device(dev);
2626         struct ipmi_device_id id;
2627         int rv;
2628
2629         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2630         if (rv)
2631                 return rv;
2632
2633         return snprintf(buf, 10, "0x%02x\n", id.additional_device_support);
2634 }
2635 static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
2636                    NULL);
2637
2638 static ssize_t manufacturer_id_show(struct device *dev,
2639                                     struct device_attribute *attr,
2640                                     char *buf)
2641 {
2642         struct bmc_device *bmc = to_bmc_device(dev);
2643         struct ipmi_device_id id;
2644         int rv;
2645
2646         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2647         if (rv)
2648                 return rv;
2649
2650         return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id);
2651 }
2652 static DEVICE_ATTR_RO(manufacturer_id);
2653
2654 static ssize_t product_id_show(struct device *dev,
2655                                struct device_attribute *attr,
2656                                char *buf)
2657 {
2658         struct bmc_device *bmc = to_bmc_device(dev);
2659         struct ipmi_device_id id;
2660         int rv;
2661
2662         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2663         if (rv)
2664                 return rv;
2665
2666         return snprintf(buf, 10, "0x%4.4x\n", id.product_id);
2667 }
2668 static DEVICE_ATTR_RO(product_id);
2669
2670 static ssize_t aux_firmware_rev_show(struct device *dev,
2671                                      struct device_attribute *attr,
2672                                      char *buf)
2673 {
2674         struct bmc_device *bmc = to_bmc_device(dev);
2675         struct ipmi_device_id id;
2676         int rv;
2677
2678         rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2679         if (rv)
2680                 return rv;
2681
2682         return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2683                         id.aux_firmware_revision[3],
2684                         id.aux_firmware_revision[2],
2685                         id.aux_firmware_revision[1],
2686                         id.aux_firmware_revision[0]);
2687 }
2688 static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
2689
2690 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2691                          char *buf)
2692 {
2693         struct bmc_device *bmc = to_bmc_device(dev);
2694         bool guid_set;
2695         guid_t guid;
2696         int rv;
2697
2698         rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
2699         if (rv)
2700                 return rv;
2701         if (!guid_set)
2702                 return -ENOENT;
2703
2704         return snprintf(buf, 38, "%pUl\n", guid.b);
2705 }
2706 static DEVICE_ATTR_RO(guid);
2707
2708 static struct attribute *bmc_dev_attrs[] = {
2709         &dev_attr_device_id.attr,
2710         &dev_attr_provides_device_sdrs.attr,
2711         &dev_attr_revision.attr,
2712         &dev_attr_firmware_revision.attr,
2713         &dev_attr_ipmi_version.attr,
2714         &dev_attr_additional_device_support.attr,
2715         &dev_attr_manufacturer_id.attr,
2716         &dev_attr_product_id.attr,
2717         &dev_attr_aux_firmware_revision.attr,
2718         &dev_attr_guid.attr,
2719         NULL
2720 };
2721
2722 static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
2723                                        struct attribute *attr, int idx)
2724 {
2725         struct device *dev = kobj_to_dev(kobj);
2726         struct bmc_device *bmc = to_bmc_device(dev);
2727         umode_t mode = attr->mode;
2728         int rv;
2729
2730         if (attr == &dev_attr_aux_firmware_revision.attr) {
2731                 struct ipmi_device_id id;
2732
2733                 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2734                 return (!rv && id.aux_firmware_revision_set) ? mode : 0;
2735         }
2736         if (attr == &dev_attr_guid.attr) {
2737                 bool guid_set;
2738
2739                 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
2740                 return (!rv && guid_set) ? mode : 0;
2741         }
2742         return mode;
2743 }
2744
2745 static const struct attribute_group bmc_dev_attr_group = {
2746         .attrs          = bmc_dev_attrs,
2747         .is_visible     = bmc_dev_attr_is_visible,
2748 };
2749
2750 static const struct attribute_group *bmc_dev_attr_groups[] = {
2751         &bmc_dev_attr_group,
2752         NULL
2753 };
2754
2755 static const struct device_type bmc_device_type = {
2756         .groups         = bmc_dev_attr_groups,
2757 };
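
/*
 * Illustrative sysfs view (informational; the ".0" instance comes from
 * ipmi_bmc_ida and the exact parent path can vary): the attributes above
 * typically show up as
 *
 *   /sys/devices/platform/ipmi_bmc.0/device_id
 *   /sys/devices/platform/ipmi_bmc.0/firmware_revision
 *   /sys/devices/platform/ipmi_bmc.0/guid
 *
 * with aux_firmware_revision and guid hidden by bmc_dev_attr_is_visible()
 * when the BMC does not report them.
 */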
2758
2759 static int __find_bmc_guid(struct device *dev, void *data)
2760 {
2761         guid_t *guid = data;
2762         struct bmc_device *bmc;
2763         int rv;
2764
2765         if (dev->type != &bmc_device_type)
2766                 return 0;
2767
2768         bmc = to_bmc_device(dev);
2769         rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
2770         if (rv)
2771                 rv = kref_get_unless_zero(&bmc->usecount);
2772         return rv;
2773 }
2774
2775 /*
2776  * Returns with the bmc's usecount incremented, if it is non-NULL.
2777  */
2778 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
2779                                              guid_t *guid)
2780 {
2781         struct device *dev;
2782         struct bmc_device *bmc = NULL;
2783
2784         dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
2785         if (dev) {
2786                 bmc = to_bmc_device(dev);
2787                 put_device(dev);
2788         }
2789         return bmc;
2790 }
2791
2792 struct prod_dev_id {
2793         unsigned int  product_id;
2794         unsigned char device_id;
2795 };
2796
2797 static int __find_bmc_prod_dev_id(struct device *dev, void *data)
2798 {
2799         struct prod_dev_id *cid = data;
2800         struct bmc_device *bmc;
2801         int rv;
2802
2803         if (dev->type != &bmc_device_type)
2804                 return 0;
2805
2806         bmc = to_bmc_device(dev);
2807         rv = (bmc->id.product_id == cid->product_id
2808               && bmc->id.device_id == cid->device_id);
2809         if (rv)
2810                 rv = kref_get_unless_zero(&bmc->usecount);
2811         return rv;
2812 }
2813
2814 /*
2815  * Returns with the bmc's usecount incremented, if it is non-NULL.
2816  */
2817 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
2818         struct device_driver *drv,
2819         unsigned int product_id, unsigned char device_id)
2820 {
2821         struct prod_dev_id id = {
2822                 .product_id = product_id,
2823                 .device_id = device_id,
2824         };
2825         struct device *dev;
2826         struct bmc_device *bmc = NULL;
2827
2828         dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
2829         if (dev) {
2830                 bmc = to_bmc_device(dev);
2831                 put_device(dev);
2832         }
2833         return bmc;
2834 }
2835
2836 static DEFINE_IDA(ipmi_bmc_ida);
2837
2838 static void
2839 release_bmc_device(struct device *dev)
2840 {
2841         kfree(to_bmc_device(dev));
2842 }
2843
2844 static void cleanup_bmc_work(struct work_struct *work)
2845 {
2846         struct bmc_device *bmc = container_of(work, struct bmc_device,
2847                                               remove_work);
2848         int id = bmc->pdev.id; /* Unregister overwrites id */
2849
2850         platform_device_unregister(&bmc->pdev);
2851         ida_simple_remove(&ipmi_bmc_ida, id);
2852 }
2853
2854 static void
2855 cleanup_bmc_device(struct kref *ref)
2856 {
2857         struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
2858
2859         /*
2860          * Remove the platform device in a work queue to avoid issues
2861          * with removing the device attributes while reading a device
2862          * attribute.
2863          */
2864         schedule_work(&bmc->remove_work);
2865 }
2866
2867 /*
2868  * Must be called with intf->bmc_reg_mutex held.
2869  */
2870 static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
2871 {
2872         struct bmc_device *bmc = intf->bmc;
2873
2874         if (!intf->bmc_registered)
2875                 return;
2876
2877         sysfs_remove_link(&intf->si_dev->kobj, "bmc");
2878         sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
2879         kfree(intf->my_dev_name);
2880         intf->my_dev_name = NULL;
2881
2882         mutex_lock(&bmc->dyn_mutex);
2883         list_del(&intf->bmc_link);
2884         mutex_unlock(&bmc->dyn_mutex);
2885         intf->bmc = &intf->tmp_bmc;
2886         kref_put(&bmc->usecount, cleanup_bmc_device);
2887         intf->bmc_registered = false;
2888 }
2889
2890 static void ipmi_bmc_unregister(struct ipmi_smi *intf)
2891 {
2892         mutex_lock(&intf->bmc_reg_mutex);
2893         __ipmi_bmc_unregister(intf);
2894         mutex_unlock(&intf->bmc_reg_mutex);
2895 }
2896
2897 /*
2898  * Must be called with intf->bmc_reg_mutex held.
2899  */
2900 static int __ipmi_bmc_register(struct ipmi_smi *intf,
2901                                struct ipmi_device_id *id,
2902                                bool guid_set, guid_t *guid, int intf_num)
2903 {
2904         int               rv;
2905         struct bmc_device *bmc;
2906         struct bmc_device *old_bmc;
2907
2908         /*
2909          * platform_device_register() can cause bmc_reg_mutex to
2910          * be claimed because of the is_visible functions of
2911          * the attributes.  Eliminate possible recursion and
2912          * release the lock.
2913          */
2914         intf->in_bmc_register = true;
2915         mutex_unlock(&intf->bmc_reg_mutex);
2916
2917         /*
2918          * Try to find an existing bmc_device struct that already
2919          * represents the interfaced BMC.
2920          */
2921         mutex_lock(&ipmidriver_mutex);
2922         if (guid_set)
2923                 old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
2924         else
2925                 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
2926                                                     id->product_id,
2927                                                     id->device_id);
2928
2929         /*
2930          * If a bmc_device already exists, reuse it; otherwise
2931          * allocate and register a new BMC device.
2932          */
2933         if (old_bmc) {
2934                 bmc = old_bmc;
2935                 /*
2936                  * Note: old_bmc already has usecount incremented by
2937                  * the BMC find functions.
2938                  */
2939                 intf->bmc = old_bmc;
2940                 mutex_lock(&bmc->dyn_mutex);
2941                 list_add_tail(&intf->bmc_link, &bmc->intfs);
2942                 mutex_unlock(&bmc->dyn_mutex);
2943
2944                 dev_info(intf->si_dev,
2945                          "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
2946                          " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2947                          bmc->id.manufacturer_id,
2948                          bmc->id.product_id,
2949                          bmc->id.device_id);
2950         } else {
2951                 bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
2952                 if (!bmc) {
2953                         rv = -ENOMEM;
2954                         goto out;
2955                 }
2956                 INIT_LIST_HEAD(&bmc->intfs);
2957                 mutex_init(&bmc->dyn_mutex);
2958                 INIT_WORK(&bmc->remove_work, cleanup_bmc_work);
2959
2960                 bmc->id = *id;
2961                 bmc->dyn_id_set = 1;
2962                 bmc->dyn_guid_set = guid_set;
2963                 bmc->guid = *guid;
2964                 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
2965
2966                 bmc->pdev.name = "ipmi_bmc";
2967
2968                 rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
2969                 if (rv < 0) {
2970                         kfree(bmc);
2971                         goto out;
2972                 }
2973
2974                 bmc->pdev.dev.driver = &ipmidriver.driver;
2975                 bmc->pdev.id = rv;
2976                 bmc->pdev.dev.release = release_bmc_device;
2977                 bmc->pdev.dev.type = &bmc_device_type;
2978                 kref_init(&bmc->usecount);
2979
2980                 intf->bmc = bmc;
2981                 mutex_lock(&bmc->dyn_mutex);
2982                 list_add_tail(&intf->bmc_link, &bmc->intfs);
2983                 mutex_unlock(&bmc->dyn_mutex);
2984
2985                 rv = platform_device_register(&bmc->pdev);
2986                 if (rv) {
2987                         dev_err(intf->si_dev,
2988                                 PFX "Unable to register bmc device: %d\n",
2989                                 rv);
2990                         goto out_list_del;
2991                 }
2992
2993                 dev_info(intf->si_dev,
2994                          "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2995                          bmc->id.manufacturer_id,
2996                          bmc->id.product_id,
2997                          bmc->id.device_id);
2998         }
2999
3000         /*
3001          * Create symlinks from the system interface device to the
3002          * BMC device and back.
3003          */
3004         rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
3005         if (rv) {
3006                 dev_err(intf->si_dev,
3007                         PFX "Unable to create bmc symlink: %d\n", rv);
3008                 goto out_put_bmc;
3009         }
3010
3011         if (intf_num == -1)
3012                 intf_num = intf->intf_num;
3013         intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
3014         if (!intf->my_dev_name) {
3015                 rv = -ENOMEM;
3016                 dev_err(intf->si_dev,
3017                         PFX "Unable to allocate link from BMC: %d\n", rv);
3018                 goto out_unlink1;
3019         }
3020
3021         rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
3022                                intf->my_dev_name);
3023         if (rv) {
3024                 kfree(intf->my_dev_name);
3025                 intf->my_dev_name = NULL;
3026                 dev_err(intf->si_dev,
3027                         PFX "Unable to create symlink to bmc: %d\n", rv);
3028                 goto out_free_my_dev_name;
3029         }
3030
3031         intf->bmc_registered = true;
3032
3033 out:
3034         mutex_unlock(&ipmidriver_mutex);
3035         mutex_lock(&intf->bmc_reg_mutex);
3036         intf->in_bmc_register = false;
3037         return rv;
3038
3039
3040 out_free_my_dev_name:
3041         kfree(intf->my_dev_name);
3042         intf->my_dev_name = NULL;
3043
3044 out_unlink1:
3045         sysfs_remove_link(&intf->si_dev->kobj, "bmc");
3046
3047 out_put_bmc:
3048         mutex_lock(&bmc->dyn_mutex);
3049         list_del(&intf->bmc_link);
3050         mutex_unlock(&bmc->dyn_mutex);
3051         intf->bmc = &intf->tmp_bmc;
3052         kref_put(&bmc->usecount, cleanup_bmc_device);
3053         goto out;
3054
3055 out_list_del:
3056         mutex_lock(&bmc->dyn_mutex);
3057         list_del(&intf->bmc_link);
3058         mutex_unlock(&bmc->dyn_mutex);
3059         intf->bmc = &intf->tmp_bmc;
3060         put_device(&bmc->pdev.dev);
3061         goto out;
3062 }
3063
3064 static int
3065 send_guid_cmd(struct ipmi_smi *intf, int chan)
3066 {
3067         struct kernel_ipmi_msg            msg;
3068         struct ipmi_system_interface_addr si;
3069
3070         si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3071         si.channel = IPMI_BMC_CHANNEL;
3072         si.lun = 0;
3073
3074         msg.netfn = IPMI_NETFN_APP_REQUEST;
3075         msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
3076         msg.data = NULL;
3077         msg.data_len = 0;
3078         return i_ipmi_request(NULL,
3079                               intf,
3080                               (struct ipmi_addr *) &si,
3081                               0,
3082                               &msg,
3083                               intf,
3084                               NULL,
3085                               NULL,
3086                               0,
3087                               intf->addrinfo[0].address,
3088                               intf->addrinfo[0].lun,
3089                               -1, 0);
3090 }
3091
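/*
 * Response handling for the Get Device GUID command.  As consumed
 * below, msg->msg.data[0] is the completion code and data[1..16]
 * carry the 16-byte GUID, which is why anything shorter than 17
 * bytes is treated as "no GUID available".
 */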
3092 static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3093 {
3094         struct bmc_device *bmc = intf->bmc;
3095
3096         if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3097             || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
3098             || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
3099                 /* Not for me */
3100                 return;
3101
3102         if (msg->msg.data[0] != 0) {
3103                 /* Error getting the GUID; the BMC doesn't have one. */
3104                 bmc->dyn_guid_set = 0;
3105                 goto out;
3106         }
3107
3108         if (msg->msg.data_len < 17) {
3109                 bmc->dyn_guid_set = 0;
3110                 dev_warn(intf->si_dev,
3111                          PFX "The GUID response from the BMC was too short; it was %d bytes but should have been 17.  Assuming GUID is not available.\n",
3112                          msg->msg.data_len);
3113                 goto out;
3114         }
3115
3116         memcpy(bmc->fetch_guid.b, msg->msg.data + 1, 16);
3117         /*
3118          * Make sure the guid data is available before setting
3119          * dyn_guid_set.
3120          */
3121         smp_wmb();
3122         bmc->dyn_guid_set = 1;
3123  out:
3124         wake_up(&intf->waitq);
3125 }
3126
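/*
 * bmc->dyn_guid_set acts as a small state machine here: 2 means a
 * GUID fetch is outstanding, 1 means bmc->fetch_guid holds valid data
 * (published by the smp_wmb()/smp_rmb() pairing with guid_handler()),
 * and 0 means the BMC has no GUID or the fetch failed.
 */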
3127 static void __get_guid(struct ipmi_smi *intf)
3128 {
3129         int rv;
3130         struct bmc_device *bmc = intf->bmc;
3131
3132         bmc->dyn_guid_set = 2;
3133         intf->null_user_handler = guid_handler;
3134         rv = send_guid_cmd(intf, 0);
3135         if (rv)
3136                 /* Send failed, no GUID available. */
3137                 bmc->dyn_guid_set = 0;
3138         else
3139                 wait_event(intf->waitq, bmc->dyn_guid_set != 2);
3140
3141         /* dyn_guid_set makes the guid data available. */
3142         smp_rmb();
3143
3144         intf->null_user_handler = NULL;
3145 }
3146
3147 static int
3148 send_channel_info_cmd(struct ipmi_smi *intf, int chan)
3149 {
3150         struct kernel_ipmi_msg            msg;
3151         unsigned char                     data[1];
3152         struct ipmi_system_interface_addr si;
3153
3154         si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3155         si.channel = IPMI_BMC_CHANNEL;
3156         si.lun = 0;
3157
3158         msg.netfn = IPMI_NETFN_APP_REQUEST;
3159         msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
3160         msg.data = data;
3161         msg.data_len = 1;
3162         data[0] = chan;
3163         return i_ipmi_request(NULL,
3164                               intf,
3165                               (struct ipmi_addr *) &si,
3166                               0,
3167                               &msg,
3168                               intf,
3169                               NULL,
3170                               NULL,
3171                               0,
3172                               intf->addrinfo[0].address,
3173                               intf->addrinfo[0].lun,
3174                               -1, 0);
3175 }
3176
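/*
 * Process one Get Channel Info response at a time: record the medium
 * and protocol for the current channel in the working channel set,
 * then request the next channel until IPMI_MAX_CHANNELS have been
 * scanned and channels_ready lets __scan_channels() continue.  An
 * "invalid command" error is taken to mean the MC only supports the
 * mandatory IPMB channel at 0.
 */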
3177 static void
3178 channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3179 {
3180         int rv = 0;
3181         int ch;
3182         unsigned int set = intf->curr_working_cset;
3183         struct ipmi_channel *chans;
3184
3185         if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3186             && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3187             && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
3188                 /* It's the one we want */
3189                 if (msg->msg.data[0] != 0) {
3190                         /* Got an error from the channel, just go on. */
3191
3192                         if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
3193                                 /*
3194                                  * If the MC does not support this
3195                                  * command, that is legal.  We just
3196                                  * assume it has one IPMB at channel
3197                                  * zero.
3198                                  */
3199                                 intf->wchannels[set].c[0].medium
3200                                         = IPMI_CHANNEL_MEDIUM_IPMB;
3201                                 intf->wchannels[set].c[0].protocol
3202                                         = IPMI_CHANNEL_PROTOCOL_IPMB;
3203
3204                                 intf->channel_list = intf->wchannels + set;
3205                                 intf->channels_ready = true;
3206                                 wake_up(&intf->waitq);
3207                                 goto out;
3208                         }
3209                         goto next_channel;
3210                 }
3211                 if (msg->msg.data_len < 4) {
3212                         /* Message not big enough, just go on. */
3213                         goto next_channel;
3214                 }
3215                 ch = intf->curr_channel;
3216                 chans = intf->wchannels[set].c;
3217                 chans[ch].medium = msg->msg.data[2] & 0x7f;
3218                 chans[ch].protocol = msg->msg.data[3] & 0x1f;
3219
3220  next_channel:
3221                 intf->curr_channel++;
3222                 if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
3223                         intf->channel_list = intf->wchannels + set;
3224                         intf->channels_ready = true;
3225                         wake_up(&intf->waitq);
3226                 } else {
3227                         intf->channel_list = intf->wchannels + set;
3228                         intf->channels_ready = true;
3229                         rv = send_channel_info_cmd(intf, intf->curr_channel);
3230                 }
3231
3232                 if (rv) {
3233                         /* Got an error somehow, just give up. */
3234                         dev_warn(intf->si_dev,
3235                                  PFX "Error sending channel information for channel %d: %d\n",
3236                                  intf->curr_channel, rv);
3237
3238                         intf->channel_list = intf->wchannels + set;
3239                         intf->channels_ready = true;
3240                         wake_up(&intf->waitq);
3241                 }
3242         }
3243  out:
3244         return;
3245 }
3246
3247 /*
3248  * Must be holding intf->bmc_reg_mutex to call this.
3249  */
3250 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
3251 {
3252         int rv;
3253
3254         if (ipmi_version_major(id) > 1
3255                         || (ipmi_version_major(id) == 1
3256                             && ipmi_version_minor(id) >= 5)) {
3257                 unsigned int set;
3258
3259                 /*
3260                  * Start scanning the channels to see what is
3261                  * available.
3262                  */
3263                 set = !intf->curr_working_cset;
3264                 intf->curr_working_cset = set;
3265                 memset(&intf->wchannels[set], 0,
3266                        sizeof(struct ipmi_channel_set));
3267
3268                 intf->null_user_handler = channel_handler;
3269                 intf->curr_channel = 0;
3270                 rv = send_channel_info_cmd(intf, 0);
3271                 if (rv) {
3272                         dev_warn(intf->si_dev,
3273                                  "Error sending channel information for channel 0, %d\n",
3274                                  rv);
3275                         return -EIO;
3276                 }
3277
3278                 /* Wait for the channel info to be read. */
3279                 wait_event(intf->waitq, intf->channels_ready);
3280                 intf->null_user_handler = NULL;
3281         } else {
3282                 unsigned int set = intf->curr_working_cset;
3283
3284                 /* Assume a single IPMB channel at zero. */
3285                 intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
3286                 intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
3287                 intf->channel_list = intf->wchannels + set;
3288                 intf->channels_ready = true;
3289         }
3290
3291         return 0;
3292 }
3293
3294 static void ipmi_poll(struct ipmi_smi *intf)
3295 {
3296         if (intf->handlers->poll)
3297                 intf->handlers->poll(intf->send_info);
3298         /* In case something came in */
3299         handle_new_recv_msgs(intf);
3300 }
3301
3302 void ipmi_poll_interface(struct ipmi_user *user)
3303 {
3304         ipmi_poll(user->intf);
3305 }
3306 EXPORT_SYMBOL(ipmi_poll_interface);
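
/*
 * Illustrative only (a sketch, not code from a real driver): a client
 * that already holds a struct ipmi_user, e.g. from ipmi_create_user(),
 * can drive message processing by hand when interrupts are not
 * available, roughly:
 *
 *	while (!response_arrived)
 *		ipmi_poll_interface(my_user);
 *
 * where my_user and response_arrived are the caller's own state.
 */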
3307
3308 static void redo_bmc_reg(struct work_struct *work)
3309 {
3310         struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
3311                                              bmc_reg_work);
3312
3313         if (!intf->in_shutdown)
3314                 bmc_get_device_id(intf, NULL, NULL, NULL, NULL);
3315
3316         kref_put(&intf->refcount, intf_free);
3317 }
3318
3319 int ipmi_add_smi(struct module         *owner,
3320                  const struct ipmi_smi_handlers *handlers,
3321                  void                  *send_info,
3322                  struct device         *si_dev,
3323                  unsigned char         slave_addr)
3324 {
3325         int              i, j;
3326         int              rv;
3327         struct ipmi_smi *intf, *tintf;
3328         struct list_head *link;
3329         struct ipmi_device_id id;
3330
3331         /*
3332          * Make sure the driver is actually initialized, this handles
3333          * problems with initialization order.
3334          */
3335         rv = ipmi_init_msghandler();
3336         if (rv)
3337                 return rv;
3338
3339         intf = kzalloc(sizeof(*intf), GFP_KERNEL);
3340         if (!intf)
3341                 return -ENOMEM;
3342
3343         rv = init_srcu_struct(&intf->users_srcu);
3344         if (rv) {
3345                 kfree(intf);
3346                 return rv;
3347         }
3348
3349         intf->owner = owner;
3350         intf->bmc = &intf->tmp_bmc;
3351         INIT_LIST_HEAD(&intf->bmc->intfs);
3352         mutex_init(&intf->bmc->dyn_mutex);
3353         INIT_LIST_HEAD(&intf->bmc_link);
3354         mutex_init(&intf->bmc_reg_mutex);
3355         intf->intf_num = -1; /* Mark it invalid for now. */
3356         kref_init(&intf->refcount);
3357         INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
3358         intf->si_dev = si_dev;
3359         for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
3360                 intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
3361                 intf->addrinfo[j].lun = 2;
3362         }
3363         if (slave_addr != 0)
3364                 intf->addrinfo[0].address = slave_addr;
3365         INIT_LIST_HEAD(&intf->users);
3366         intf->handlers = handlers;
3367         intf->send_info = send_info;
3368         spin_lock_init(&intf->seq_lock);
3369         for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
3370                 intf->seq_table[j].inuse = 0;
3371                 intf->seq_table[j].seqid = 0;
3372         }
3373         intf->curr_seq = 0;
3374         spin_lock_init(&intf->waiting_rcv_msgs_lock);
3375         INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
3376         tasklet_init(&intf->recv_tasklet,
3377                      smi_recv_tasklet,
3378                      (unsigned long) intf);
3379         atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
3380         spin_lock_init(&intf->xmit_msgs_lock);
3381         INIT_LIST_HEAD(&intf->xmit_msgs);
3382         INIT_LIST_HEAD(&intf->hp_xmit_msgs);
3383         spin_lock_init(&intf->events_lock);
3384         atomic_set(&intf->event_waiters, 0);
3385         intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3386         INIT_LIST_HEAD(&intf->waiting_events);
3387         intf->waiting_events_count = 0;
3388         mutex_init(&intf->cmd_rcvrs_mutex);
3389         spin_lock_init(&intf->maintenance_mode_lock);
3390         INIT_LIST_HEAD(&intf->cmd_rcvrs);
3391         init_waitqueue_head(&intf->waitq);
3392         for (i = 0; i < IPMI_NUM_STATS; i++)
3393                 atomic_set(&intf->stats[i], 0);
3394
3395         mutex_lock(&ipmi_interfaces_mutex);
3396         /* Look for a hole in the numbers. */
3397         i = 0;
3398         link = &ipmi_interfaces;
3399         list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
3400                 if (tintf->intf_num != i) {
3401                         link = &tintf->link;
3402                         break;
3403                 }
3404                 i++;
3405         }
3406         /* Add the new interface in numeric order. */
3407         if (i == 0)
3408                 list_add_rcu(&intf->link, &ipmi_interfaces);
3409         else
3410                 list_add_tail_rcu(&intf->link, link);
3411
3412         rv = handlers->start_processing(send_info, intf);
3413         if (rv)
3414                 goto out_err;
3415
3416         rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
3417         if (rv) {
3418                 dev_err(si_dev, "Unable to get the device id: %d\n", rv);
3419                 goto out_err_started;
3420         }
3421
3422         mutex_lock(&intf->bmc_reg_mutex);
3423         rv = __scan_channels(intf, &id);
3424         mutex_unlock(&intf->bmc_reg_mutex);
3425         if (rv)
3426                 goto out_err_bmc_reg;
3427
3428         /*
3429          * Keep memory order straight for RCU readers.  Make
3430          * sure everything else is committed to memory before
3431          * setting intf_num to mark the interface valid.
3432          */
3433         smp_wmb();
3434         intf->intf_num = i;
3435         mutex_unlock(&ipmi_interfaces_mutex);
3436
3437         /* After this point the interface is legal to use. */
3438         call_smi_watchers(i, intf->si_dev);
3439
3440         return 0;
3441
3442  out_err_bmc_reg:
3443         ipmi_bmc_unregister(intf);
3444  out_err_started:
3445         if (intf->handlers->shutdown)
3446                 intf->handlers->shutdown(intf->send_info);
3447  out_err:
3448         list_del_rcu(&intf->link);
3449         mutex_unlock(&ipmi_interfaces_mutex);
3450         synchronize_srcu(&ipmi_interfaces_srcu);
3451         cleanup_srcu_struct(&intf->users_srcu);
3452         kref_put(&intf->refcount, intf_free);
3453
3454         return rv;
3455 }
3456 EXPORT_SYMBOL(ipmi_add_smi);
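
/*
 * A minimal sketch (hypothetical my_* names, not code from a real
 * driver) of how a lower-layer interface driver hooks in.  It fills in
 * an ipmi_smi_handlers structure with at least the callbacks used in
 * this file (start_processing, sender, shutdown, poll) and normally
 * calls the ipmi_register_smi() wrapper, which passes THIS_MODULE to
 * ipmi_add_smi():
 *
 *	static const struct ipmi_smi_handlers my_handlers = {
 *		.start_processing	= my_start_processing,
 *		.sender			= my_sender,
 *		.shutdown		= my_shutdown,
 *		.poll			= my_poll,
 *	};
 *
 *	rv = ipmi_register_smi(&my_handlers, my_send_info, my_dev, 0);
 *
 * A slave_addr of 0 keeps the IPMI_BMC_SLAVE_ADDR default set above.
 */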
3457
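/*
 * Synthesize an error response in place for a message that could not
 * be handled: data[0] is the request's netfn/LUN byte, so OR-ing in
 * bit 2 turns the even request netfn into the odd response netfn;
 * rsp[1] echoes the command and rsp[2] carries the completion code.
 */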
3458 static void deliver_smi_err_response(struct ipmi_smi *intf,
3459                                      struct ipmi_smi_msg *msg,
3460                                      unsigned char err)
3461 {
3462         msg->rsp[0] = msg->data[0] | 4;
3463         msg->rsp[1] = msg->data[1];
3464         msg->rsp[2] = err;
3465         msg->rsp_size = 3;
3466         /* It's an error, so it will never requeue, no need to check return. */
3467         handle_one_recv_msg(intf, msg);
3468 }
3469
3470 static void cleanup_smi_msgs(struct ipmi_smi *intf)
3471 {
3472         int              i;
3473         struct seq_table *ent;
3474         struct ipmi_smi_msg *msg;
3475         struct list_head *entry;
3476         struct list_head tmplist;
3477
3478         /* Clear out our transmit queues and hold the messages. */
3479         INIT_LIST_HEAD(&tmplist);
3480         list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
3481         list_splice_tail(&intf->xmit_msgs, &tmplist);
3482
3483         /* Current message first, to preserve order */
3484         while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
3485                 /* Wait for the message to clear out. */
3486                 schedule_timeout(1);
3487         }
3488
3489         /* No need for locks, the interface is down. */
3490
3491         /*
3492          * Return errors for all pending messages in queue and in the
3493          * tables waiting for remote responses.
3494          */
3495         while (!list_empty(&tmplist)) {
3496                 entry = tmplist.next;
3497                 list_del(entry);
3498                 msg = list_entry(entry, struct ipmi_smi_msg, link);
3499                 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
3500         }
3501
3502         for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
3503                 ent = &intf->seq_table[i];
3504                 if (!ent->inuse)
3505                         continue;
3506                 deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
3507         }
3508 }
3509
3510 void ipmi_unregister_smi(struct ipmi_smi *intf)
3511 {
3512         struct ipmi_smi_watcher *w;
3513         int intf_num = intf->intf_num, index;
3514
3515         mutex_lock(&ipmi_interfaces_mutex);
3516         intf->intf_num = -1;
3517         intf->in_shutdown = true;
3518         list_del_rcu(&intf->link);
3519         mutex_unlock(&ipmi_interfaces_mutex);
3520         synchronize_srcu(&ipmi_interfaces_srcu);
3521
3522         /* At this point no users can be added to the interface. */
3523
3524         /*
3525          * Call all the watcher interfaces to tell them that
3526          * an interface is going away.
3527          */
3528         mutex_lock(&smi_watchers_mutex);
3529         list_for_each_entry(w, &smi_watchers, link)
3530                 w->smi_gone(intf_num);
3531         mutex_unlock(&smi_watchers_mutex);
3532
3533         index = srcu_read_lock(&intf->users_srcu);
3534         while (!list_empty(&intf->users)) {
3535                 struct ipmi_user *user =
3536                         container_of(list_next_rcu(&intf->users),
3537                                      struct ipmi_user, link);
3538
3539                 _ipmi_destroy_user(user);
3540         }
3541         srcu_read_unlock(&intf->users_srcu, index);
3542
3543         if (intf->handlers->shutdown)
3544                 intf->handlers->shutdown(intf->send_info);
3545
3546         cleanup_smi_msgs(intf);
3547
3548         ipmi_bmc_unregister(intf);
3549
3550         cleanup_srcu_struct(&intf->users_srcu);
3551         kref_put(&intf->refcount, intf_free);
3552 }
3553 EXPORT_SYMBOL(ipmi_unregister_smi);
3554
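/*
 * Layout of a Get Message response carrying an IPMB response, as
 * consumed below: rsp[2] completion code, rsp[3] channel, rsp[4]
 * netfn/LUN of the embedded response, rsp[6] responder slave address,
 * rsp[7] sequence number/LUN, rsp[8] command, rsp[9..] data followed
 * by a trailing checksum (hence data_len = rsp_size - 10).
 */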
3555 static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
3556                                    struct ipmi_smi_msg *msg)
3557 {
3558         struct ipmi_ipmb_addr ipmb_addr;
3559         struct ipmi_recv_msg  *recv_msg;
3560
3561         /*
3562          * This is 11, not 10, because the response must contain a
3563          * completion code.
3564          */
3565         if (msg->rsp_size < 11) {
3566                 /* Message not big enough, just ignore it. */
3567                 ipmi_inc_stat(intf, invalid_ipmb_responses);
3568                 return 0;
3569         }
3570
3571         if (msg->rsp[2] != 0) {
3572                 /* An error getting the response, just ignore it. */
3573                 return 0;
3574         }
3575
3576         ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
3577         ipmb_addr.slave_addr = msg->rsp[6];
3578         ipmb_addr.channel = msg->rsp[3] & 0x0f;
3579         ipmb_addr.lun = msg->rsp[7] & 3;
3580
3581         /*
3582          * It's a response from a remote entity.  Look up the sequence
3583          * number and handle the response.
3584          */
3585         if (intf_find_seq(intf,
3586                           msg->rsp[7] >> 2,
3587                           msg->rsp[3] & 0x0f,
3588                           msg->rsp[8],
3589                           (msg->rsp[4] >> 2) & (~1),
3590                           (struct ipmi_addr *) &ipmb_addr,
3591                           &recv_msg)) {
3592                 /*
3593                  * We were unable to find the sequence number,
3594                  * so just nuke the message.
3595                  */
3596                 ipmi_inc_stat(intf, unhandled_ipmb_responses);
3597                 return 0;
3598         }
3599
3600         memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9);
3601         /*
3602          * The other fields matched, so no need to set them, except
3603          * for netfn, which needs to be the response that was
3604          * returned, not the request value.
3605          */
3606         recv_msg->msg.netfn = msg->rsp[4] >> 2;
3607         recv_msg->msg.data = recv_msg->msg_data;
3608         recv_msg->msg.data_len = msg->rsp_size - 10;
3609         recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3610         if (deliver_response(intf, recv_msg))
3611                 ipmi_inc_stat(intf, unhandled_ipmb_responses);
3612         else
3613                 ipmi_inc_stat(intf, handled_ipmb_responses);
3614
3615         return 0;
3616 }
3617
3618 static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
3619                                    struct ipmi_smi_msg *msg)
3620 {
3621         struct cmd_rcvr          *rcvr;
3622         int                      rv = 0;
3623         unsigned char            netfn;
3624         unsigned char            cmd;
3625         unsigned char            chan;
3626         struct ipmi_user         *user = NULL;
3627         struct ipmi_ipmb_addr    *ipmb_addr;
3628         struct ipmi_recv_msg     *recv_msg;
3629
3630         if (msg->rsp_size < 10) {
3631                 /* Message not big enough, just ignore it. */
3632                 ipmi_inc_stat(intf, invalid_commands);
3633                 return 0;
3634         }
3635
3636         if (msg->rsp[2] != 0) {
3637                 /* An error getting the response, just ignore it. */
3638                 return 0;
3639         }
3640
3641         netfn = msg->rsp[4] >> 2;
3642         cmd = msg->rsp[8];
3643         chan = msg->rsp[3] & 0xf;
3644
3645         rcu_read_lock();
3646         rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3647         if (rcvr) {
3648                 user = rcvr->user;
3649                 kref_get(&user->refcount);
3650         } else
3651                 user = NULL;
3652         rcu_read_unlock();
3653
3654         if (user == NULL) {
3655                 /* We didn't find a user, deliver an error response. */
3656                 ipmi_inc_stat(intf, unhandled_commands);
3657
3658                 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
3659                 msg->data[1] = IPMI_SEND_MSG_CMD;
3660                 msg->data[2] = msg->rsp[3];
3661                 msg->data[3] = msg->rsp[6];
3662                 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
3663                 msg->data[5] = ipmb_checksum(&msg->data[3], 2);
3664                 msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
3665                 /* rqseq/lun */
3666                 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
3667                 msg->data[8] = msg->rsp[8]; /* cmd */
3668                 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
3669                 msg->data[10] = ipmb_checksum(&msg->data[6], 4);
3670                 msg->data_size = 11;
3671
3672                 ipmi_debug_msg("Invalid command:", msg->data, msg->data_size);
3673
3674                 rcu_read_lock();
3675                 if (!intf->in_shutdown) {
3676                         smi_send(intf, intf->handlers, msg, 0);
3677                         /*
3678                          * We used the message, so return the value
3679                          * that causes it to not be freed or
3680                          * queued.
3681                          */
3682                         rv = -1;
3683                 }
3684                 rcu_read_unlock();
3685         } else {
3686                 recv_msg = ipmi_alloc_recv_msg();
3687                 if (!recv_msg) {
3688                         /*
3689                          * We couldn't allocate memory for the
3690                          * message, so requeue it for handling
3691                          * later.
3692                          */
3693                         rv = 1;
3694                         kref_put(&user->refcount, free_user);
3695                 } else {
3696                         /* Extract the source address from the data. */
3697                         ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
3698                         ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
3699                         ipmb_addr->slave_addr = msg->rsp[6];
3700                         ipmb_addr->lun = msg->rsp[7] & 3;
3701                         ipmb_addr->channel = msg->rsp[3] & 0xf;
3702
3703                         /*
3704                          * Extract the rest of the message information
3705                          * from the IPMB header.
3706                          */
3707                         recv_msg->user = user;
3708                         recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3709                         recv_msg->msgid = msg->rsp[7] >> 2;
3710                         recv_msg->msg.netfn = msg->rsp[4] >> 2;
3711                         recv_msg->msg.cmd = msg->rsp[8];
3712                         recv_msg->msg.data = recv_msg->msg_data;
3713
3714                         /*
3715                          * We chop off 10, not 9 bytes because the checksum
3716                          * at the end also needs to be removed.
3717                          */
3718                         recv_msg->msg.data_len = msg->rsp_size - 10;
3719                         memcpy(recv_msg->msg_data, &msg->rsp[9],
3720                                msg->rsp_size - 10);
3721                         if (deliver_response(intf, recv_msg))
3722                                 ipmi_inc_stat(intf, unhandled_commands);
3723                         else
3724                                 ipmi_inc_stat(intf, handled_commands);
3725                 }
3726         }
3727
3728         return rv;
3729 }
3730
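/*
 * Layout of a Get Message response carrying a LAN response, as
 * consumed below: rsp[2] completion code, rsp[3] channel/privilege,
 * rsp[4] session handle, rsp[5] local SWID, rsp[6] netfn/LUN of the
 * embedded response, rsp[8] remote SWID, rsp[9] sequence number/LUN,
 * rsp[10] command, rsp[11..] data followed by a trailing checksum
 * (hence data_len = rsp_size - 12).
 */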
3731 static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
3732                                   struct ipmi_smi_msg *msg)
3733 {
3734         struct ipmi_lan_addr  lan_addr;
3735         struct ipmi_recv_msg  *recv_msg;
3736
3737
3738         /*
3739          * This is 13, not 12, because the response must contain a
3740          * completion code.
3741          */
3742         if (msg->rsp_size < 13) {
3743                 /* Message not big enough, just ignore it. */
3744                 ipmi_inc_stat(intf, invalid_lan_responses);
3745                 return 0;
3746         }
3747
3748         if (msg->rsp[2] != 0) {
3749                 /* An error getting the response, just ignore it. */
3750                 return 0;
3751         }
3752
3753         lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
3754         lan_addr.session_handle = msg->rsp[4];
3755         lan_addr.remote_SWID = msg->rsp[8];
3756         lan_addr.local_SWID = msg->rsp[5];
3757         lan_addr.channel = msg->rsp[3] & 0x0f;
3758         lan_addr.privilege = msg->rsp[3] >> 4;
3759         lan_addr.lun = msg->rsp[9] & 3;
3760
3761         /*
3762          * It's a response from a remote entity.  Look up the sequence
3763          * number and handle the response.
3764          */
3765         if (intf_find_seq(intf,
3766                           msg->rsp[9] >> 2,
3767                           msg->rsp[3] & 0x0f,
3768                           msg->rsp[10],
3769                           (msg->rsp[6] >> 2) & (~1),
3770                           (struct ipmi_addr *) &lan_addr,
3771                           &recv_msg)) {
3772                 /*
3773                  * We were unable to find the sequence number,
3774                  * so just nuke the message.
3775                  */
3776                 ipmi_inc_stat(intf, unhandled_lan_responses);
3777                 return 0;
3778         }
3779
3780         memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11);
3781         /*
3782          * The other fields matched, so no need to set them, except
3783          * for netfn, which needs to be the response that was
3784          * returned, not the request value.
3785          */
3786         recv_msg->msg.netfn = msg->rsp[6] >> 2;
3787         recv_msg->msg.data = recv_msg->msg_data;
3788         recv_msg->msg.data_len = msg->rsp_size - 12;
3789         recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3790         if (deliver_response(intf, recv_msg))
3791                 ipmi_inc_stat(intf, unhandled_lan_responses);
3792         else
3793                 ipmi_inc_stat(intf, handled_lan_responses);
3794
3795         return 0;
3796 }
3797
3798 static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
3799                                   struct ipmi_smi_msg *msg)
3800 {
3801         struct cmd_rcvr          *rcvr;
3802         int                      rv = 0;
3803         unsigned char            netfn;
3804         unsigned char            cmd;
3805         unsigned char            chan;
3806         struct ipmi_user         *user = NULL;
3807         struct ipmi_lan_addr     *lan_addr;
3808         struct ipmi_recv_msg     *recv_msg;
3809
3810         if (msg->rsp_size < 12) {
3811                 /* Message not big enough, just ignore it. */
3812                 ipmi_inc_stat(intf, invalid_commands);
3813                 return 0;
3814         }
3815
3816         if (msg->rsp[2] != 0) {
3817                 /* An error getting the response, just ignore it. */
3818                 return 0;
3819         }
3820
3821         netfn = msg->rsp[6] >> 2;
3822         cmd = msg->rsp[10];
3823         chan = msg->rsp[3] & 0xf;
3824
3825         rcu_read_lock();
3826         rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3827         if (rcvr) {
3828                 user = rcvr->user;
3829                 kref_get(&user->refcount);
3830         } else
3831                 user = NULL;
3832         rcu_read_unlock();
3833
3834         if (user == NULL) {
3835                 /* We didn't find a user, just give up. */
3836                 ipmi_inc_stat(intf, unhandled_commands);
3837
3838                 /*
3839                  * Don't do anything with these messages, just allow
3840                  * them to be freed.
3841                  */
3842                 rv = 0;
3843         } else {
3844                 recv_msg = ipmi_alloc_recv_msg();
3845                 if (!recv_msg) {
3846                         /*
3847                          * We couldn't allocate memory for the
3848                          * message, so requeue it for handling later.
3849                          */
3850                         rv = 1;
3851                         kref_put(&user->refcount, free_user);
3852                 } else {
3853                         /* Extract the source address from the data. */
3854                         lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
3855                         lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
3856                         lan_addr->session_handle = msg->rsp[4];
3857                         lan_addr->remote_SWID = msg->rsp[8];
3858                         lan_addr->local_SWID = msg->rsp[5];
3859                         lan_addr->lun = msg->rsp[9] & 3;
3860                         lan_addr->channel = msg->rsp[3] & 0xf;
3861                         lan_addr->privilege = msg->rsp[3] >> 4;
3862
3863                         /*
3864                          * Extract the rest of the message information
3865                          * from the IPMB header.
3866                          */
3867                         recv_msg->user = user;
3868                         recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3869                         recv_msg->msgid = msg->rsp[9] >> 2;
3870                         recv_msg->msg.netfn = msg->rsp[6] >> 2;
3871                         recv_msg->msg.cmd = msg->rsp[10];
3872                         recv_msg->msg.data = recv_msg->msg_data;
3873
3874                         /*
3875                          * We chop off 12, not 11 bytes because the checksum
3876                          * at the end also needs to be removed.
3877                          */
3878                         recv_msg->msg.data_len = msg->rsp_size - 12;
3879                         memcpy(recv_msg->msg_data, &msg->rsp[11],
3880                                msg->rsp_size - 12);
3881                         if (deliver_response(intf, recv_msg))
3882                                 ipmi_inc_stat(intf, unhandled_commands);
3883                         else
3884                                 ipmi_inc_stat(intf, handled_commands);
3885                 }
3886         }
3887
3888         return rv;
3889 }
3890
3891 /*
3892  * This routine will handle "Get Message" command responses with
3893  * channels that use an OEM Medium. The message format belongs to
3894  * the OEM.  See IPMI 2.0 specification, Chapter 6 and
3895  * Chapter 22, sections 22.6 and 22.24 for more details.
3896  */
3897 static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
3898                                   struct ipmi_smi_msg *msg)
3899 {
3900         struct cmd_rcvr       *rcvr;
3901         int                   rv = 0;
3902         unsigned char         netfn;
3903         unsigned char         cmd;
3904         unsigned char         chan;
3905         struct ipmi_user *user = NULL;
3906         struct ipmi_system_interface_addr *smi_addr;
3907         struct ipmi_recv_msg  *recv_msg;
3908
3909         /*
3910          * We expect the OEM SW to perform error checking,
3911          * so we just do some basic sanity checks here.
3912          */
3913         if (msg->rsp_size < 4) {
3914                 /* Message not big enough, just ignore it. */
3915                 ipmi_inc_stat(intf, invalid_commands);
3916                 return 0;
3917         }
3918
3919         if (msg->rsp[2] != 0) {
3920                 /* An error getting the response, just ignore it. */
3921                 return 0;
3922         }
3923
3924         /*
3925          * This is an OEM message, so the OEM needs to know how to
3926          * handle it.  We do no interpretation.
3927          */
3928         netfn = msg->rsp[0] >> 2;
3929         cmd = msg->rsp[1];
3930         chan = msg->rsp[3] & 0xf;
3931
3932         rcu_read_lock();
3933         rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3934         if (rcvr) {
3935                 user = rcvr->user;
3936                 kref_get(&user->refcount);
3937         } else
3938                 user = NULL;
3939         rcu_read_unlock();
3940
3941         if (user == NULL) {
3942                 /* We didn't find a user, just give up. */
3943                 ipmi_inc_stat(intf, unhandled_commands);
3944
3945                 /*
3946                  * Don't do anything with these messages, just allow
3947                  * them to be freed.
3948                  */
3949
3950                 rv = 0;
3951         } else {
3952                 recv_msg = ipmi_alloc_recv_msg();
3953                 if (!recv_msg) {
3954                         /*
3955                          * We couldn't allocate memory for the
3956                          * message, so requeue it for handling
3957                          * later.
3958                          */
3959                         rv = 1;
3960                         kref_put(&user->refcount, free_user);
3961                 } else {
3962                         /*
3963                          * OEM Messages are expected to be delivered via
3964                          * the system interface to SMS software.  We might
3965                          * need to visit this again depending on OEM
3966                          * requirements
3967                          */
3968                         smi_addr = ((struct ipmi_system_interface_addr *)
3969                                     &recv_msg->addr);
3970                         smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3971                         smi_addr->channel = IPMI_BMC_CHANNEL;
3972                         smi_addr->lun = msg->rsp[0] & 3;
3973
3974                         recv_msg->user = user;
3975                         recv_msg->user_msg_data = NULL;
3976                         recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
3977                         recv_msg->msg.netfn = msg->rsp[0] >> 2;
3978                         recv_msg->msg.cmd = msg->rsp[1];
3979                         recv_msg->msg.data = recv_msg->msg_data;
3980
3981                         /*
3982                          * The message starts at byte 4, which follows the
3983                          * Channel Byte in the "GET MESSAGE" command.
3984                          */
3985                         recv_msg->msg.data_len = msg->rsp_size - 4;
3986                         memcpy(recv_msg->msg_data, &msg->rsp[4],
3987                                msg->rsp_size - 4);
3988                         if (deliver_response(intf, recv_msg))
3989                                 ipmi_inc_stat(intf, unhandled_commands);
3990                         else
3991                                 ipmi_inc_stat(intf, handled_commands);
3992                 }
3993         }
3994
3995         return rv;
3996 }
3997
3998 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
3999                                      struct ipmi_smi_msg  *msg)
4000 {
4001         struct ipmi_system_interface_addr *smi_addr;
4002
4003         recv_msg->msgid = 0;
4004         smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
4005         smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4006         smi_addr->channel = IPMI_BMC_CHANNEL;
4007         smi_addr->lun = msg->rsp[0] & 3;
4008         recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
4009         recv_msg->msg.netfn = msg->rsp[0] >> 2;
4010         recv_msg->msg.cmd = msg->rsp[1];
4011         memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
4012         recv_msg->msg.data = recv_msg->msg_data;
4013         recv_msg->msg.data_len = msg->rsp_size - 3;
4014 }
4015
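/*
 * Fan an incoming event out to every user that asked for events, one
 * ipmi_recv_msg copy per such user.  If nobody is currently listening,
 * the event is parked on intf->waiting_events (up to
 * MAX_EVENTS_IN_QUEUE) so a user that enables events later can still
 * retrieve it.
 */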
4016 static int handle_read_event_rsp(struct ipmi_smi *intf,
4017                                  struct ipmi_smi_msg *msg)
4018 {
4019         struct ipmi_recv_msg *recv_msg, *recv_msg2;
4020         struct list_head     msgs;
4021         struct ipmi_user     *user;
4022         int rv = 0, deliver_count = 0, index;
4023         unsigned long        flags;
4024
4025         if (msg->rsp_size < 19) {
4026                 /* Message is too small to be an IPMB event. */
4027                 ipmi_inc_stat(intf, invalid_events);
4028                 return 0;
4029         }
4030
4031         if (msg->rsp[2] != 0) {
4032                 /* An error getting the event, just ignore it. */
4033                 return 0;
4034         }
4035
4036         INIT_LIST_HEAD(&msgs);
4037
4038         spin_lock_irqsave(&intf->events_lock, flags);
4039
4040         ipmi_inc_stat(intf, events);
4041
4042         /*
4043          * Allocate and fill in one message for every user that is
4044          * getting events.
4045          */
4046         index = srcu_read_lock(&intf->users_srcu);
4047         list_for_each_entry_rcu(user, &intf->users, link) {
4048                 if (!user->gets_events)
4049                         continue;
4050
4051                 recv_msg = ipmi_alloc_recv_msg();
4052                 if (!recv_msg) {
4053                         rcu_read_unlock();
4054                         srcu_read_unlock(&intf->users_srcu, index);
4055                                                  link) {
4056                                 list_del(&recv_msg->link);
4057                                 ipmi_free_recv_msg(recv_msg);
4058                         }
4059                         /*
4060                          * We couldn't allocate memory for the
4061                          * message, so requeue it for handling
4062                          * later.
4063                          */
4064                         rv = 1;
4065                         goto out;
4066                 }
4067
4068                 deliver_count++;
4069
4070                 copy_event_into_recv_msg(recv_msg, msg);
4071                 recv_msg->user = user;
4072                 kref_get(&user->refcount);
4073                 list_add_tail(&recv_msg->link, &msgs);
4074         }
4075         srcu_read_unlock(&intf->users_srcu, index);
4076
4077         if (deliver_count) {
4078                 /* Now deliver all the messages. */
4079                 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
4080                         list_del(&recv_msg->link);
4081                         deliver_local_response(intf, recv_msg);
4082                 }
4083         } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
4084                 /*
4085                  * No one to receive the message; put it in the queue if
4086                  * there aren't already too many things in the queue.
4087                  */
4088                 recv_msg = ipmi_alloc_recv_msg();
4089                 if (!recv_msg) {
4090                         /*
4091                          * We couldn't allocate memory for the
4092                          * message, so requeue it for handling
4093                          * later.
4094                          */
4095                         rv = 1;
4096                         goto out;
4097                 }
4098
4099                 copy_event_into_recv_msg(recv_msg, msg);
4100                 list_add_tail(&recv_msg->link, &intf->waiting_events);
4101                 intf->waiting_events_count++;
4102         } else if (!intf->event_msg_printed) {
4103                 /*
4104                  * There are too many things in the queue, so discard this
4105                  * message.
4106                  */
4107                 dev_warn(intf->si_dev,
4108                          PFX "Event queue full, discarding incoming events\n");
4109                 intf->event_msg_printed = 1;
4110         }
4111
4112  out:
4113         spin_unlock_irqrestore(&intf->events_lock, flags);
4114
4115         return rv;
4116 }
4117
4118 static int handle_bmc_rsp(struct ipmi_smi *intf,
4119                           struct ipmi_smi_msg *msg)
4120 {
4121         struct ipmi_recv_msg *recv_msg;
4122         struct ipmi_system_interface_addr *smi_addr;
4123
4124         recv_msg = (struct ipmi_recv_msg *) msg->user_data;
4125         if (recv_msg == NULL) {
4126                 dev_warn(intf->si_dev,
4127                          "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error.  Contact your hardware vendor for assistance.\n");
4128                 return 0;
4129         }
4130
4131         recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4132         recv_msg->msgid = msg->msgid;
4133         smi_addr = ((struct ipmi_system_interface_addr *)
4134                     &recv_msg->addr);
4135         smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4136         smi_addr->channel = IPMI_BMC_CHANNEL;
4137         smi_addr->lun = msg->rsp[0] & 3;
4138         recv_msg->msg.netfn = msg->rsp[0] >> 2;
4139         recv_msg->msg.cmd = msg->rsp[1];
4140         memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
4141         recv_msg->msg.data = recv_msg->msg_data;
4142         recv_msg->msg.data_len = msg->rsp_size - 2;
4143         deliver_local_response(intf, recv_msg);
4144
4145         return 0;
4146 }
4147
4148 /*
4149  * Handle a received message.  Return 1 if the message should be requeued,
4150  * 0 if the message should be freed, or -1 if the message should not
4151  * be freed or requeued.
4152  */
4153 static int handle_one_recv_msg(struct ipmi_smi *intf,
4154                                struct ipmi_smi_msg *msg)
4155 {
4156         int requeue;
4157         int chan;
4158
4159         ipmi_debug_msg("Recv:", msg->rsp, msg->rsp_size);
4160         if (msg->rsp_size < 2) {
4161                 /* Message is too small to be correct. */
4162                 dev_warn(intf->si_dev,
4163                          PFX "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
4164                          (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
4165
4166                 /* Generate an error response for the message. */
4167                 msg->rsp[0] = msg->data[0] | (1 << 2);
4168                 msg->rsp[1] = msg->data[1];
4169                 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4170                 msg->rsp_size = 3;
4171         } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
4172                    || (msg->rsp[1] != msg->data[1])) {
4173                 /*
4174                  * The NetFN and Command in the response is not even
4175                  * marginally correct.
4176                  */
4177                 dev_warn(intf->si_dev,
4178                          PFX "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
4179                          (msg->data[0] >> 2) | 1, msg->data[1],
4180                          msg->rsp[0] >> 2, msg->rsp[1]);
4181
4182                 /* Generate an error response for the message. */
4183                 msg->rsp[0] = msg->data[0] | (1 << 2);
4184                 msg->rsp[1] = msg->data[1];
4185                 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4186                 msg->rsp_size = 3;
4187         }
4188
4189         if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4190             && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
4191             && (msg->user_data != NULL)) {
4192                 /*
4193                  * It's a response to a response we sent.  For this we
4194                  * deliver a send message response to the user.
4195                  */
4196                 struct ipmi_recv_msg *recv_msg = msg->user_data;
4197
4198                 requeue = 0;
4199                 if (msg->rsp_size < 2)
4200                         /* Message is too small to be correct. */
4201                         goto out;
4202
4203                 chan = msg->data[2] & 0x0f;
4204                 if (chan >= IPMI_MAX_CHANNELS)
4205                         /* Invalid channel number */
4206                         goto out;
4207
4208                 if (!recv_msg)
4209                         goto out;
4210
4211                 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
4212                 recv_msg->msg.data = recv_msg->msg_data;
4213                 recv_msg->msg.data_len = 1;
4214                 recv_msg->msg_data[0] = msg->rsp[2];
4215                 deliver_local_response(intf, recv_msg);
4216         } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4217                    && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
4218                 struct ipmi_channel   *chans;
4219
4220                 /* It's from the receive queue. */
4221                 chan = msg->rsp[3] & 0xf;
4222                 if (chan >= IPMI_MAX_CHANNELS) {
4223                         /* Invalid channel number */
4224                         requeue = 0;
4225                         goto out;
4226                 }
4227
4228                 /*
4229                  * We need to make sure the channels have been initialized.
4230                  * The channel_handler routine will set channels_ready
4231                  * once the channel information for this interface has
4232                  * been read, so until then we just drop the message.
4233                  */
4234                 if (!intf->channels_ready) {
4235                         requeue = 0; /* Throw the message away */
4236                         goto out;
4237                 }
4238
4239                 chans = READ_ONCE(intf->channel_list)->c;
4240
4241                 switch (chans[chan].medium) {
4242                 case IPMI_CHANNEL_MEDIUM_IPMB:
4243                         if (msg->rsp[4] & 0x04) {
4244                                 /*
4245                                  * It's a response, so find the
4246                                  * requesting message and send it up.
4247                                  */
4248                                 requeue = handle_ipmb_get_msg_rsp(intf, msg);
4249                         } else {
4250                                 /*
4251                                  * It's a command to the SMS from some other
4252                                  * entity.  Handle that.
4253                                  */
4254                                 requeue = handle_ipmb_get_msg_cmd(intf, msg);
4255                         }
4256                         break;
4257
4258                 case IPMI_CHANNEL_MEDIUM_8023LAN:
4259                 case IPMI_CHANNEL_MEDIUM_ASYNC:
4260                         if (msg->rsp[6] & 0x04) {
4261                                 /*
4262                                  * It's a response, so find the
4263                                  * requesting message and send it up.
4264                                  */
4265                                 requeue = handle_lan_get_msg_rsp(intf, msg);
4266                         } else {
4267                                 /*
4268                                  * It's a command to the SMS from some other
4269                                  * entity.  Handle that.
4270                                  */
4271                                 requeue = handle_lan_get_msg_cmd(intf, msg);
4272                         }
4273                         break;
4274
4275                 default:
4276                         /* Check for OEM Channels.  Clients had better
4277                            register for these commands. */
4278                         if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
4279                             && (chans[chan].medium
4280                                 <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
4281                                 requeue = handle_oem_get_msg_cmd(intf, msg);
4282                         } else {
4283                                 /*
4284                                  * We don't handle the channel type, so just
4285                                  * free the message.
4286                                  */
4287                                 requeue = 0;
4288                         }
4289                 }
4290
4291         } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4292                    && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
4293                 /* It's an asynchronous event. */
4294                 requeue = handle_read_event_rsp(intf, msg);
4295         } else {
4296                 /* It's a response from the local BMC. */
4297                 requeue = handle_bmc_rsp(intf, msg);
4298         }
4299
4300  out:
4301         return requeue;
4302 }
4303
4304 /*
4305  * If there are messages in the queue or pretimeouts, handle them.
4306  */
4307 static void handle_new_recv_msgs(struct ipmi_smi *intf)
4308 {
4309         struct ipmi_smi_msg  *smi_msg;
4310         unsigned long        flags = 0;
4311         int                  rv;
4312         int                  run_to_completion = intf->run_to_completion;
4313
4314         /* See if any waiting messages need to be processed. */
4315         if (!run_to_completion)
4316                 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4317         while (!list_empty(&intf->waiting_rcv_msgs)) {
4318                 smi_msg = list_entry(intf->waiting_rcv_msgs.next,
4319                                      struct ipmi_smi_msg, link);
4320                 list_del(&smi_msg->link);
4321                 if (!run_to_completion)
4322                         spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4323                                                flags);
4324                 rv = handle_one_recv_msg(intf, smi_msg);
4325                 if (!run_to_completion)
4326                         spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4327                 if (rv > 0) {
4328                         /*
4329                          * To preserve message order, quit if we
4330                          * can't handle a message.  Add the message
4331                          * back at the head; this is safe because this
4332                          * tasklet is the only thing that pulls the
4333                          * messages.
4334                          */
4335                         list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
4336                         break;
4337                 } else {
4338                         if (rv == 0)
4339                                 /* Message handled */
4340                                 ipmi_free_smi_msg(smi_msg);
4341                         /* If rv < 0, fatal error, del but don't free. */
4342                 }
4343         }
4344         if (!run_to_completion)
4345                 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
4346
4347         /*
4348          * If the pretimeout count is non-zero, decrement it by one and
4349          * deliver pretimeouts to all the users.
4350          */
4351         if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
4352                 struct ipmi_user *user;
4353                 int index;
4354
4355                 index = srcu_read_lock(&intf->users_srcu);
4356                 list_for_each_entry_rcu(user, &intf->users, link) {
4357                         if (user->handler->ipmi_watchdog_pretimeout)
4358                                 user->handler->ipmi_watchdog_pretimeout(
4359                                         user->handler_data);
4360                 }
4361                 srcu_read_unlock(&intf->users_srcu, index);
4362         }
4363 }
4364
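/*
 * Tasklet body: start the next queued transmit first (done here rather
 * than in the message-received path because the lower layer may hold
 * its own locks while delivering messages), then process anything on
 * the receive queue via handle_new_recv_msgs().
 */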
4365 static void smi_recv_tasklet(unsigned long val)
4366 {
4367         unsigned long flags = 0; /* keep us warning-free. */
4368         struct ipmi_smi *intf = (struct ipmi_smi *) val;
4369         int run_to_completion = intf->run_to_completion;
4370         struct ipmi_smi_msg *newmsg = NULL;
4371
4372         /*
4373          * Start the next message if available.
4374          *
4375          * Do this here, not in the actual receiver, because we may
4376          * deadlock: the lower layer is allowed to hold locks while
4377          * calling message delivery.
4378          */
4379
4380         rcu_read_lock();
4381
4382         if (!run_to_completion)
4383                 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4384         if (intf->curr_msg == NULL && !intf->in_shutdown) {
4385                 struct list_head *entry = NULL;
4386
4387                 /* Pick the high priority queue first. */
4388                 if (!list_empty(&intf->hp_xmit_msgs))
4389                         entry = intf->hp_xmit_msgs.next;
4390                 else if (!list_empty(&intf->xmit_msgs))
4391                         entry = intf->xmit_msgs.next;
4392
4393                 if (entry) {
4394                         list_del(entry);
4395                         newmsg = list_entry(entry, struct ipmi_smi_msg, link);
4396                         intf->curr_msg = newmsg;
4397                 }
4398         }
4399         if (!run_to_completion)
4400                 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4401         if (newmsg)
4402                 intf->handlers->sender(intf->send_info, newmsg);
4403
4404         rcu_read_unlock();
4405
4406         handle_new_recv_msgs(intf);
4407 }
4408
4409 /* Handle a new message from the lower layer. */
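/*
 * Illustrative only (not code from this driver): a hypothetical lower-layer
 * SMI driver that has finished reading a response from the BMC into its
 * current message would hand it back roughly like this, assuming "smi_info"
 * is that driver's private state and "len"/"rsp_buf" hold the raw response:
 *
 *	struct ipmi_smi_msg *msg = smi_info->curr_msg;
 *
 *	msg->rsp_size = len;
 *	memcpy(msg->rsp, rsp_buf, len);
 *	smi_info->curr_msg = NULL;
 *	ipmi_smi_msg_received(smi_info->intf, msg);
 *
 * The message is then either consumed here (local responses to
 * IPMI_SEND_MSG_CMD) or queued for the receive tasklet.
 */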
4410 void ipmi_smi_msg_received(struct ipmi_smi *intf,
4411                            struct ipmi_smi_msg *msg)
4412 {
4413         unsigned long flags = 0; /* keep us warning-free. */
4414         int run_to_completion = intf->run_to_completion;
4415
4416         if ((msg->data_size >= 2)
4417             && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
4418             && (msg->data[1] == IPMI_SEND_MSG_CMD)
4419             && (msg->user_data == NULL)) {
4420
4421                 if (intf->in_shutdown)
4422                         goto free_msg;
4423
4424                 /*
4425                  * This is the local response to a command send; start
4426                  * the timer for these.  The user_data will not be
4427                  * NULL if this is a response send, and we will let
4428                  * response sends just go through.
4429                  */
4430
4431                 /*
4432                  * Check for errors.  If we get certain errors (ones
4433                  * that basically mean we can try again later), we
4434                  * ignore them and start the timer.  Otherwise we
4435                  * report the error immediately.
4436                  */
4437                 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
4438                     && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
4439                     && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
4440                     && (msg->rsp[2] != IPMI_BUS_ERR)
4441                     && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
4442                         int ch = msg->rsp[3] & 0xf;
4443                         struct ipmi_channel *chans;
4444
4445                         /* Got an error sending the message; handle it. */
4446
4447                         chans = READ_ONCE(intf->channel_list)->c;
4448                         if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
4449                             || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
4450                                 ipmi_inc_stat(intf, sent_lan_command_errs);
4451                         else
4452                                 ipmi_inc_stat(intf, sent_ipmb_command_errs);
4453                         intf_err_seq(intf, msg->msgid, msg->rsp[2]);
4454                 } else
4455                         /* The message was sent, start the timer. */
4456                         intf_start_seq_timer(intf, msg->msgid);
4457
4458 free_msg:
4459                 ipmi_free_smi_msg(msg);
4460         } else {
4461                 /*
4462                  * To preserve message order, we keep a queue and deliver from
4463                  * a tasklet.
4464                  */
4465                 if (!run_to_completion)
4466                         spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4467                 list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
4468                 if (!run_to_completion)
4469                         spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4470                                                flags);
4471         }
4472
4473         if (!run_to_completion)
4474                 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4475         /*
4476          * We can get an asynchronous event or receive message in addition
4477          * to commands we send.
4478          */
4479         if (msg == intf->curr_msg)
4480                 intf->curr_msg = NULL;
4481         if (!run_to_completion)
4482                 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4483
4484         if (run_to_completion)
4485                 smi_recv_tasklet((unsigned long) intf);
4486         else
4487                 tasklet_schedule(&intf->recv_tasklet);
4488 }
4489 EXPORT_SYMBOL(ipmi_smi_msg_received);
4490
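
/*
 * Called by the lower layer when the watchdog pre-timeout fires.  We just
 * note that a pretimeout needs delivering and let the receive tasklet call
 * each user's ipmi_watchdog_pretimeout() handler.
 */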
4491 void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
4492 {
4493         if (intf->in_shutdown)
4494                 return;
4495
4496         atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
4497         tasklet_schedule(&intf->recv_tasklet);
4498 }
4499 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
4500
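
/*
 * Build a fresh SMI message for retransmitting recv_msg.  The sequence
 * table slot and seqid are folded into the msgid so the eventual response
 * can be matched back to the original request.
 */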
4501 static struct ipmi_smi_msg *
4502 smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
4503                   unsigned char seq, long seqid)
4504 {
4505         struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
4506         if (!smi_msg)
4507                 /*
4508                  * If we can't allocate the message, just return; we get
4509                  * 4 retries, so this should be ok.
4510                  */
4511                 return NULL;
4512
4513         memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
4514         smi_msg->data_size = recv_msg->msg.data_len;
4515         smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
4516
4517         ipmi_debug_msg("Resend: ", smi_msg->data, smi_msg->data_size);
4518
4519         return smi_msg;
4520 }
4521
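
/*
 * Age one sequence-table entry by timeout_period.  Called with
 * intf->seq_lock held (the lock is dropped and retaken around the actual
 * retransmit).  Expired entries are either retransmitted or, once the
 * retries are used up, moved to the timeouts list for error delivery by
 * the caller.
 */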
4522 static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
4523                               struct list_head *timeouts,
4524                               unsigned long timeout_period,
4525                               int slot, unsigned long *flags,
4526                               unsigned int *waiting_msgs)
4527 {
4528         struct ipmi_recv_msg *msg;
4529
4530         if (intf->in_shutdown)
4531                 return;
4532
4533         if (!ent->inuse)
4534                 return;
4535
4536         if (timeout_period < ent->timeout) {
4537                 ent->timeout -= timeout_period;
4538                 (*waiting_msgs)++;
4539                 return;
4540         }
4541
4542         if (ent->retries_left == 0) {
4543                 /* The message has used all its retries. */
4544                 ent->inuse = 0;
4545                 msg = ent->recv_msg;
4546                 list_add_tail(&msg->link, timeouts);
4547                 if (ent->broadcast)
4548                         ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
4549                 else if (is_lan_addr(&ent->recv_msg->addr))
4550                         ipmi_inc_stat(intf, timed_out_lan_commands);
4551                 else
4552                         ipmi_inc_stat(intf, timed_out_ipmb_commands);
4553         } else {
4554                 struct ipmi_smi_msg *smi_msg;
4555                 /* More retries, send again. */
4556
4557                 (*waiting_msgs)++;
4558
4559                 /*
4560                  * Start with the max timeout; it is set back to the
4561                  * normal timeout after the message is sent.
4562                  */
4563                 ent->timeout = MAX_MSG_TIMEOUT;
4564                 ent->retries_left--;
4565                 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
4566                                             ent->seqid);
4567                 if (!smi_msg) {
4568                         if (is_lan_addr(&ent->recv_msg->addr))
4569                                 ipmi_inc_stat(intf,
4570                                               dropped_rexmit_lan_commands);
4571                         else
4572                                 ipmi_inc_stat(intf,
4573                                               dropped_rexmit_ipmb_commands);
4574                         return;
4575                 }
4576
4577                 spin_unlock_irqrestore(&intf->seq_lock, *flags);
4578
4579                 /*
4580                  * Send the new message.  We send with a zero
4581                  * priority.  It already timed out, so time is hardly
4582                  * critical now, and high-priority messages are really
4583                  * only for messages to the local MC, which don't get
4584                  * resent.
4585                  */
4586                 if (intf->handlers) {
4587                         if (is_lan_addr(&ent->recv_msg->addr))
4588                                 ipmi_inc_stat(intf,
4589                                               retransmitted_lan_commands);
4590                         else
4591                                 ipmi_inc_stat(intf,
4592                                               retransmitted_ipmb_commands);
4593
4594                         smi_send(intf, intf->handlers, smi_msg, 0);
4595                 } else
4596                         ipmi_free_smi_msg(smi_msg);
4597
4598                 spin_lock_irqsave(&intf->seq_lock, *flags);
4599         }
4600 }
4601
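
/*
 * Per-interface periodic work: schedule deferred BMC registration if it is
 * still pending, age the sequence table and deliver timeout errors, and
 * count down the maintenance-mode timeouts.  Returns the number of things
 * still waiting so the caller knows whether the timer needs to keep
 * running.
 */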
4602 static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
4603                                          unsigned long timeout_period)
4604 {
4605         struct list_head     timeouts;
4606         struct ipmi_recv_msg *msg, *msg2;
4607         unsigned long        flags;
4608         int                  i;
4609         unsigned int         waiting_msgs = 0;
4610
4611         if (!intf->bmc_registered) {
4612                 kref_get(&intf->refcount);
4613                 if (!schedule_work(&intf->bmc_reg_work)) {
4614                         kref_put(&intf->refcount, intf_free);
4615                         waiting_msgs++;
4616                 }
4617         }
4618
4619         /*
4620          * Go through the seq table and find any messages that
4621          * have timed out, putting them in the timeouts
4622          * list.
4623          */
4624         INIT_LIST_HEAD(&timeouts);
4625         spin_lock_irqsave(&intf->seq_lock, flags);
4626         if (intf->ipmb_maintenance_mode_timeout) {
4627                 if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
4628                         intf->ipmb_maintenance_mode_timeout = 0;
4629                 else
4630                         intf->ipmb_maintenance_mode_timeout -= timeout_period;
4631         }
4632         for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
4633                 check_msg_timeout(intf, &intf->seq_table[i],
4634                                   &timeouts, timeout_period, i,
4635                                   &flags, &waiting_msgs);
4636         spin_unlock_irqrestore(&intf->seq_lock, flags);
4637
4638         list_for_each_entry_safe(msg, msg2, &timeouts, link)
4639                 deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
4640
4641         /*
4642          * Maintenance mode handling.  Check the timeout
4643          * optimistically before we claim the lock.  That may
4644          * mean a timeout occasionally gets missed, but then
4645          * the timeout just gets extended by one period, which
4646          * is no big deal, and it avoids taking the lock most
4647          * of the time.
4648          */
4649         if (intf->auto_maintenance_timeout > 0) {
4650                 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
4651                 if (intf->auto_maintenance_timeout > 0) {
4652                         intf->auto_maintenance_timeout
4653                                 -= timeout_period;
4654                         if (!intf->maintenance_mode
4655                             && (intf->auto_maintenance_timeout <= 0)) {
4656                                 intf->maintenance_mode_enable = false;
4657                                 maintenance_mode_update(intf);
4658                         }
4659                 }
4660                 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
4661                                        flags);
4662         }
4663
4664         tasklet_schedule(&intf->recv_tasklet);
4665
4666         return waiting_msgs;
4667 }
4668
4669 static void ipmi_request_event(struct ipmi_smi *intf)
4670 {
4671         /* No event requests when in maintenance mode. */
4672         if (intf->maintenance_mode_enable)
4673                 return;
4674
4675         if (!intf->in_shutdown)
4676                 intf->handlers->request_events(intf->send_info);
4677 }
4678
4679 static struct timer_list ipmi_timer;
4680
4681 static atomic_t stop_operation;
4682
4683 static void ipmi_timeout(struct timer_list *unused)
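
/*
 * Global timer callback, run every IPMI_TIMEOUT_JIFFIES.  It walks all
 * interfaces, requests events when users are waiting for them, runs the
 * per-interface timeout handler, and re-arms itself only while at least
 * one interface still needs timer service.
 */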
4684 {
4685         struct ipmi_smi *intf;
4686         int nt = 0, index;
4687
4688         if (atomic_read(&stop_operation))
4689                 return;
4690
4691         index = srcu_read_lock(&ipmi_interfaces_srcu);
4692         list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4693                 int lnt = 0;
4694
4695                 if (atomic_read(&intf->event_waiters)) {
4696                         intf->ticks_to_req_ev--;
4697                         if (intf->ticks_to_req_ev == 0) {
4698                                 ipmi_request_event(intf);
4699                                 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
4700                         }
4701                         lnt++;
4702                 }
4703
4704                 lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
4705
4706                 lnt = !!lnt;
4707                 if (lnt != intf->last_needs_timer &&
4708                                         intf->handlers->set_need_watch)
4709                         intf->handlers->set_need_watch(intf->send_info, lnt);
4710                 intf->last_needs_timer = lnt;
4711
4712                 nt += lnt;
4713         }
4714         srcu_read_unlock(&ipmi_interfaces_srcu, index);
4715
4716         if (nt)
4717                 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4718 }
4719
4720 static void need_waiter(struct ipmi_smi *intf)
4721 {
4722         /* Racy, but worst case we start the timer twice. */
4723         if (!timer_pending(&ipmi_timer))
4724                 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4725 }
4726
4727 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
4728 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
4729
4730 static void free_smi_msg(struct ipmi_smi_msg *msg)
4731 {
4732         atomic_dec(&smi_msg_inuse_count);
4733         kfree(msg);
4734 }
4735
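
/*
 * Allocate a message for the SMI (lower) layer.  The ->done callback frees
 * the memory and maintains smi_msg_inuse_count, which is checked for leaks
 * at module exit, so callers should release messages with
 * ipmi_free_smi_msg() rather than kfree().
 */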
4736 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
4737 {
4738         struct ipmi_smi_msg *rv;
4739         rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
4740         if (rv) {
4741                 rv->done = free_smi_msg;
4742                 rv->user_data = NULL;
4743                 atomic_inc(&smi_msg_inuse_count);
4744         }
4745         return rv;
4746 }
4747 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
4748
4749 static void free_recv_msg(struct ipmi_recv_msg *msg)
4750 {
4751         atomic_dec(&recv_msg_inuse_count);
4752         kfree(msg);
4753 }
4754
4755 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
4756 {
4757         struct ipmi_recv_msg *rv;
4758
4759         rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
4760         if (rv) {
4761                 rv->user = NULL;
4762                 rv->done = free_recv_msg;
4763                 atomic_inc(&recv_msg_inuse_count);
4764         }
4765         return rv;
4766 }
4767
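
/*
 * Drop the reference the message holds on its user, if any, then hand the
 * message back through its ->done callback.
 */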
4768 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
4769 {
4770         if (msg->user)
4771                 kref_put(&msg->user->refcount, free_user);
4772         msg->done(msg);
4773 }
4774 EXPORT_SYMBOL(ipmi_free_recv_msg);
4775
4776 static atomic_t panic_done_count = ATOMIC_INIT(0);
4777
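
/*
 * Completion handlers for the on-stack messages used in the panic path.
 * They only account for completion (ipmi_panic_request_and_wait()
 * busy-polls until the count drops back to zero); the messages themselves
 * must not be freed.
 */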
4778 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
4779 {
4780         atomic_dec(&panic_done_count);
4781 }
4782
4783 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
4784 {
4785         atomic_dec(&panic_done_count);
4786 }
4787
4788 /*
4789  * Inside a panic, send a message and wait for a response.
4790  */
4791 static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
4792                                         struct ipmi_addr *addr,
4793                                         struct kernel_ipmi_msg *msg)
4794 {
4795         struct ipmi_smi_msg  smi_msg;
4796         struct ipmi_recv_msg recv_msg;
4797         int rv;
4798
4799         smi_msg.done = dummy_smi_done_handler;
4800         recv_msg.done = dummy_recv_done_handler;
4801         atomic_add(2, &panic_done_count);
4802         rv = i_ipmi_request(NULL,
4803                             intf,
4804                             addr,
4805                             0,
4806                             msg,
4807                             intf,
4808                             &smi_msg,
4809                             &recv_msg,
4810                             0,
4811                             intf->addrinfo[0].address,
4812                             intf->addrinfo[0].lun,
4813                             0, 1); /* Don't retry, and don't wait. */
4814         if (rv)
4815                 atomic_sub(2, &panic_done_count);
4816         else if (intf->handlers->flush_messages)
4817                 intf->handlers->flush_messages(intf->send_info);
4818
4819         while (atomic_read(&panic_done_count) != 0)
4820                 ipmi_poll(intf);
4821 }
4822
4823 static void event_receiver_fetcher(struct ipmi_smi *intf,
4824                                    struct ipmi_recv_msg *msg)
4825 {
4826         if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
4827             && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
4828             && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
4829             && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
4830                 /* A Get Event Receiver response; save it. */
4831                 intf->event_receiver = msg->msg.data[1];
4832                 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
4833         }
4834 }
4835
4836 static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
4837 {
4838         if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
4839             && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
4840             && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
4841             && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
4842                 /*
4843                  * A Get Device ID response; note whether we are an SEL
4844                  * device and/or an event generator.
4845                  */
4846                 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
4847                 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
4848         }
4849 }
4850
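
/*
 * Send an "OS Critical Stop" platform event to the local MC and, when
 * ipmi_send_panic_event asks for the string as well, chop the panic string
 * into 11-byte chunks and append them as untimestamped OEM records, either
 * via the configured event receiver or directly into the local SEL.
 */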
4851 static void send_panic_events(struct ipmi_smi *intf, char *str)
4852 {
4853         struct kernel_ipmi_msg msg;
4854         unsigned char data[16];
4855         struct ipmi_system_interface_addr *si;
4856         struct ipmi_addr addr;
4857         char *p = str;
4858         struct ipmi_ipmb_addr *ipmb;
4859         int j;
4860
4861         if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
4862                 return;
4863
4864         si = (struct ipmi_system_interface_addr *) &addr;
4865         si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4866         si->channel = IPMI_BMC_CHANNEL;
4867         si->lun = 0;
4868
4869         /* Fill in an event reporting that we have failed. */
4870         msg.netfn = 0x04; /* Sensor or Event. */
4871         msg.cmd = 2; /* Platform event command. */
4872         msg.data = data;
4873         msg.data_len = 8;
4874         data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
4875         data[1] = 0x03; /* This is for IPMI 1.0. */
4876         data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
4877         data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
4878         data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
4879
4880         /*
4881          * Put a few breadcrumbs in.  Hopefully later we can add more things
4882          * to make the panic events more useful.
4883          */
4884         if (str) {
4885                 data[3] = str[0];
4886                 data[6] = str[1];
4887                 data[7] = str[2];
4888         }
4889
4890         /* Send the event announcing the panic. */
4891         ipmi_panic_request_and_wait(intf, &addr, &msg);
4892
4893         /*
4894          * On every interface, dump a series of OEM events holding the
4895          * string.
4896          */
4897         if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
4898                 return;
4899
4900         /*
4901          * intf_num is used as a marker to tell if the
4902          * interface is valid.  Thus we need a read barrier to
4903          * make sure data fetched before checking intf_num
4904          * won't be used.
4905          */
4906         smp_rmb();
4907
4908         /*
4909          * First job here is to figure out where to send the
4910          * OEM events.  There's no way in IPMI to send OEM
4911          * events using an event send command, so we have to
4912          * find the SEL to put them in and stick them in
4913          * there.
4914          */
4915
4916         /* Get capabilities from the get device id. */
4917         intf->local_sel_device = 0;
4918         intf->local_event_generator = 0;
4919         intf->event_receiver = 0;
4920
4921         /* Request the device info from the local MC. */
4922         msg.netfn = IPMI_NETFN_APP_REQUEST;
4923         msg.cmd = IPMI_GET_DEVICE_ID_CMD;
4924         msg.data = NULL;
4925         msg.data_len = 0;
4926         intf->null_user_handler = device_id_fetcher;
4927         ipmi_panic_request_and_wait(intf, &addr, &msg);
4928
4929         if (intf->local_event_generator) {
4930                 /* Request the event receiver from the local MC. */
4931                 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
4932                 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
4933                 msg.data = NULL;
4934                 msg.data_len = 0;
4935                 intf->null_user_handler = event_receiver_fetcher;
4936                 ipmi_panic_request_and_wait(intf, &addr, &msg);
4937         }
4938         intf->null_user_handler = NULL;
4939
4940         /*
4941          * Validate the event receiver.  The low bit must not
4942          * be 1 (it must be a valid IPMB address), it cannot
4943          * be zero, and it must not be my address.
4944          */
4945         if (((intf->event_receiver & 1) == 0)
4946             && (intf->event_receiver != 0)
4947             && (intf->event_receiver != intf->addrinfo[0].address)) {
4948                 /*
4949                  * The event receiver is valid, send an IPMB
4950                  * message.
4951                  */
4952                 ipmb = (struct ipmi_ipmb_addr *) &addr;
4953                 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
4954                 ipmb->channel = 0; /* FIXME - is this right? */
4955                 ipmb->lun = intf->event_receiver_lun;
4956                 ipmb->slave_addr = intf->event_receiver;
4957         } else if (intf->local_sel_device) {
4958                 /*
4959                  * The event receiver was not valid (or was
4960                  * me), but I am an SEL device, so just dump it
4961                  * in my SEL.
4962                  */
4963                 si = (struct ipmi_system_interface_addr *) &addr;
4964                 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4965                 si->channel = IPMI_BMC_CHANNEL;
4966                 si->lun = 0;
4967         } else
4968                 return; /* Nowhere to send the event. */
4969
4970         msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
4971         msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
4972         msg.data = data;
4973         msg.data_len = 16;
4974
4975         j = 0;
4976         while (*p) {
4977                 int size = strlen(p);
4978
4979                 if (size > 11)
4980                         size = 11;
4981                 data[0] = 0;
4982                 data[1] = 0;
4983                 data[2] = 0xf0; /* OEM event without timestamp. */
4984                 data[3] = intf->addrinfo[0].address;
4985                 data[4] = j++; /* sequence # */
4986                 /*
4987                  * Always give 11 bytes, so strncpy will fill
4988                  * it with zeroes for me.
4989                  */
4990                 strncpy(data+5, p, 11);
4991                 p += size;
4992
4993                 ipmi_panic_request_and_wait(intf, &addr, &msg);
4994         }
4995 }
4996
4997 static int has_panicked;
4998
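
/*
 * Panic notifier.  Switch every ready interface to polled,
 * run-to-completion operation, throw away queues whose locks cannot be
 * taken (they may be corrupted), call each user's panic handler, and
 * finally send the panic events.
 */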
4999 static int panic_event(struct notifier_block *this,
5000                        unsigned long         event,
5001                        void                  *ptr)
5002 {
5003         struct ipmi_smi *intf;
5004         struct ipmi_user *user;
5005
5006         if (has_panicked)
5007                 return NOTIFY_DONE;
5008         has_panicked = 1;
5009
5010         /* For every registered interface, set it to run to completion. */
5011         list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
5012                 if (!intf->handlers || intf->intf_num == -1)
5013                         /* Interface is not ready. */
5014                         continue;
5015
5016                 if (!intf->handlers->poll)
5017                         continue;
5018
5019                 /*
5020                  * If we were interrupted while locking xmit_msgs_lock or
5021                  * waiting_rcv_msgs_lock, the corresponding list may be
5022                  * corrupted.  In that case, drop the items on the list
5023                  * for safety.
5024                  */
5025                 if (!spin_trylock(&intf->xmit_msgs_lock)) {
5026                         INIT_LIST_HEAD(&intf->xmit_msgs);
5027                         INIT_LIST_HEAD(&intf->hp_xmit_msgs);
5028                 } else
5029                         spin_unlock(&intf->xmit_msgs_lock);
5030
5031                 if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
5032                         INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
5033                 else
5034                         spin_unlock(&intf->waiting_rcv_msgs_lock);
5035
5036                 intf->run_to_completion = 1;
5037                 if (intf->handlers->set_run_to_completion)
5038                         intf->handlers->set_run_to_completion(intf->send_info,
5039                                                               1);
5040
5041                 list_for_each_entry_rcu(user, &intf->users, link) {
5042                         if (user->handler->ipmi_panic_handler)
5043                                 user->handler->ipmi_panic_handler(
5044                                         user->handler_data);
5045                 }
5046
5047                 send_panic_events(intf, ptr);
5048         }
5049
5050         return NOTIFY_DONE;
5051 }
5052
5053 /* Must be called with ipmi_interfaces_mutex held. */
5054 static int ipmi_register_driver(void)
5055 {
5056         int rv;
5057
5058         if (drvregistered)
5059                 return 0;
5060
5061         rv = driver_register(&ipmidriver.driver);
5062         if (rv)
5063                 pr_err("Could not register IPMI driver\n");
5064         else
5065                 drvregistered = true;
5066         return rv;
5067 }
5068
5069 static struct notifier_block panic_block = {
5070         .notifier_call  = panic_event,
5071         .next           = NULL,
5072         .priority       = 200   /* priority: INT_MAX >= x >= 0 */
5073 };
5074
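
/*
 * One-time setup of the message handler: driver registration, SRCU state,
 * the periodic timer and the panic notifier.  Invoked lazily from the
 * registration paths earlier in this file the first time the handler is
 * needed; the "initialized" flag makes repeat calls cheap no-ops.
 */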
5075 static int ipmi_init_msghandler(void)
5076 {
5077         int rv;
5078
5079         mutex_lock(&ipmi_interfaces_mutex);
5080         rv = ipmi_register_driver();
5081         if (rv)
5082                 goto out;
5083         if (initialized)
5084                 goto out;
5085
5086         init_srcu_struct(&ipmi_interfaces_srcu);
5087
5088         timer_setup(&ipmi_timer, ipmi_timeout, 0);
5089         mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
5090
5091         atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
5092
5093         initialized = true;
5094
5095 out:
5096         mutex_unlock(&ipmi_interfaces_mutex);
5097         return rv;
5098 }
5099
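
/*
 * Module init just registers the ipmidriver driver; the remaining setup is
 * done lazily by ipmi_init_msghandler() when it is first needed.
 */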
5100 static int __init ipmi_init_msghandler_mod(void)
5101 {
5102         int rv;
5103
5104         pr_info("version " IPMI_DRIVER_VERSION "\n");
5105
5106         mutex_lock(&ipmi_interfaces_mutex);
5107         rv = ipmi_register_driver();
5108         mutex_unlock(&ipmi_interfaces_mutex);
5109
5110         return rv;
5111 }
5112
5113 static void __exit cleanup_ipmi(void)
5114 {
5115         int count;
5116
5117         if (initialized) {
5118                 atomic_notifier_chain_unregister(&panic_notifier_list,
5119                                                  &panic_block);
5120
5121                 /*
5122                  * This can't be called if any interfaces exist, so there is
5123                  * no need to worry about shutting down the interfaces.
5124                  */
5125
5126                 /*
5127                  * Tell the timer to stop, then wait for it to stop.  This
5128                  * avoids problems with race conditions removing the timer
5129                  * here.
5130                  */
5131                 atomic_inc(&stop_operation);
5132                 del_timer_sync(&ipmi_timer);
5133
5134                 initialized = false;
5135
5136                 /* Check for buffer leaks. */
5137                 count = atomic_read(&smi_msg_inuse_count);
5138                 if (count != 0)
5139                         pr_warn(PFX "SMI message count %d at exit\n", count);
5140                 count = atomic_read(&recv_msg_inuse_count);
5141                 if (count != 0)
5142                         pr_warn(PFX "recv message count %d at exit\n", count);
5143                 cleanup_srcu_struct(&ipmi_interfaces_srcu);
5144         }
5145         if (drvregistered)
5146                 driver_unregister(&ipmidriver.driver);
5147 }
5148 module_exit(cleanup_ipmi);
5149
5150 module_init(ipmi_init_msghandler_mod);
5151 MODULE_LICENSE("GPL");
5152 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
5153 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
5154                    " interface.");
5155 MODULE_VERSION(IPMI_DRIVER_VERSION);
5156 MODULE_SOFTDEP("post: ipmi_devintf");