// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_msghandler.c
 *
 * Incoming and outgoing message routing for an IPMI interface.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *
 * Copyright 2002 MontaVista Software Inc.
 */

#define pr_fmt(fmt) "IPMI message handler: " fmt
#define dev_fmt(fmt) pr_fmt(fmt)
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include <linux/nospec.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>

#define IPMI_DRIVER_VERSION "39.2"
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
static void smi_recv_tasklet(struct tasklet_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg);

static bool initialized;
static bool drvregistered;
/* Numbers in this enumerator should be mapped to ipmi_panic_event_str */
enum ipmi_panic_event_op {
	IPMI_SEND_PANIC_EVENT_NONE,
	IPMI_SEND_PANIC_EVENT,
	IPMI_SEND_PANIC_EVENT_STRING,
	IPMI_SEND_PANIC_EVENT_MAX
};

/* Indices in this array should be mapped to enum ipmi_panic_event_op */
static const char *const ipmi_panic_event_str[] = { "none", "event", "string", NULL };
#ifdef CONFIG_IPMI_PANIC_STRING
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
#elif defined(CONFIG_IPMI_PANIC_EVENT)
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
#else
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
#endif

static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;
static int panic_op_write_handler(const char *val,
				  const struct kernel_param *kp)
{
	char valcp[16];
	int e;

	strscpy(valcp, val, sizeof(valcp));
	e = match_string(ipmi_panic_event_str, -1, strstrip(valcp));
	if (e < 0)
		return e;

	ipmi_send_panic_event = e;
	return 0;
}
static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
{
	const char *event_str;

	if (ipmi_send_panic_event >= IPMI_SEND_PANIC_EVENT_MAX)
		event_str = "???";
	else
		event_str = ipmi_panic_event_str[ipmi_send_panic_event];

	return sprintf(buffer, "%s\n", event_str);
}
static const struct kernel_param_ops panic_op_ops = {
	.set = panic_op_write_handler,
	.get = panic_op_read_handler
};
module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic. Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
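
/*
 * For reference, a sketch of how this parameter is typically set
 * (assuming the handler is built as the ipmi_msghandler module):
 *
 *   # On the kernel command line:
 *   ipmi_msghandler.panic_op=string
 *
 *   # Or at runtime through sysfs (0600 above makes this root-only):
 *   echo event > /sys/module/ipmi_msghandler/parameters/panic_op
 */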
#define MAX_EVENTS_IN_QUEUE	25

/* Remain in auto-maintenance mode for this amount of time (in ms). */
static unsigned long maintenance_mode_timeout_ms = 30000;
module_param(maintenance_mode_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(maintenance_mode_timeout_ms,
		 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");
/*
 * Don't let a message sit in a queue forever, always time it with at
 * least the max message timer. This is in milliseconds.
 */
#define MAX_MSG_TIMEOUT		60000
/*
 * Timeout times below are in milliseconds, and are done off a 1
 * second timer. So setting the value to 1000 would mean anything
 * between 0 and 1000ms. So really the only reasonable minimum
 * setting is 2000ms, which is between 1 and 2 seconds.
 */
/* The default timeout for message retries. */
static unsigned long default_retry_ms = 2000;
module_param(default_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_retry_ms,
		 "The time (milliseconds) between retry sends");
/* The default timeout for maintenance mode message retries. */
static unsigned long default_maintenance_retry_ms = 3000;
module_param(default_maintenance_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_maintenance_retry_ms,
		 "The time (milliseconds) between retry sends in maintenance mode");
/* The default maximum number of retries */
static unsigned int default_max_retries = 4;
module_param(default_max_retries, uint, 0644);
MODULE_PARM_DESC(default_max_retries,
		 "The maximum number of times to retry sends before giving up");
/* The default maximum number of users that may register. */
static unsigned int max_users = 30;
module_param(max_users, uint, 0644);
MODULE_PARM_DESC(max_users,
		 "The most users that may use the IPMI stack at one time.");
/* The default maximum number of messages a user may have outstanding. */
static unsigned int max_msgs_per_user = 100;
module_param(max_msgs_per_user, uint, 0644);
MODULE_PARM_DESC(max_msgs_per_user,
		 "The most messages a user may have outstanding.");
/* Call every ~1000 ms. */
#define IPMI_TIMEOUT_TIME	1000

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)

/*
 * Request events from the queue every second (this is the number of
 * IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
 * future, IPMI will add a way to know immediately if an event is in
 * the queue and this silliness can go away.
 */
#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))

/* How long should we cache dynamic device IDs? */
#define IPMI_DYN_DEV_ID_EXPIRY	(10 * HZ)
/*
 * The main "user" data structure.
 */
struct ipmi_user {
	struct list_head link;

	/*
	 * Set to NULL when the user is destroyed, a pointer to myself
	 * so srcu_dereference can be used on it.
	 */
	struct ipmi_user *self;
	struct srcu_struct release_barrier;

	struct kref refcount;

	/* The upper layer that handles receive messages. */
	const struct ipmi_user_hndl *handler;
	void             *handler_data;

	/* The interface this user is bound to. */
	struct ipmi_smi *intf;

	/* Does this interface receive IPMI events? */
	bool gets_events;

	atomic_t nr_msgs;

	/* Free must run in process context for RCU cleanup. */
	struct work_struct remove_work;
};
static struct workqueue_struct *remove_work_wq;

static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
	__acquires(user->release_barrier)
{
	struct ipmi_user *ruser;

	*index = srcu_read_lock(&user->release_barrier);
	ruser = srcu_dereference(user->self, &user->release_barrier);
	if (!ruser)
		srcu_read_unlock(&user->release_barrier, *index);
	return ruser;
}

static void release_ipmi_user(struct ipmi_user *user, int index)
{
	srcu_read_unlock(&user->release_barrier, index);
}
struct cmd_rcvr {
	struct list_head link;

	struct ipmi_user *user;
	unsigned char netfn;
	unsigned char cmd;
	unsigned int  chans;

	/*
	 * This is used to form a linked list during mass deletion.
	 * Since this is in an RCU list, we cannot use the link above
	 * or change any data until the RCU period completes. So we
	 * use this next variable during mass deletion so we can have
	 * a list and don't have to wait and restart the search on
	 * every individual deletion of a command.
	 */
	struct cmd_rcvr *next;
};
struct seq_table {
	unsigned int inuse : 1;
	unsigned int broadcast : 1;

	unsigned long timeout;
	unsigned long orig_timeout;
	unsigned int retries_left;

	/*
	 * To verify on an incoming send message response that this is
	 * the message that the response is for, we keep a sequence id
	 * and increment it every time we send a message.
	 */
	long seqid;

	/*
	 * This is held so we can properly respond to the message on a
	 * timeout, and it is used to hold the temporary data for
	 * retransmission, too.
	 */
	struct ipmi_recv_msg *recv_msg;
};
/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid)			\
	do {							\
		seq = (((msgid) >> 26) & 0x3f);			\
		seqid = ((msgid) & 0x3ffffff);			\
	} while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
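
/*
 * Illustrative example (not from the original source): packing seq 5
 * with seqid 0x123 gives msgid (5 << 26) | 0x123 = 0x14000123, and
 * GET_SEQ_FROM_MSGID() recovers seq = 5 and seqid = 0x123 from it.
 * The low 26 bits wrap via NEXT_SEQID(), so a stale response that
 * reuses a sequence slot can be detected by a seqid mismatch.
 */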
#define IPMI_MAX_CHANNELS 16
struct ipmi_channel {
	unsigned char medium;
	unsigned char protocol;
};

struct ipmi_channel_set {
	struct ipmi_channel c[IPMI_MAX_CHANNELS];
};
struct ipmi_my_addrinfo {
	/*
	 * My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
	 * but may be changed by the user.
	 */
	unsigned char address;

	/*
	 * My LUN. This should generally stay the SMS LUN, but just in
	 * case...
	 */
	unsigned char lun;
};
/*
 * Note that the product id, manufacturer id, guid, and device id are
 * immutable in this structure, so dyn_mutex is not required for
 * accessing those. If those change on a BMC, a new BMC is allocated.
 */
struct bmc_device {
	struct platform_device pdev;
	struct list_head       intfs; /* Interfaces on this BMC. */
	struct ipmi_device_id  id;
	struct ipmi_device_id  fetch_id;
	int                    dyn_id_set;
	unsigned long          dyn_id_expiry;
	struct mutex           dyn_mutex; /* Protects id, intfs, & dyn* */
	guid_t                 guid;
	guid_t                 fetch_guid;
	int                    dyn_guid_set;
	struct kref	       usecount;
	struct work_struct     remove_work;
	unsigned char	       cc; /* completion code */
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			     struct ipmi_device_id *id,
			     bool *guid_set, guid_t *guid);
/*
 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 * structure.
 */
enum ipmi_stat_indexes {
	/* Commands we got from the user that were invalid. */
	IPMI_STAT_sent_invalid_commands = 0,

	/* Commands we sent to the MC. */
	IPMI_STAT_sent_local_commands,

	/* Responses from the MC that were delivered to a user. */
	IPMI_STAT_handled_local_responses,

	/* Responses from the MC that were not delivered to a user. */
	IPMI_STAT_unhandled_local_responses,

	/* Commands we sent out to the IPMB bus. */
	IPMI_STAT_sent_ipmb_commands,

	/* Commands sent on the IPMB that had errors on the SEND CMD */
	IPMI_STAT_sent_ipmb_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_ipmb_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_ipmb_commands,

	/*
	 * This is like above, but for broadcasts. Broadcasts are
	 * *not* included in the above count (they are expected to
	 * time out).
	 */
	IPMI_STAT_timed_out_ipmb_broadcasts,

	/* Responses I have sent to the IPMB bus. */
	IPMI_STAT_sent_ipmb_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_ipmb_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_ipmb_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_ipmb_responses,

	/* Commands we sent out on the LAN. */
	IPMI_STAT_sent_lan_commands,

	/* Commands sent on the LAN that had errors on the SEND CMD */
	IPMI_STAT_sent_lan_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_lan_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_lan_commands,

	/* Responses I have sent on the LAN. */
	IPMI_STAT_sent_lan_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_lan_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_lan_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_lan_responses,

	/* The command was delivered to the user. */
	IPMI_STAT_handled_commands,

	/* The command had invalid data in it. */
	IPMI_STAT_invalid_commands,

	/* The command didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_commands,

	/* Invalid data in an event. */
	IPMI_STAT_invalid_events,

	/* Events that were received with the proper format. */
	IPMI_STAT_events,

	/* Retransmissions on IPMB that failed. */
	IPMI_STAT_dropped_rexmit_ipmb_commands,

	/* Retransmissions on LAN that failed. */
	IPMI_STAT_dropped_rexmit_lan_commands,

	/* This *must* remain last, add new values above this. */
	IPMI_NUM_STATS
};
#define IPMI_IPMB_NUM_SEQ	64
struct ipmi_smi {
	struct module *owner;

	/* What interface number are we? */
	int intf_num;

	struct kref refcount;

	/* Set when the interface is being unregistered. */
	bool in_shutdown;

	/* Used for a list of interfaces. */
	struct list_head link;

	/*
	 * The list of upper layers that are using me. seq_lock write
	 * protects this. Read protection is with srcu.
	 */
	struct list_head users;
	struct srcu_struct users_srcu;
	atomic_t nr_users;
	struct device_attribute nr_users_devattr;
	struct device_attribute nr_msgs_devattr;

	/* Used for wake ups at startup. */
	wait_queue_head_t waitq;

	/*
	 * Prevents the interface from being unregistered when the
	 * interface is used by being looked up through the BMC
	 * structure.
	 */
	struct mutex bmc_reg_mutex;

	struct bmc_device tmp_bmc;
	struct bmc_device *bmc;
	bool bmc_registered;
	struct list_head bmc_link;
	char *my_dev_name;
	bool in_bmc_register;  /* Handle recursive situations. Yuck. */
	struct work_struct bmc_reg_work;

	const struct ipmi_smi_handlers *handlers;
	void                     *send_info;

	/* Driver-model device for the system interface. */
	struct device          *si_dev;

	/*
	 * A table of sequence numbers for this interface. We use the
	 * sequence numbers for IPMB messages that go out of the
	 * interface to match them up with their responses. A routine
	 * is called periodically to time the items in this list.
	 */
	spinlock_t       seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;

	/*
	 * Messages queued for delivery. If delivery fails (out of memory
	 * for instance), they will stay in here to be processed later in a
	 * periodic timer interrupt. The tasklet is for handling received
	 * messages directly from the handler.
	 */
	spinlock_t       waiting_rcv_msgs_lock;
	struct list_head waiting_rcv_msgs;
	atomic_t	 watchdog_pretimeouts_to_deliver;
	struct tasklet_struct recv_tasklet;

	spinlock_t             xmit_msgs_lock;
	struct list_head       xmit_msgs;
	struct ipmi_smi_msg    *curr_msg;
	struct list_head       hp_xmit_msgs;
	/*
	 * The list of command receivers that are registered for commands
	 * on this interface.
	 */
	struct mutex     cmd_rcvrs_mutex;
	struct list_head cmd_rcvrs;

	/*
	 * Events that were queued because no one was there to receive
	 * them.
	 */
	spinlock_t       events_lock; /* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int     waiting_events_count; /* How many events in queue? */
	char             delivering_events;
	char             event_msg_printed;

	/* How many users are waiting for events? */
	atomic_t event_waiters;
	unsigned int ticks_to_req_ev;

	spinlock_t       watch_lock; /* For dealing with watch stuff below. */

	/* How many users are waiting for commands? */
	unsigned int command_waiters;

	/* How many users are waiting for watchdogs? */
	unsigned int watchdog_waiters;

	/* How many users are waiting for message responses? */
	unsigned int response_waiters;

	/*
	 * Tells what the lower layer has last been asked to watch for,
	 * messages and/or watchdogs. Protected by watch_lock.
	 */
	unsigned int last_watch_mask;

	/*
	 * The event receiver for my BMC, only really used at panic
	 * shutdown as a place to store this.
	 */
	unsigned char event_receiver;
	unsigned char event_receiver_lun;
	unsigned char local_sel_device;
	unsigned char local_event_generator;

	/* For handling of maintenance mode. */
	int maintenance_mode;
	bool maintenance_mode_enable;
	int auto_maintenance_timeout;
	spinlock_t maintenance_mode_lock; /* Used in a timer... */

	/*
	 * If we are doing maintenance on something on IPMB, extend
	 * the timeout time to avoid timeouts writing firmware and
	 * such.
	 */
	int ipmb_maintenance_mode_timeout;

	/*
	 * A cheap hack, if this is non-null and a message to an
	 * interface comes in with a NULL user, call this routine with
	 * it. Note that the message will still be freed by the
	 * caller. This only works on the system interface.
	 *
	 * Protected by bmc_reg_mutex.
	 */
	void (*null_user_handler)(struct ipmi_smi *intf,
				  struct ipmi_recv_msg *msg);

	/*
	 * When we are scanning the channels for an SMI, this will
	 * tell which channel we are scanning.
	 */
	int curr_channel;

	/* Channel information */
	struct ipmi_channel_set *channel_list;
	unsigned int curr_working_cset; /* First index into the following. */
	struct ipmi_channel_set wchannels[2];
	struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
	bool channels_ready;

	atomic_t stats[IPMI_NUM_STATS];

	/*
	 * run_to_completion duplicate of smb_info, smi_info
	 * and ipmi_serial_info structures. Used to decrease numbers of
	 * parameters passed by "low" level IPMI code.
	 */
	int run_to_completion;
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)

static void __get_guid(struct ipmi_smi *intf);
static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
static int __ipmi_bmc_register(struct ipmi_smi *intf,
			       struct ipmi_device_id *id,
			       bool guid_set, guid_t *guid, int intf_num);
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
/*
 * The driver model view of the IPMI messaging driver.
 */
static struct platform_driver ipmidriver = {
	.driver = {
		.name = "ipmi",
		.bus = &platform_bus_type
	}
};
/*
 * This mutex keeps us from adding the same BMC twice.
 */
static DEFINE_MUTEX(ipmidriver_mutex);

static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
#define ipmi_interfaces_mutex_held() \
	lockdep_is_held(&ipmi_interfaces_mutex)
static struct srcu_struct ipmi_interfaces_srcu;

/*
 * List of watchers that want to know when smi's are added and deleted.
 */
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);
#define ipmi_inc_stat(intf, stat) \
	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
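
/*
 * Usage sketch (illustrative): the stat name is token-pasted onto the
 * IPMI_STAT_ prefix, so
 *
 *   ipmi_inc_stat(intf, sent_ipmb_commands);
 *
 * expands to atomic_inc(&intf->stats[IPMI_STAT_sent_ipmb_commands]).
 */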
static const char * const addr_src_to_str[] = {
	"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
	"device-tree", "platform"
};

const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
{
	if (src >= SI_LAST)
		src = 0; /* Invalid */
	return addr_src_to_str[src];
}
EXPORT_SYMBOL(ipmi_addr_src_to_str);
static int is_lan_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
}

static int is_ipmb_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
}

static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}

static int is_ipmb_direct_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE;
}
static void free_recv_msg_list(struct list_head *q)
{
	struct ipmi_recv_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_recv_msg(msg);
	}
}

static void free_smi_msg_list(struct list_head *q)
{
	struct ipmi_smi_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_smi_msg(msg);
	}
}
static void clean_up_interface_data(struct ipmi_smi *intf)
{
	int              i;
	struct cmd_rcvr  *rcvr, *rcvr2;
	struct list_head list;

	tasklet_kill(&intf->recv_tasklet);

	free_smi_msg_list(&intf->waiting_rcv_msgs);
	free_recv_msg_list(&intf->waiting_events);

	/*
	 * Wholesale remove all the entries from the list in the
	 * interface and wait for RCU to know that none are in use.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	INIT_LIST_HEAD(&list);
	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
	mutex_unlock(&intf->cmd_rcvrs_mutex);

	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
		kfree(rcvr);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if ((intf->seq_table[i].inuse)
					&& (intf->seq_table[i].recv_msg))
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
	}
}
static void intf_free(struct kref *ref)
{
	struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);

	clean_up_interface_data(intf);
	kfree(intf);
}
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
	struct ipmi_smi *intf;
	int index, rv;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	mutex_lock(&smi_watchers_mutex);

	list_add(&watcher->link, &smi_watchers);

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link,
			lockdep_is_held(&smi_watchers_mutex)) {
		int intf_num = READ_ONCE(intf->intf_num);

		if (intf_num == -1)
			continue;
		watcher->new_smi(intf_num, intf->si_dev);
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	mutex_unlock(&smi_watchers_mutex);

	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);
int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
{
	mutex_lock(&smi_watchers_mutex);
	list_del(&watcher->link);
	mutex_unlock(&smi_watchers_mutex);
	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
/*
 * Call all the registered watchers for a new interface. This takes
 * smi_watchers_mutex itself, so callers must not hold it.
 */
static void
call_smi_watchers(int i, struct device *dev)
{
	struct ipmi_smi_watcher *w;

	mutex_lock(&smi_watchers_mutex);
	list_for_each_entry(w, &smi_watchers, link) {
		if (try_module_get(w->owner)) {
			w->new_smi(i, dev);
			module_put(w->owner);
		}
	}
	mutex_unlock(&smi_watchers_mutex);
}
static int
ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
{
	if (addr1->addr_type != addr2->addr_type)
		return 0;

	if (addr1->channel != addr2->channel)
		return 0;

	if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		struct ipmi_system_interface_addr *smi_addr1
		    = (struct ipmi_system_interface_addr *) addr1;
		struct ipmi_system_interface_addr *smi_addr2
		    = (struct ipmi_system_interface_addr *) addr2;
		return (smi_addr1->lun == smi_addr2->lun);
	}

	if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
		struct ipmi_ipmb_addr *ipmb_addr1
		    = (struct ipmi_ipmb_addr *) addr1;
		struct ipmi_ipmb_addr *ipmb_addr2
		    = (struct ipmi_ipmb_addr *) addr2;

		return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
			&& (ipmb_addr1->lun == ipmb_addr2->lun));
	}

	if (is_ipmb_direct_addr(addr1)) {
		struct ipmi_ipmb_direct_addr *daddr1
			= (struct ipmi_ipmb_direct_addr *) addr1;
		struct ipmi_ipmb_direct_addr *daddr2
			= (struct ipmi_ipmb_direct_addr *) addr2;

		return daddr1->slave_addr == daddr2->slave_addr &&
			daddr1->rq_lun == daddr2->rq_lun &&
			daddr1->rs_lun == daddr2->rs_lun;
	}

	if (is_lan_addr(addr1)) {
		struct ipmi_lan_addr *lan_addr1
			= (struct ipmi_lan_addr *) addr1;
		struct ipmi_lan_addr *lan_addr2
			= (struct ipmi_lan_addr *) addr2;

		return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
			&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
			&& (lan_addr1->session_handle
			    == lan_addr2->session_handle)
			&& (lan_addr1->lun == lan_addr2->lun));
	}

	return 1;
}
int ipmi_validate_addr(struct ipmi_addr *addr, int len)
{
	if (len < sizeof(struct ipmi_system_interface_addr))
		return -EINVAL;

	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		if (addr->channel != IPMI_BMC_CHANNEL)
			return -EINVAL;
		return 0;
	}

	if ((addr->channel == IPMI_BMC_CHANNEL)
	    || (addr->channel >= IPMI_MAX_CHANNELS)
	    || (addr->channel < 0))
		return -EINVAL;

	if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
		if (len < sizeof(struct ipmi_ipmb_addr))
			return -EINVAL;
		return 0;
	}

	if (is_ipmb_direct_addr(addr)) {
		struct ipmi_ipmb_direct_addr *daddr = (void *) addr;

		if (addr->channel != 0)
			return -EINVAL;
		if (len < sizeof(struct ipmi_ipmb_direct_addr))
			return -EINVAL;

		if (daddr->slave_addr & 0x01)
			return -EINVAL;
		if (daddr->rq_lun >= 4)
			return -EINVAL;
		if (daddr->rs_lun >= 4)
			return -EINVAL;
		return 0;
	}

	if (is_lan_addr(addr)) {
		if (len < sizeof(struct ipmi_lan_addr))
			return -EINVAL;
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(ipmi_validate_addr);
unsigned int ipmi_addr_length(int addr_type)
{
	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
		return sizeof(struct ipmi_system_interface_addr);

	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
	    || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
		return sizeof(struct ipmi_ipmb_addr);

	if (addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE)
		return sizeof(struct ipmi_ipmb_direct_addr);

	if (addr_type == IPMI_LAN_ADDR_TYPE)
		return sizeof(struct ipmi_lan_addr);

	return 0;
}
EXPORT_SYMBOL(ipmi_addr_length);
static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	int rv = 0;

	if (!msg->user) {
		/* Special handling for NULL users. */
		if (intf->null_user_handler) {
			intf->null_user_handler(intf, msg);
		} else {
			/* No handler, so give up. */
			rv = -EINVAL;
		}
		ipmi_free_recv_msg(msg);
	} else if (oops_in_progress) {
		/*
		 * If we are running in the panic context, calling the
		 * receive handler doesn't have much meaning and has a
		 * deadlock risk. At this moment, simply skip it in that case.
		 */
		ipmi_free_recv_msg(msg);
		atomic_dec(&msg->user->nr_msgs);
	} else {
		int index;
		struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);

		if (user) {
			atomic_dec(&user->nr_msgs);
			user->handler->ipmi_recv_hndl(msg, user->handler_data);
			release_ipmi_user(user, index);
		} else {
			/* User went away, give up. */
			ipmi_free_recv_msg(msg);
			rv = -EINVAL;
		}
	}

	return rv;
}
static void deliver_local_response(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if (deliver_response(intf, msg))
		ipmi_inc_stat(intf, unhandled_local_responses);
	else
		ipmi_inc_stat(intf, handled_local_responses);
}
static void deliver_err_response(struct ipmi_smi *intf,
				 struct ipmi_recv_msg *msg, int err)
{
	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	msg->msg_data[0] = err;
	msg->msg.netfn |= 1; /* Convert to a response. */
	msg->msg.data_len = 1;
	msg->msg.data = msg->msg_data;
	deliver_local_response(intf, msg);
}
static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters++;

	if ((intf->last_watch_mask & flags) != flags) {
		intf->last_watch_mask |= flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}
static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters--;

	flags = 0;
	if (intf->response_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
	if (intf->watchdog_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
	if (intf->command_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;

	if (intf->last_watch_mask != flags) {
		intf->last_watch_mask = flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}
/*
 * Find the next sequence number not being used and add the given
 * message with the given timeout to the sequence table. This must be
 * called with the interface's seq_lock held.
 */
static int intf_next_seq(struct ipmi_smi      *intf,
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long        timeout,
			 int                  retries,
			 int                  broadcast,
			 unsigned char        *seq,
			 long                 *seqid)
{
	int          rv = 0;
	unsigned int i;

	if (timeout == 0)
		timeout = default_retry_ms;
	if (retries < 0)
		retries = default_max_retries;

	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
					i = (i+1)%IPMI_IPMB_NUM_SEQ) {
		if (!intf->seq_table[i].inuse)
			break;
	}

	if (!intf->seq_table[i].inuse) {
		intf->seq_table[i].recv_msg = recv_msg;

		/*
		 * Start with the maximum timeout, when the send response
		 * comes in we will start the real timer.
		 */
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].broadcast = broadcast;
		intf->seq_table[i].inuse = 1;
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seq = i;
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		need_waiter(intf);
	} else {
		rv = -EAGAIN;
	}

	return rv;
}
/*
 * Return the receive message for the given sequence number and
 * release the sequence number so it can be reused. Some other data
 * is passed in to be sure the message matches up correctly (to help
 * guard against messages coming in after their timeout and the
 * sequence number being reused).
 */
static int intf_find_seq(struct ipmi_smi      *intf,
			 unsigned char        seq,
			 short                channel,
			 unsigned char        cmd,
			 unsigned char        netfn,
			 struct ipmi_addr     *addr,
			 struct ipmi_recv_msg **recv_msg)
{
	int           rv = -ENODEV;
	unsigned long flags;

	if (seq >= IPMI_IPMB_NUM_SEQ)
		return -EINVAL;

	spin_lock_irqsave(&intf->seq_lock, flags);
	if (intf->seq_table[seq].inuse) {
		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;

		if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
				&& (msg->msg.netfn == netfn)
				&& (ipmi_addr_equal(addr, &msg->addr))) {
			*recv_msg = msg;
			intf->seq_table[seq].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			rv = 0;
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	return rv;
}
/* Start the timer for a specific sequence table entry. */
static int intf_start_seq_timer(struct ipmi_smi *intf,
				long       msgid)
{
	int           rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;

	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&intf->seq_lock, flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
				&& (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];
		ent->timeout = ent->orig_timeout;
		rv = 0;
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	return rv;
}
/* Got an error for the send message for a specific sequence number. */
static int intf_err_seq(struct ipmi_smi *intf,
			long             msgid,
			unsigned int     err)
{
	int                  rv = -ENODEV;
	unsigned long        flags;
	unsigned char        seq;
	unsigned long        seqid;
	struct ipmi_recv_msg *msg = NULL;

	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&intf->seq_lock, flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
				&& (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];

		ent->inuse = 0;
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		msg = ent->recv_msg;
		rv = 0;
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	if (msg)
		deliver_err_response(intf, msg, err);

	return rv;
}
static void free_user_work(struct work_struct *work)
{
	struct ipmi_user *user = container_of(work, struct ipmi_user,
					      remove_work);

	cleanup_srcu_struct(&user->release_barrier);
	vfree(user);
}
int ipmi_create_user(unsigned int          if_num,
		     const struct ipmi_user_hndl *handler,
		     void                  *handler_data,
		     struct ipmi_user      **user)
{
	unsigned long flags;
	struct ipmi_user *new_user;
	int           rv, index;
	struct ipmi_smi *intf;

	/*
	 * There is no module usecount here, because it's not
	 * required.  Since this can only be used by and called from
	 * other modules, they will implicitly use this module, and
	 * thus this can't be removed unless the other modules are
	 * removed.
	 */

	if (handler == NULL)
		return -EINVAL;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	new_user = vzalloc(sizeof(*new_user));
	if (!new_user)
		return -ENOMEM;

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	/* Not found, return an error */
	rv = -EINVAL;
	goto out_kfree;

 found:
	if (atomic_add_return(1, &intf->nr_users) > max_users) {
		rv = -EBUSY;
		goto out_kfree;
	}

	INIT_WORK(&new_user->remove_work, free_user_work);

	rv = init_srcu_struct(&new_user->release_barrier);
	if (rv)
		goto out_kfree;

	if (!try_module_get(intf->owner)) {
		rv = -ENODEV;
		goto out_kfree;
	}

	/* Note that each existing user holds a refcount to the interface. */
	kref_get(&intf->refcount);

	atomic_set(&new_user->nr_msgs, 0);
	kref_init(&new_user->refcount);
	new_user->handler = handler;
	new_user->handler_data = handler_data;
	new_user->intf = intf;
	new_user->gets_events = false;

	rcu_assign_pointer(new_user->self, new_user);
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_add_rcu(&new_user->link, &intf->users);
	spin_unlock_irqrestore(&intf->seq_lock, flags);
	if (handler->ipmi_watchdog_pretimeout)
		/* User wants pretimeouts, so make sure to watch for them. */
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
	*user = new_user;
	return 0;

out_kfree:
	atomic_dec(&intf->nr_users);
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
	vfree(new_user);
	return rv;
}
EXPORT_SYMBOL(ipmi_create_user);
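
/*
 * A minimal usage sketch for a client module (illustrative only; the
 * handler name and its body are hypothetical, not part of this file):
 *
 *   static void my_recv(struct ipmi_recv_msg *msg, void *user_msg_data)
 *   {
 *           // ... process msg ...
 *           ipmi_free_recv_msg(msg);
 *   }
 *
 *   static const struct ipmi_user_hndl my_hndl = {
 *           .ipmi_recv_hndl = my_recv,
 *   };
 *
 *   struct ipmi_user *user;
 *   int rv = ipmi_create_user(0, &my_hndl, NULL, &user);
 *
 * The receive handler must eventually free each delivered message,
 * and the user must be released with ipmi_destroy_user() when done.
 */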
int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
	int rv, index;
	struct ipmi_smi *intf;

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	/* Not found, return an error */
	return -EINVAL;

found:
	if (!intf->handlers->get_smi_info)
		rv = -ENOTTY;
	else
		rv = intf->handlers->get_smi_info(intf->send_info, data);
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);
static void free_user(struct kref *ref)
{
	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);

	/* SRCU cleanup must happen in task context. */
	queue_work(remove_work_wq, &user->remove_work);
}
static void _ipmi_destroy_user(struct ipmi_user *user)
{
	struct ipmi_smi  *intf = user->intf;
	int              i;
	unsigned long    flags;
	struct cmd_rcvr  *rcvr;
	struct cmd_rcvr  *rcvrs = NULL;
	struct module    *owner;

	if (!acquire_ipmi_user(user, &i)) {
		/*
		 * The user has already been cleaned up, just make sure
		 * nothing is using it and return.
		 */
		synchronize_srcu(&user->release_barrier);
		return;
	}

	rcu_assign_pointer(user->self, NULL);
	release_ipmi_user(user, i);

	synchronize_srcu(&user->release_barrier);

	if (user->handler->shutdown)
		user->handler->shutdown(user->handler_data);

	if (user->handler->ipmi_watchdog_pretimeout)
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);

	if (user->gets_events)
		atomic_dec(&intf->event_waiters);

	/* Remove the user from the interface's sequence table. */
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_del_rcu(&user->link);
	atomic_dec(&intf->nr_users);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if (intf->seq_table[i].inuse
		    && (intf->seq_table[i].recv_msg->user == user)) {
			intf->seq_table[i].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	/*
	 * Remove the user from the command receiver's table.  First
	 * we build a list of everything (not using the standard link,
	 * since other things may be using it till we do
	 * synchronize_srcu()) then free everything in that list.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if (rcvr->user == user) {
			list_del_rcu(&rcvr->link);
			rcvr->next = rcvrs;
			rcvrs = rcvr;
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	while (rcvrs) {
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	owner = intf->owner;
	kref_put(&intf->refcount, intf_free);
	module_put(owner);
}
int ipmi_destroy_user(struct ipmi_user *user)
{
	_ipmi_destroy_user(user);

	kref_put(&user->refcount, free_user);

	return 0;
}
EXPORT_SYMBOL(ipmi_destroy_user);
int ipmi_get_version(struct ipmi_user *user,
		     unsigned char *major,
		     unsigned char *minor)
{
	struct ipmi_device_id id;
	int rv, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
	if (!rv) {
		*major = ipmi_version_major(&id);
		*minor = ipmi_version_minor(&id);
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_version);
int ipmi_set_my_address(struct ipmi_user *user,
			unsigned int  channel,
			unsigned char address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].address = address;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_address);
int ipmi_get_my_address(struct ipmi_user *user,
			unsigned int  channel,
			unsigned char *address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].address;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_address);
int ipmi_set_my_LUN(struct ipmi_user *user,
		    unsigned int  channel,
		    unsigned char LUN)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].lun = LUN & 0x3;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_LUN);
int ipmi_get_my_LUN(struct ipmi_user *user,
		    unsigned int  channel,
		    unsigned char *address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].lun;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_LUN);
int ipmi_get_maintenance_mode(struct ipmi_user *user)
{
	int mode, index;
	unsigned long flags;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
	mode = user->intf->maintenance_mode;
	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
	release_ipmi_user(user, index);

	return mode;
}
EXPORT_SYMBOL(ipmi_get_maintenance_mode);
static void maintenance_mode_update(struct ipmi_smi *intf)
{
	if (intf->handlers->set_maintenance_mode)
		intf->handlers->set_maintenance_mode(
			intf->send_info, intf->maintenance_mode_enable);
}
int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
{
	int rv = 0, index;
	unsigned long flags;
	struct ipmi_smi *intf = user->intf;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
	if (intf->maintenance_mode != mode) {
		switch (mode) {
		case IPMI_MAINTENANCE_MODE_AUTO:
			intf->maintenance_mode_enable
				= (intf->auto_maintenance_timeout > 0);
			break;

		case IPMI_MAINTENANCE_MODE_OFF:
			intf->maintenance_mode_enable = false;
			break;

		case IPMI_MAINTENANCE_MODE_ON:
			intf->maintenance_mode_enable = true;
			break;

		default:
			rv = -EINVAL;
			goto out_unlock;
		}
		intf->maintenance_mode = mode;

		maintenance_mode_update(intf);
	}
 out_unlock:
	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_maintenance_mode);
int ipmi_set_gets_events(struct ipmi_user *user, bool val)
{
	unsigned long        flags;
	struct ipmi_smi      *intf = user->intf;
	struct ipmi_recv_msg *msg, *msg2;
	struct list_head     msgs;
	int index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);
	if (user->gets_events == val)
		goto out;

	user->gets_events = val;

	if (val) {
		if (atomic_inc_return(&intf->event_waiters) == 1)
			need_waiter(intf);
	} else {
		atomic_dec(&intf->event_waiters);
	}

	if (intf->delivering_events)
		/*
		 * Another thread is delivering events for this, so
		 * let it handle any new events.
		 */
		goto out;

	/* Deliver any queued events. */
	while (user->gets_events && !list_empty(&intf->waiting_events)) {
		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
			list_move_tail(&msg->link, &msgs);
		intf->waiting_events_count = 0;
		if (intf->event_msg_printed) {
			dev_warn(intf->si_dev, "Event queue no longer full\n");
			intf->event_msg_printed = 0;
		}

		intf->delivering_events = 1;
		spin_unlock_irqrestore(&intf->events_lock, flags);

		list_for_each_entry_safe(msg, msg2, &msgs, link) {
			msg->user = user;
			kref_get(&user->refcount);
			deliver_local_response(intf, msg);
		}

		spin_lock_irqsave(&intf->events_lock, flags);
		intf->delivering_events = 0;
	}

 out:
	spin_unlock_irqrestore(&intf->events_lock, flags);
	release_ipmi_user(user, index);

	return 0;
}
EXPORT_SYMBOL(ipmi_set_gets_events);
static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
				      unsigned char netfn,
				      unsigned char cmd,
				      unsigned char chan)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
					&& (rcvr->chans & (1 << chan)))
			return rcvr;
	}
	return NULL;
}

static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
				 unsigned char netfn,
				 unsigned char cmd,
				 unsigned int  chans)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
					&& (rcvr->chans & chans))
			return 0;
	}
	return 1;
}
int ipmi_register_for_cmd(struct ipmi_user *user,
			  unsigned char netfn,
			  unsigned char cmd,
			  unsigned int  chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	int rv = 0, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
	if (!rcvr) {
		rv = -ENOMEM;
		goto out_release;
	}
	rcvr->cmd = cmd;
	rcvr->netfn = netfn;
	rcvr->chans = chans;
	rcvr->user = user;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	/* Make sure the command/netfn is not already registered. */
	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
		rv = -EBUSY;
		goto out_unlock;
	}

	smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);

	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);

out_unlock:
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	if (rv)
		kfree(rcvr);
out_release:
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_register_for_cmd);
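
/*
 * Illustrative example (hypothetical values): to receive the IPMI
 * Get Device ID command (netfn 0x06, cmd 0x01) arriving on channels
 * 0 and 1:
 *
 *   rv = ipmi_register_for_cmd(user, 0x06, 0x01, (1 << 0) | (1 << 1));
 *
 * Registration is exclusive per netfn/cmd/channel, so a second
 * registration that overlaps an existing one fails with -EBUSY.
 */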
int ipmi_unregister_for_cmd(struct ipmi_user *user,
			    unsigned char netfn,
			    unsigned char cmd,
			    unsigned int  chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	int i, rv = -ENOENT, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
		if (((1 << i) & chans) == 0)
			continue;
		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
		if (rcvr == NULL)
			continue;
		if (rcvr->user == user) {
			rv = 0;
			rcvr->chans &= ~chans;
			if (rcvr->chans == 0) {
				list_del_rcu(&rcvr->link);
				rcvr->next = rcvrs;
				rcvrs = rcvr;
			}
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	release_ipmi_user(user, index);
	while (rcvrs) {
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	return rv;
}
EXPORT_SYMBOL(ipmi_unregister_for_cmd);
unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
EXPORT_SYMBOL(ipmb_checksum);
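
/*
 * The IPMB checksum is a two's complement sum: the returned byte is
 * chosen so that all covered bytes plus the checksum sum to zero
 * modulo 256. Worked example (illustrative): for the two header bytes
 * 0x20 and 0x18, the sum is 0x38, so the checksum is
 * 0x100 - 0x38 = 0xc8, and 0x20 + 0x18 + 0xc8 == 0x00 (mod 256).
 */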
static inline void format_ipmb_msg(struct ipmi_smi_msg   *smi_msg,
				   struct kernel_ipmi_msg *msg,
				   struct ipmi_ipmb_addr *ipmb_addr,
				   long                  msgid,
				   unsigned char         ipmb_seq,
				   int                   broadcast,
				   unsigned char         source_address,
				   unsigned char         source_lun)
{
	int i = broadcast;

	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = ipmb_addr->channel;
	if (broadcast)
		smi_msg->data[3] = 0;
	smi_msg->data[i+3] = ipmb_addr->slave_addr;
	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
	smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
	smi_msg->data[i+6] = source_address;
	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[i+8] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 9;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[i+smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);

	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1 + i;

	smi_msg->msgid = msgid;
}
static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_lan_addr  *lan_addr,
				  long                  msgid,
				  unsigned char         ipmb_seq,
				  unsigned char         source_lun)
{
	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = lan_addr->channel;
	smi_msg->data[3] = lan_addr->session_handle;
	smi_msg->data[4] = lan_addr->remote_SWID;
	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
	smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
	smi_msg->data[7] = lan_addr->local_SWID;
	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[9] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[10], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 10;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);

	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1;

	smi_msg->msgid = msgid;
}
static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
					     struct ipmi_smi_msg *smi_msg,
					     int priority)
{
	if (intf->curr_msg) {
		if (priority > 0)
			list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
		else
			list_add_tail(&smi_msg->link, &intf->xmit_msgs);
		smi_msg = NULL;
	} else {
		intf->curr_msg = smi_msg;
	}

	return smi_msg;
}

static void smi_send(struct ipmi_smi *intf,
		     const struct ipmi_smi_handlers *handlers,
		     struct ipmi_smi_msg *smi_msg, int priority)
{
	int run_to_completion = intf->run_to_completion;
	unsigned long flags = 0;

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	smi_msg = smi_add_send_msg(intf, smi_msg, priority);

	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (smi_msg)
		handlers->sender(intf->send_info, smi_msg);
}
static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
{
	return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
		 && ((msg->cmd == IPMI_COLD_RESET_CMD)
		     || (msg->cmd == IPMI_WARM_RESET_CMD)))
		|| (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
}
static int i_ipmi_req_sysintf(struct ipmi_smi        *intf,
			      struct ipmi_addr       *addr,
			      long                   msgid,
			      struct kernel_ipmi_msg *msg,
			      struct ipmi_smi_msg    *smi_msg,
			      struct ipmi_recv_msg   *recv_msg,
			      int                    retries,
			      unsigned int           retry_time_ms)
{
	struct ipmi_system_interface_addr *smi_addr;

	if (msg->netfn & 1)
		/* Responses are not allowed to the SMI. */
		return -EINVAL;

	smi_addr = (struct ipmi_system_interface_addr *) addr;
	if (smi_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));

	if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
	    && ((msg->cmd == IPMI_SEND_MSG_CMD)
		|| (msg->cmd == IPMI_GET_MSG_CMD)
		|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
		/*
		 * We don't let the user do these, since we manage
		 * the sequence numbers.
		 */
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (is_maintenance_mode_cmd(msg)) {
		unsigned long flags;

		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		intf->auto_maintenance_timeout
			= maintenance_mode_timeout_ms;
		if (!intf->maintenance_mode
		    && !intf->maintenance_mode_enable) {
			intf->maintenance_mode_enable = true;
			maintenance_mode_update(intf);
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
	smi_msg->data[1] = msg->cmd;
	smi_msg->msgid = msgid;
	smi_msg->user_data = recv_msg;
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[2], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 2;
	ipmi_inc_stat(intf, sent_local_commands);

	return 0;
}
static int i_ipmi_req_ipmb(struct ipmi_smi        *intf,
			   struct ipmi_addr       *addr,
			   long                   msgid,
			   struct kernel_ipmi_msg *msg,
			   struct ipmi_smi_msg    *smi_msg,
			   struct ipmi_recv_msg   *recv_msg,
			   unsigned char          source_address,
			   unsigned char          source_lun,
			   int                    retries,
			   unsigned int           retry_time_ms)
{
	struct ipmi_ipmb_addr *ipmb_addr;
	unsigned char ipmb_seq;
	long seqid;
	int broadcast = 0;
	struct ipmi_channel *chans;
	int rv = 0;

	if (addr->channel >= IPMI_MAX_CHANNELS) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	chans = READ_ONCE(intf->channel_list)->c;

	if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
		/*
		 * Broadcasts add a zero at the beginning of the
		 * message, but otherwise is the same as an IPMB
		 * address.
		 */
		addr->addr_type = IPMI_IPMB_ADDR_TYPE;
		broadcast = 1;
		retries = 0; /* Don't retry broadcasts. */
	}

	/*
	 * 9 for the header and 1 for the checksum, plus
	 * possibly one for the broadcast.
	 */
	if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	ipmb_addr = (struct ipmi_ipmb_addr *) addr;
	if (ipmb_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));

	if (recv_msg->msg.netfn & 0x1) {
		/*
		 * It's a response, so use the user's sequence
		 * from msgid.
		 */
		ipmi_inc_stat(intf, sent_ipmb_responses);
		format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
				msgid, broadcast,
				source_address, source_lun);

		/*
		 * Save the receive message so we can use it
		 * to deliver the response.
		 */
		smi_msg->user_data = recv_msg;
	} else {
		/* It's a command, so get a sequence for it. */
		unsigned long flags;

		spin_lock_irqsave(&intf->seq_lock, flags);

		if (is_maintenance_mode_cmd(msg))
			intf->ipmb_maintenance_mode_timeout =
				maintenance_mode_timeout_ms;

		if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
			/* Different default in maintenance mode */
			retry_time_ms = default_maintenance_retry_ms;

		/*
		 * Create a sequence number with a 1 second
		 * timeout and 4 retries.
		 */
		rv = intf_next_seq(intf,
				   recv_msg,
				   retry_time_ms,
				   retries,
				   broadcast,
				   &ipmb_seq,
				   &seqid);
		if (rv)
			/*
			 * We have used up all the sequence numbers,
			 * probably, so abort.
			 */
			goto out_err;

		ipmi_inc_stat(intf, sent_ipmb_commands);

		/*
		 * Store the sequence number in the message,
		 * so that when the send message response
		 * comes back we can start the timer.
		 */
		format_ipmb_msg(smi_msg, msg, ipmb_addr,
				STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
				ipmb_seq, broadcast,
				source_address, source_lun);

		/*
		 * Copy the message into the recv message data, so we
		 * can retransmit it later if necessary.
		 */
		memcpy(recv_msg->msg_data, smi_msg->data,
		       smi_msg->data_size);
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = smi_msg->data_size;

		/*
		 * We don't unlock until here, because we need
		 * to copy the completed message into the
		 * recv_msg before we release the lock.
		 * Otherwise, race conditions may bite us.  I
		 * know that's pretty paranoid, but I prefer
		 * to be safe.
		 */
out_err:
		spin_unlock_irqrestore(&intf->seq_lock, flags);
	}

	return rv;
}
static int i_ipmi_req_ipmb_direct(struct ipmi_smi        *intf,
				  struct ipmi_addr       *addr,
				  long                   msgid,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_smi_msg    *smi_msg,
				  struct ipmi_recv_msg   *recv_msg,
				  unsigned char          source_lun)
{
	struct ipmi_ipmb_direct_addr *daddr;
	bool is_cmd = !(recv_msg->msg.netfn & 0x1);

	if (!(intf->handlers->flags & IPMI_SMI_CAN_HANDLE_IPMB_DIRECT))
		return -EAFNOSUPPORT;

	/* Responses must have a completion code. */
	if (!is_cmd && msg->data_len < 1) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if ((msg->data_len + 4) > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	daddr = (struct ipmi_ipmb_direct_addr *) addr;
	if (daddr->rq_lun > 3 || daddr->rs_lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	smi_msg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT;
	smi_msg->msgid = msgid;

	if (is_cmd) {
		smi_msg->data[0] = msg->netfn << 2 | daddr->rs_lun;
		smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rq_lun;
	} else {
		smi_msg->data[0] = msg->netfn << 2 | daddr->rq_lun;
		smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rs_lun;
	}
	smi_msg->data[1] = daddr->slave_addr;
	smi_msg->data[3] = msg->cmd;

	memcpy(smi_msg->data + 4, msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 4;

	smi_msg->user_data = recv_msg;

	return 0;
}
static int i_ipmi_req_lan(struct ipmi_smi        *intf,
			  struct ipmi_addr       *addr,
			  long                   msgid,
			  struct kernel_ipmi_msg *msg,
			  struct ipmi_smi_msg    *smi_msg,
			  struct ipmi_recv_msg   *recv_msg,
			  unsigned char          source_lun,
			  int                    retries,
			  unsigned int           retry_time_ms)
{
	struct ipmi_lan_addr  *lan_addr;
	unsigned char ipmb_seq;
	long seqid;
	struct ipmi_channel *chans;
	int rv = 0;

	if (addr->channel >= IPMI_MAX_CHANNELS) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	chans = READ_ONCE(intf->channel_list)->c;

	if ((chans[addr->channel].medium
				!= IPMI_CHANNEL_MEDIUM_8023LAN)
			&& (chans[addr->channel].medium
			    != IPMI_CHANNEL_MEDIUM_ASYNC)) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	/* 11 for the header and 1 for the checksum. */
	if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	lan_addr = (struct ipmi_lan_addr *) addr;
	if (lan_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));

	if (recv_msg->msg.netfn & 0x1) {
		/*
		 * It's a response, so use the user's sequence
		 * from msgid.
		 */
		ipmi_inc_stat(intf, sent_lan_responses);
		format_lan_msg(smi_msg, msg, lan_addr, msgid,
			       msgid, source_lun);

		/*
		 * Save the receive message so we can use it
		 * to deliver the response.
		 */
		smi_msg->user_data = recv_msg;
	} else {
		/* It's a command, so get a sequence for it. */
		unsigned long flags;

		spin_lock_irqsave(&intf->seq_lock, flags);

		/*
		 * Create a sequence number with a 1 second
		 * timeout and 4 retries.
		 */
		rv = intf_next_seq(intf,
				   recv_msg,
				   retry_time_ms,
				   retries,
				   0,
				   &ipmb_seq,
				   &seqid);
		if (rv)
			/*
			 * We have used up all the sequence numbers,
			 * probably, so abort.
			 */
			goto out_err;

		ipmi_inc_stat(intf, sent_lan_commands);

		/*
		 * Store the sequence number in the message,
		 * so that when the send message response
		 * comes back we can start the timer.
		 */
		format_lan_msg(smi_msg, msg, lan_addr,
			       STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
			       ipmb_seq, source_lun);

		/*
		 * Copy the message into the recv message data, so we
		 * can retransmit it later if necessary.
		 */
		memcpy(recv_msg->msg_data, smi_msg->data,
		       smi_msg->data_size);
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = smi_msg->data_size;

		/*
		 * We don't unlock until here, because we need
		 * to copy the completed message into the
		 * recv_msg before we release the lock.
		 * Otherwise, race conditions may bite us.  I
		 * know that's pretty paranoid, but I prefer
		 * to be safe.
		 */
out_err:
		spin_unlock_irqrestore(&intf->seq_lock, flags);
	}

	return rv;
}
/*
 * Separate from ipmi_request so that the user does not have to be
 * supplied in certain circumstances (mainly at panic time). If
 * messages are supplied, they will be freed, even if an error
 * occurs.
 */
static int i_ipmi_request(struct ipmi_user     *user,
			  struct ipmi_smi      *intf,
			  struct ipmi_addr     *addr,
			  long                 msgid,
			  struct kernel_ipmi_msg *msg,
			  void                 *user_msg_data,
			  void                 *supplied_smi,
			  struct ipmi_recv_msg *supplied_recv,
			  int                  priority,
			  unsigned char        source_address,
			  unsigned char        source_lun,
			  int                  retries,
			  unsigned int         retry_time_ms)
{
	struct ipmi_smi_msg *smi_msg;
	struct ipmi_recv_msg *recv_msg;
	int rv = 0;

	if (user) {
		if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
			/* Decrement will happen at the end of the routine. */
			rv = -EBUSY;
			goto out;
		}
	}

	if (supplied_recv)
		recv_msg = supplied_recv;
	else {
		recv_msg = ipmi_alloc_recv_msg();
		if (recv_msg == NULL) {
			rv = -ENOMEM;
			goto out;
		}
	}
	recv_msg->user_msg_data = user_msg_data;

	if (supplied_smi)
		smi_msg = supplied_smi;
	else {
		smi_msg = ipmi_alloc_smi_msg();
		if (smi_msg == NULL) {
			if (!supplied_recv)
				ipmi_free_recv_msg(recv_msg);
			rv = -ENOMEM;
			goto out;
		}
	}

	rcu_read_lock();
	if (intf->in_shutdown) {
		rv = -ENODEV;
		goto out_err;
	}

	recv_msg->user = user;
	if (user)
		/* The put happens when the message is freed. */
		kref_get(&user->refcount);
	recv_msg->msgid = msgid;
	/*
	 * Store the message to send in the receive message so timeout
	 * responses can get the proper response data.
	 */
	recv_msg->msg = *msg;

	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg,
					recv_msg, retries, retry_time_ms);
	} else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
		rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
				     source_address, source_lun,
				     retries, retry_time_ms);
	} else if (is_ipmb_direct_addr(addr)) {
		rv = i_ipmi_req_ipmb_direct(intf, addr, msgid, msg, smi_msg,
					    recv_msg, source_lun);
	} else if (is_lan_addr(addr)) {
		rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
				    source_lun, retries, retry_time_ms);
	} else {
		/* Unknown address type. */
		ipmi_inc_stat(intf, sent_invalid_commands);
		rv = -EINVAL;
	}

	if (rv) {
out_err:
		ipmi_free_smi_msg(smi_msg);
		ipmi_free_recv_msg(recv_msg);
	} else {
		dev_dbg(intf->si_dev, "Send: %*ph\n",
			smi_msg->data_size, smi_msg->data);

		smi_send(intf, intf->handlers, smi_msg, priority);
	}
	rcu_read_unlock();

out:
	if (rv && user)
		atomic_dec(&user->nr_msgs);
	return rv;
}
static int check_addr(struct ipmi_smi  *intf,
		      struct ipmi_addr *addr,
		      unsigned char    *saddr,
		      unsigned char    *lun)
{
	if (addr->channel >= IPMI_MAX_CHANNELS)
		return -EINVAL;
	addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
	*lun = intf->addrinfo[addr->channel].lun;
	*saddr = intf->addrinfo[addr->channel].address;
	return 0;
}
2409 int ipmi_request_settime(struct ipmi_user *user,
2410 struct ipmi_addr *addr,
2412 struct kernel_ipmi_msg *msg,
2413 void *user_msg_data,
2416 unsigned int retry_time_ms)
2418 unsigned char saddr = 0, lun = 0;
2424 user = acquire_ipmi_user(user, &index);
2428 rv = check_addr(user->intf, addr, &saddr, &lun);
2430 rv = i_ipmi_request(user,
2443 release_ipmi_user(user, index);
2446 EXPORT_SYMBOL(ipmi_request_settime);
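/*
 * Usage sketch (not part of this driver): a kernel client that holds
 * an ipmi_user from ipmi_create_user() could send a Get Device ID
 * request to its local BMC like this; the response arrives via the
 * client's ipmi_recv_hndl callback. Error handling is elided.
 *
 *	struct ipmi_system_interface_addr si = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel = IPMI_BMC_CHANNEL,
 *	};
 *	struct kernel_ipmi_msg kmsg = {
 *		.netfn = IPMI_NETFN_APP_REQUEST,
 *		.cmd = IPMI_GET_DEVICE_ID_CMD,
 *		.data = NULL,
 *		.data_len = 0,
 *	};
 *
 *	rv = ipmi_request_settime(user, (struct ipmi_addr *) &si,
 *				  msgid, &kmsg, NULL, 0, -1, 0);
 *
 * Passing -1 retries and 0 retry_time_ms requests the documented
 * defaults.
 */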
2448 int ipmi_request_supply_msgs(struct ipmi_user *user,
2449 struct ipmi_addr *addr,
2451 struct kernel_ipmi_msg *msg,
2452 void *user_msg_data,
2454 struct ipmi_recv_msg *supplied_recv,
2457 unsigned char saddr = 0, lun = 0;
2463 user = acquire_ipmi_user(user, &index);
2467 rv = check_addr(user->intf, addr, &saddr, &lun);
2469 rv = i_ipmi_request(user,
2482 release_ipmi_user(user, index);
2485 EXPORT_SYMBOL(ipmi_request_supply_msgs);
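/*
 * Usage note: ipmi_request_supply_msgs() exists so that callers that
 * cannot allocate (e.g. at panic time) can hand in statically
 * allocated messages, as the watchdog does. A hypothetical sketch,
 * with dummy "done" handlers so the core never frees the statics:
 *
 *	static void dummy_smi_free(struct ipmi_smi_msg *msg) { }
 *	static void dummy_recv_free(struct ipmi_recv_msg *msg) { }
 *	static struct ipmi_smi_msg s_smi = { .done = dummy_smi_free };
 *	static struct ipmi_recv_msg s_recv = { .done = dummy_recv_free };
 *
 *	rv = ipmi_request_supply_msgs(user, (struct ipmi_addr *) &si,
 *				      msgid, &kmsg, NULL, &s_smi,
 *				      &s_recv, 0);
 */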
2487 static void bmc_device_id_handler(struct ipmi_smi *intf,
2488 struct ipmi_recv_msg *msg)
2492 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2493 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2494 || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
2495 dev_warn(intf->si_dev,
2496 "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
2497 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
2501 if (msg->msg.data[0]) {
2502 dev_warn(intf->si_dev, "device id fetch failed: 0x%2.2x\n",
2504 intf->bmc->dyn_id_set = 0;
2508 rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
2509 msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
2511 dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv);
2512 /* Record the completion code on error. */
2513 intf->bmc->cc = msg->msg.data[0];
2514 intf->bmc->dyn_id_set = 0;
2517 * Make sure the id data is available before setting dyn_id_set.
2521 intf->bmc->dyn_id_set = 1;
2524 wake_up(&intf->waitq);
2528 send_get_device_id_cmd(struct ipmi_smi *intf)
2530 struct ipmi_system_interface_addr si;
2531 struct kernel_ipmi_msg msg;
2533 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2534 si.channel = IPMI_BMC_CHANNEL;
2537 msg.netfn = IPMI_NETFN_APP_REQUEST;
2538 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
2542 return i_ipmi_request(NULL,
2544 (struct ipmi_addr *) &si,
2551 intf->addrinfo[0].address,
2552 intf->addrinfo[0].lun,
2556 static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
2559 unsigned int retry_count = 0;
2561 intf->null_user_handler = bmc_device_id_handler;
2565 bmc->dyn_id_set = 2;
2567 rv = send_get_device_id_cmd(intf);
2569 goto out_reset_handler;
2571 wait_event(intf->waitq, bmc->dyn_id_set != 2);
2573 if (!bmc->dyn_id_set) {
2574 if (bmc->cc != IPMI_CC_NO_ERROR &&
2575 ++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
2577 dev_warn(intf->si_dev,
2578 "BMC returned 0x%2.2x, retry get bmc device id\n",
2583 rv = -EIO; /* Something went wrong in the fetch. */
2586 /* dyn_id_set makes the id data available. */
2590 intf->null_user_handler = NULL;
2596 * Fetch the device id for the bmc/interface. You must pass in either
2597 * bmc or intf; this code will get the other one. If the data has
2598 * been recently fetched, this will just use the cached data. Otherwise
2599 * it will run a new fetch.
2601 * Except for the first time this is called (in ipmi_add_smi()),
2602 * this will always return good data.
2604 static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2605 struct ipmi_device_id *id,
2606 bool *guid_set, guid_t *guid, int intf_num)
2609 int prev_dyn_id_set, prev_guid_set;
2610 bool intf_set = intf != NULL;
2613 mutex_lock(&bmc->dyn_mutex);
2615 if (list_empty(&bmc->intfs)) {
2616 mutex_unlock(&bmc->dyn_mutex);
2619 intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
2621 kref_get(&intf->refcount);
2622 mutex_unlock(&bmc->dyn_mutex);
2623 mutex_lock(&intf->bmc_reg_mutex);
2624 mutex_lock(&bmc->dyn_mutex);
2625 if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
2627 mutex_unlock(&intf->bmc_reg_mutex);
2628 kref_put(&intf->refcount, intf_free);
2629 goto retry_bmc_lock;
2632 mutex_lock(&intf->bmc_reg_mutex);
2634 mutex_lock(&bmc->dyn_mutex);
2635 kref_get(&intf->refcount);
2638 /* If we have a valid and current ID, just return that. */
2639 if (intf->in_bmc_register ||
2640 (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
2641 goto out_noprocessing;
2643 prev_guid_set = bmc->dyn_guid_set;
2646 prev_dyn_id_set = bmc->dyn_id_set;
2647 rv = __get_device_id(intf, bmc);
2652 * The guid, device id, manufacturer id, and product id should
2653 * not change on a BMC. If any of them does, we have to do some dancing.
2655 if (!intf->bmc_registered
2656 || (!prev_guid_set && bmc->dyn_guid_set)
2657 || (!prev_dyn_id_set && bmc->dyn_id_set)
2658 || (prev_guid_set && bmc->dyn_guid_set
2659 && !guid_equal(&bmc->guid, &bmc->fetch_guid))
2660 || bmc->id.device_id != bmc->fetch_id.device_id
2661 || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
2662 || bmc->id.product_id != bmc->fetch_id.product_id) {
2663 struct ipmi_device_id id = bmc->fetch_id;
2664 int guid_set = bmc->dyn_guid_set;
2667 guid = bmc->fetch_guid;
2668 mutex_unlock(&bmc->dyn_mutex);
2670 __ipmi_bmc_unregister(intf);
2671 /* Fill in the temporary BMC for good measure. */
2673 intf->bmc->dyn_guid_set = guid_set;
2674 intf->bmc->guid = guid;
2675 if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
2676 need_waiter(intf); /* Retry later on an error. */
2678 __scan_channels(intf, &id);
2683 * We weren't given the interface on the
2684 * command line, so restart the operation on
2685 * the next interface for the BMC.
2687 mutex_unlock(&intf->bmc_reg_mutex);
2688 mutex_lock(&bmc->dyn_mutex);
2689 goto retry_bmc_lock;
2692 /* We have a new BMC, set it up. */
2694 mutex_lock(&bmc->dyn_mutex);
2695 goto out_noprocessing;
2696 } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
2697 /* Version info changed, scan the channels again. */
2698 __scan_channels(intf, &bmc->fetch_id);
2700 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
2703 if (rv && prev_dyn_id_set) {
2704 rv = 0; /* Ignore failures if we have previous data. */
2705 bmc->dyn_id_set = prev_dyn_id_set;
2708 bmc->id = bmc->fetch_id;
2709 if (bmc->dyn_guid_set)
2710 bmc->guid = bmc->fetch_guid;
2711 else if (prev_guid_set)
2713 * The guid used to be valid and the fetch failed, so
2714 * just use the cached value.
2716 bmc->dyn_guid_set = prev_guid_set;
2724 *guid_set = bmc->dyn_guid_set;
2726 if (guid && bmc->dyn_guid_set)
2730 mutex_unlock(&bmc->dyn_mutex);
2731 mutex_unlock(&intf->bmc_reg_mutex);
2733 kref_put(&intf->refcount, intf_free);
2737 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2738 struct ipmi_device_id *id,
2739 bool *guid_set, guid_t *guid)
2741 return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
2744 static ssize_t device_id_show(struct device *dev,
2745 struct device_attribute *attr,
2748 struct bmc_device *bmc = to_bmc_device(dev);
2749 struct ipmi_device_id id;
2752 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2756 return sysfs_emit(buf, "%u\n", id.device_id);
2758 static DEVICE_ATTR_RO(device_id);
2760 static ssize_t provides_device_sdrs_show(struct device *dev,
2761 struct device_attribute *attr,
2764 struct bmc_device *bmc = to_bmc_device(dev);
2765 struct ipmi_device_id id;
2768 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2772 return sysfs_emit(buf, "%u\n", (id.device_revision & 0x80) >> 7);
2774 static DEVICE_ATTR_RO(provides_device_sdrs);
2776 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2779 struct bmc_device *bmc = to_bmc_device(dev);
2780 struct ipmi_device_id id;
2783 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2787 return sysfs_emit(buf, "%u\n", id.device_revision & 0x0F);
2789 static DEVICE_ATTR_RO(revision);
2791 static ssize_t firmware_revision_show(struct device *dev,
2792 struct device_attribute *attr,
2795 struct bmc_device *bmc = to_bmc_device(dev);
2796 struct ipmi_device_id id;
2799 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2803 return sysfs_emit(buf, "%u.%x\n", id.firmware_revision_1,
2804 id.firmware_revision_2);
2806 static DEVICE_ATTR_RO(firmware_revision);
2808 static ssize_t ipmi_version_show(struct device *dev,
2809 struct device_attribute *attr,
2812 struct bmc_device *bmc = to_bmc_device(dev);
2813 struct ipmi_device_id id;
2816 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2820 return sysfs_emit(buf, "%u.%u\n",
2821 ipmi_version_major(&id),
2822 ipmi_version_minor(&id));
2824 static DEVICE_ATTR_RO(ipmi_version);
2826 static ssize_t add_dev_support_show(struct device *dev,
2827 struct device_attribute *attr,
2830 struct bmc_device *bmc = to_bmc_device(dev);
2831 struct ipmi_device_id id;
2834 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2838 return sysfs_emit(buf, "0x%02x\n", id.additional_device_support);
2840 static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
2843 static ssize_t manufacturer_id_show(struct device *dev,
2844 struct device_attribute *attr,
2847 struct bmc_device *bmc = to_bmc_device(dev);
2848 struct ipmi_device_id id;
2851 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2855 return sysfs_emit(buf, "0x%6.6x\n", id.manufacturer_id);
2857 static DEVICE_ATTR_RO(manufacturer_id);
2859 static ssize_t product_id_show(struct device *dev,
2860 struct device_attribute *attr,
2863 struct bmc_device *bmc = to_bmc_device(dev);
2864 struct ipmi_device_id id;
2867 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2871 return sysfs_emit(buf, "0x%4.4x\n", id.product_id);
2873 static DEVICE_ATTR_RO(product_id);
2875 static ssize_t aux_firmware_rev_show(struct device *dev,
2876 struct device_attribute *attr,
2879 struct bmc_device *bmc = to_bmc_device(dev);
2880 struct ipmi_device_id id;
2883 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2887 return sysfs_emit(buf, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2888 id.aux_firmware_revision[3],
2889 id.aux_firmware_revision[2],
2890 id.aux_firmware_revision[1],
2891 id.aux_firmware_revision[0]);
2893 static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
2895 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2898 struct bmc_device *bmc = to_bmc_device(dev);
2903 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
2909 return sysfs_emit(buf, "%pUl\n", &guid);
2911 static DEVICE_ATTR_RO(guid);
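/*
 * For reference: the attributes above appear under the BMC platform
 * device in sysfs, e.g. (path illustrative, the instance number
 * comes from ipmi_bmc_ida):
 *
 *	/sys/devices/platform/ipmi_bmc.0/device_id
 *	/sys/devices/platform/ipmi_bmc.0/firmware_revision
 *	/sys/devices/platform/ipmi_bmc.0/guid
 */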
2913 static struct attribute *bmc_dev_attrs[] = {
2914 &dev_attr_device_id.attr,
2915 &dev_attr_provides_device_sdrs.attr,
2916 &dev_attr_revision.attr,
2917 &dev_attr_firmware_revision.attr,
2918 &dev_attr_ipmi_version.attr,
2919 &dev_attr_additional_device_support.attr,
2920 &dev_attr_manufacturer_id.attr,
2921 &dev_attr_product_id.attr,
2922 &dev_attr_aux_firmware_revision.attr,
2923 &dev_attr_guid.attr,
2927 static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
2928 struct attribute *attr, int idx)
2930 struct device *dev = kobj_to_dev(kobj);
2931 struct bmc_device *bmc = to_bmc_device(dev);
2932 umode_t mode = attr->mode;
2935 if (attr == &dev_attr_aux_firmware_revision.attr) {
2936 struct ipmi_device_id id;
2938 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2939 return (!rv && id.aux_firmware_revision_set) ? mode : 0;
2941 if (attr == &dev_attr_guid.attr) {
2944 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
2945 return (!rv && guid_set) ? mode : 0;
2950 static const struct attribute_group bmc_dev_attr_group = {
2951 .attrs = bmc_dev_attrs,
2952 .is_visible = bmc_dev_attr_is_visible,
2955 static const struct attribute_group *bmc_dev_attr_groups[] = {
2956 &bmc_dev_attr_group,
2960 static const struct device_type bmc_device_type = {
2961 .groups = bmc_dev_attr_groups,
2964 static int __find_bmc_guid(struct device *dev, const void *data)
2966 const guid_t *guid = data;
2967 struct bmc_device *bmc;
2970 if (dev->type != &bmc_device_type)
2973 bmc = to_bmc_device(dev);
2974 rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
2976 rv = kref_get_unless_zero(&bmc->usecount);
2981 * Returns with the bmc's usecount incremented, if it is non-NULL.
2983 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
2987 struct bmc_device *bmc = NULL;
2989 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
2991 bmc = to_bmc_device(dev);
2997 struct prod_dev_id {
2998 unsigned int product_id;
2999 unsigned char device_id;
3002 static int __find_bmc_prod_dev_id(struct device *dev, const void *data)
3004 const struct prod_dev_id *cid = data;
3005 struct bmc_device *bmc;
3008 if (dev->type != &bmc_device_type)
3011 bmc = to_bmc_device(dev);
3012 rv = (bmc->id.product_id == cid->product_id
3013 && bmc->id.device_id == cid->device_id);
3015 rv = kref_get_unless_zero(&bmc->usecount);
3020 * Returns with the bmc's usecount incremented, if it is non-NULL.
3022 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
3023 struct device_driver *drv,
3024 unsigned int product_id, unsigned char device_id)
3026 struct prod_dev_id id = {
3027 .product_id = product_id,
3028 .device_id = device_id,
3031 struct bmc_device *bmc = NULL;
3033 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
3035 bmc = to_bmc_device(dev);
3041 static DEFINE_IDA(ipmi_bmc_ida);
3044 release_bmc_device(struct device *dev)
3046 kfree(to_bmc_device(dev));
3049 static void cleanup_bmc_work(struct work_struct *work)
3051 struct bmc_device *bmc = container_of(work, struct bmc_device,
3053 int id = bmc->pdev.id; /* Unregister overwrites id */
3055 platform_device_unregister(&bmc->pdev);
3056 ida_free(&ipmi_bmc_ida, id);
3060 cleanup_bmc_device(struct kref *ref)
3062 struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
3065 * Remove the platform device in a work queue to avoid issues
3066 * with removing the device attributes while reading a device attribute.
3069 queue_work(remove_work_wq, &bmc->remove_work);
3073 * Must be called with intf->bmc_reg_mutex held.
3075 static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
3077 struct bmc_device *bmc = intf->bmc;
3079 if (!intf->bmc_registered)
3082 sysfs_remove_link(&intf->si_dev->kobj, "bmc");
3083 sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
3084 kfree(intf->my_dev_name);
3085 intf->my_dev_name = NULL;
3087 mutex_lock(&bmc->dyn_mutex);
3088 list_del(&intf->bmc_link);
3089 mutex_unlock(&bmc->dyn_mutex);
3090 intf->bmc = &intf->tmp_bmc;
3091 kref_put(&bmc->usecount, cleanup_bmc_device);
3092 intf->bmc_registered = false;
3095 static void ipmi_bmc_unregister(struct ipmi_smi *intf)
3097 mutex_lock(&intf->bmc_reg_mutex);
3098 __ipmi_bmc_unregister(intf);
3099 mutex_unlock(&intf->bmc_reg_mutex);
3103 * Must be called with intf->bmc_reg_mutex held.
3105 static int __ipmi_bmc_register(struct ipmi_smi *intf,
3106 struct ipmi_device_id *id,
3107 bool guid_set, guid_t *guid, int intf_num)
3110 struct bmc_device *bmc;
3111 struct bmc_device *old_bmc;
3114 * platform_device_register() can cause bmc_reg_mutex to
3115 * be claimed because of the is_visible functions of
3116 * the attributes. Eliminate possible recursion and release the lock.
3119 intf->in_bmc_register = true;
3120 mutex_unlock(&intf->bmc_reg_mutex);
3123 * Try to find if there is already a bmc_device struct
3124 * representing the interfaced BMC.
3126 mutex_lock(&ipmidriver_mutex);
3128 old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
3130 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
3135 * If there is already a bmc_device, free the new one;
3136 * otherwise register the new BMC device.
3141 * Note: old_bmc already has usecount incremented by
3142 * the BMC find functions.
3144 intf->bmc = old_bmc;
3145 mutex_lock(&bmc->dyn_mutex);
3146 list_add_tail(&intf->bmc_link, &bmc->intfs);
3147 mutex_unlock(&bmc->dyn_mutex);
3149 dev_info(intf->si_dev,
3150 "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
3151 bmc->id.manufacturer_id,
3155 bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
3160 INIT_LIST_HEAD(&bmc->intfs);
3161 mutex_init(&bmc->dyn_mutex);
3162 INIT_WORK(&bmc->remove_work, cleanup_bmc_work);
3165 bmc->dyn_id_set = 1;
3166 bmc->dyn_guid_set = guid_set;
3168 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
3170 bmc->pdev.name = "ipmi_bmc";
3172 rv = ida_alloc(&ipmi_bmc_ida, GFP_KERNEL);
3178 bmc->pdev.dev.driver = &ipmidriver.driver;
3180 bmc->pdev.dev.release = release_bmc_device;
3181 bmc->pdev.dev.type = &bmc_device_type;
3182 kref_init(&bmc->usecount);
3185 mutex_lock(&bmc->dyn_mutex);
3186 list_add_tail(&intf->bmc_link, &bmc->intfs);
3187 mutex_unlock(&bmc->dyn_mutex);
3189 rv = platform_device_register(&bmc->pdev);
3191 dev_err(intf->si_dev,
3192 "Unable to register bmc device: %d\n",
3197 dev_info(intf->si_dev,
3198 "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
3199 bmc->id.manufacturer_id,
3205 * Create a symlink from the system interface device to the bmc device and back.
3208 rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
3210 dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv);
3215 intf_num = intf->intf_num;
3216 intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
3217 if (!intf->my_dev_name) {
3219 dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n",
3224 rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
3227 dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n",
3229 goto out_free_my_dev_name;
3232 intf->bmc_registered = true;
3235 mutex_unlock(&ipmidriver_mutex);
3236 mutex_lock(&intf->bmc_reg_mutex);
3237 intf->in_bmc_register = false;
3241 out_free_my_dev_name:
3242 kfree(intf->my_dev_name);
3243 intf->my_dev_name = NULL;
3246 sysfs_remove_link(&intf->si_dev->kobj, "bmc");
3249 mutex_lock(&bmc->dyn_mutex);
3250 list_del(&intf->bmc_link);
3251 mutex_unlock(&bmc->dyn_mutex);
3252 intf->bmc = &intf->tmp_bmc;
3253 kref_put(&bmc->usecount, cleanup_bmc_device);
3257 mutex_lock(&bmc->dyn_mutex);
3258 list_del(&intf->bmc_link);
3259 mutex_unlock(&bmc->dyn_mutex);
3260 intf->bmc = &intf->tmp_bmc;
3261 put_device(&bmc->pdev.dev);
3266 send_guid_cmd(struct ipmi_smi *intf, int chan)
3268 struct kernel_ipmi_msg msg;
3269 struct ipmi_system_interface_addr si;
3271 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3272 si.channel = IPMI_BMC_CHANNEL;
3275 msg.netfn = IPMI_NETFN_APP_REQUEST;
3276 msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
3279 return i_ipmi_request(NULL,
3281 (struct ipmi_addr *) &si,
3288 intf->addrinfo[0].address,
3289 intf->addrinfo[0].lun,
3293 static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3295 struct bmc_device *bmc = intf->bmc;
3297 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3298 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
3299 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
3303 if (msg->msg.data[0] != 0) {
3304 /* Error from getting the GUID, the BMC doesn't have one. */
3305 bmc->dyn_guid_set = 0;
3309 if (msg->msg.data_len < UUID_SIZE + 1) {
3310 bmc->dyn_guid_set = 0;
3311 dev_warn(intf->si_dev,
3312 "The GUID response from the BMC was too short, it was %d but should have been %d. Assuming GUID is not available.\n",
3313 msg->msg.data_len, UUID_SIZE + 1);
3317 import_guid(&bmc->fetch_guid, msg->msg.data + 1);
3319 * Make sure the guid data is available before setting dyn_guid_set.
3323 bmc->dyn_guid_set = 1;
3325 wake_up(&intf->waitq);
3328 static void __get_guid(struct ipmi_smi *intf)
3331 struct bmc_device *bmc = intf->bmc;
3333 bmc->dyn_guid_set = 2;
3334 intf->null_user_handler = guid_handler;
3335 rv = send_guid_cmd(intf, 0);
3337 /* Send failed, no GUID available. */
3338 bmc->dyn_guid_set = 0;
3340 wait_event(intf->waitq, bmc->dyn_guid_set != 2);
3342 /* dyn_guid_set makes the guid data available. */
3345 intf->null_user_handler = NULL;
3349 send_channel_info_cmd(struct ipmi_smi *intf, int chan)
3351 struct kernel_ipmi_msg msg;
3352 unsigned char data[1];
3353 struct ipmi_system_interface_addr si;
3355 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3356 si.channel = IPMI_BMC_CHANNEL;
3359 msg.netfn = IPMI_NETFN_APP_REQUEST;
3360 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
3364 return i_ipmi_request(NULL,
3366 (struct ipmi_addr *) &si,
3373 intf->addrinfo[0].address,
3374 intf->addrinfo[0].lun,
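/*
 * For reference, the Get Channel Info response bytes that
 * channel_handler() below consumes (offsets derived from the code):
 *
 *	data[0]: completion code
 *	data[2]: channel medium type (low 7 bits)
 *	data[3]: channel protocol type (low 5 bits)
 */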
3379 channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
3383 unsigned int set = intf->curr_working_cset;
3384 struct ipmi_channel *chans;
3386 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3387 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3388 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
3389 /* It's the one we want */
3390 if (msg->msg.data[0] != 0) {
3391 /* Got an error from the channel, just go on. */
3392 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
3394 * If the MC does not support this
3395 * command, that is legal. We just
3396 * assume it has one IPMB at channel zero.
3399 intf->wchannels[set].c[0].medium
3400 = IPMI_CHANNEL_MEDIUM_IPMB;
3401 intf->wchannels[set].c[0].protocol
3402 = IPMI_CHANNEL_PROTOCOL_IPMB;
3404 intf->channel_list = intf->wchannels + set;
3405 intf->channels_ready = true;
3406 wake_up(&intf->waitq);
3411 if (msg->msg.data_len < 4) {
3412 /* Message not big enough, just go on. */
3415 ch = intf->curr_channel;
3416 chans = intf->wchannels[set].c;
3417 chans[ch].medium = msg->msg.data[2] & 0x7f;
3418 chans[ch].protocol = msg->msg.data[3] & 0x1f;
3421 intf->curr_channel++;
3422 if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
3423 intf->channel_list = intf->wchannels + set;
3424 intf->channels_ready = true;
3425 wake_up(&intf->waitq);
3427 intf->channel_list = intf->wchannels + set;
3428 intf->channels_ready = true;
3429 rv = send_channel_info_cmd(intf, intf->curr_channel);
3433 /* Got an error somehow, just give up. */
3434 dev_warn(intf->si_dev,
3435 "Error sending channel information for channel %d: %d\n",
3436 intf->curr_channel, rv);
3438 intf->channel_list = intf->wchannels + set;
3439 intf->channels_ready = true;
3440 wake_up(&intf->waitq);
3448 * Must be holding intf->bmc_reg_mutex to call this.
3450 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
3454 if (ipmi_version_major(id) > 1
3455 || (ipmi_version_major(id) == 1
3456 && ipmi_version_minor(id) >= 5)) {
3460 * Start scanning the channels to see what is available.
3463 set = !intf->curr_working_cset;
3464 intf->curr_working_cset = set;
3465 memset(&intf->wchannels[set], 0,
3466 sizeof(struct ipmi_channel_set));
3468 intf->null_user_handler = channel_handler;
3469 intf->curr_channel = 0;
3470 rv = send_channel_info_cmd(intf, 0);
3472 dev_warn(intf->si_dev,
3473 "Error sending channel information for channel 0, %d\n",
3475 intf->null_user_handler = NULL;
3479 /* Wait for the channel info to be read. */
3480 wait_event(intf->waitq, intf->channels_ready);
3481 intf->null_user_handler = NULL;
3483 unsigned int set = intf->curr_working_cset;
3485 /* Assume a single IPMB channel at zero. */
3486 intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
3487 intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
3488 intf->channel_list = intf->wchannels + set;
3489 intf->channels_ready = true;
3495 static void ipmi_poll(struct ipmi_smi *intf)
3497 if (intf->handlers->poll)
3498 intf->handlers->poll(intf->send_info);
3499 /* In case something came in */
3500 handle_new_recv_msgs(intf);
3503 void ipmi_poll_interface(struct ipmi_user *user)
3505 ipmi_poll(user->intf);
3507 EXPORT_SYMBOL(ipmi_poll_interface);
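/*
 * Hypothetical usage sketch: a client running with interrupts off
 * (at panic time, say) can busy-wait for its response by polling,
 * where got_response is an assumed flag set from the client's
 * ipmi_recv_hndl callback:
 *
 *	while (!got_response)
 *		ipmi_poll_interface(user);
 */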
3509 static ssize_t nr_users_show(struct device *dev,
3510 struct device_attribute *attr,
3513 struct ipmi_smi *intf = container_of(attr,
3514 struct ipmi_smi, nr_users_devattr);
3516 return sysfs_emit(buf, "%d\n", atomic_read(&intf->nr_users));
3518 static DEVICE_ATTR_RO(nr_users);
3520 static ssize_t nr_msgs_show(struct device *dev,
3521 struct device_attribute *attr,
3524 struct ipmi_smi *intf = container_of(attr,
3525 struct ipmi_smi, nr_msgs_devattr);
3526 struct ipmi_user *user;
3528 unsigned int count = 0;
3530 index = srcu_read_lock(&intf->users_srcu);
3531 list_for_each_entry_rcu(user, &intf->users, link)
3532 count += atomic_read(&user->nr_msgs);
3533 srcu_read_unlock(&intf->users_srcu, index);
3535 return sysfs_emit(buf, "%u\n", count);
3537 static DEVICE_ATTR_RO(nr_msgs);
3539 static void redo_bmc_reg(struct work_struct *work)
3541 struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
3544 if (!intf->in_shutdown)
3545 bmc_get_device_id(intf, NULL, NULL, NULL, NULL);
3547 kref_put(&intf->refcount, intf_free);
3550 int ipmi_add_smi(struct module *owner,
3551 const struct ipmi_smi_handlers *handlers,
3553 struct device *si_dev,
3554 unsigned char slave_addr)
3558 struct ipmi_smi *intf, *tintf;
3559 struct list_head *link;
3560 struct ipmi_device_id id;
3563 * Make sure the driver is actually initialized; this handles
3564 * problems with initialization order.
3566 rv = ipmi_init_msghandler();
3570 intf = kzalloc(sizeof(*intf), GFP_KERNEL);
3574 rv = init_srcu_struct(&intf->users_srcu);
3580 intf->owner = owner;
3581 intf->bmc = &intf->tmp_bmc;
3582 INIT_LIST_HEAD(&intf->bmc->intfs);
3583 mutex_init(&intf->bmc->dyn_mutex);
3584 INIT_LIST_HEAD(&intf->bmc_link);
3585 mutex_init(&intf->bmc_reg_mutex);
3586 intf->intf_num = -1; /* Mark it invalid for now. */
3587 kref_init(&intf->refcount);
3588 INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
3589 intf->si_dev = si_dev;
3590 for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
3591 intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
3592 intf->addrinfo[j].lun = 2;
3594 if (slave_addr != 0)
3595 intf->addrinfo[0].address = slave_addr;
3596 INIT_LIST_HEAD(&intf->users);
3597 atomic_set(&intf->nr_users, 0);
3598 intf->handlers = handlers;
3599 intf->send_info = send_info;
3600 spin_lock_init(&intf->seq_lock);
3601 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
3602 intf->seq_table[j].inuse = 0;
3603 intf->seq_table[j].seqid = 0;
3606 spin_lock_init(&intf->waiting_rcv_msgs_lock);
3607 INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
3608 tasklet_setup(&intf->recv_tasklet,
3610 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
3611 spin_lock_init(&intf->xmit_msgs_lock);
3612 INIT_LIST_HEAD(&intf->xmit_msgs);
3613 INIT_LIST_HEAD(&intf->hp_xmit_msgs);
3614 spin_lock_init(&intf->events_lock);
3615 spin_lock_init(&intf->watch_lock);
3616 atomic_set(&intf->event_waiters, 0);
3617 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3618 INIT_LIST_HEAD(&intf->waiting_events);
3619 intf->waiting_events_count = 0;
3620 mutex_init(&intf->cmd_rcvrs_mutex);
3621 spin_lock_init(&intf->maintenance_mode_lock);
3622 INIT_LIST_HEAD(&intf->cmd_rcvrs);
3623 init_waitqueue_head(&intf->waitq);
3624 for (i = 0; i < IPMI_NUM_STATS; i++)
3625 atomic_set(&intf->stats[i], 0);
3627 mutex_lock(&ipmi_interfaces_mutex);
3628 /* Look for a hole in the numbers. */
3630 link = &ipmi_interfaces;
3631 list_for_each_entry_rcu(tintf, &ipmi_interfaces, link,
3632 ipmi_interfaces_mutex_held()) {
3633 if (tintf->intf_num != i) {
3634 link = &tintf->link;
3639 /* Add the new interface in numeric order. */
3641 list_add_rcu(&intf->link, &ipmi_interfaces);
3643 list_add_tail_rcu(&intf->link, link);
3645 rv = handlers->start_processing(send_info, intf);
3649 rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
3651 dev_err(si_dev, "Unable to get the device id: %d\n", rv);
3652 goto out_err_started;
3655 mutex_lock(&intf->bmc_reg_mutex);
3656 rv = __scan_channels(intf, &id);
3657 mutex_unlock(&intf->bmc_reg_mutex);
3659 goto out_err_bmc_reg;
3661 intf->nr_users_devattr = dev_attr_nr_users;
3662 sysfs_attr_init(&intf->nr_users_devattr.attr);
3663 rv = device_create_file(intf->si_dev, &intf->nr_users_devattr);
3665 goto out_err_bmc_reg;
3667 intf->nr_msgs_devattr = dev_attr_nr_msgs;
3668 sysfs_attr_init(&intf->nr_msgs_devattr.attr);
3669 rv = device_create_file(intf->si_dev, &intf->nr_msgs_devattr);
3671 device_remove_file(intf->si_dev, &intf->nr_users_devattr);
3672 goto out_err_bmc_reg;
3676 * Keep memory order straight for RCU readers. Make
3677 * sure everything else is committed to memory before
3678 * setting intf_num to mark the interface valid.
3682 mutex_unlock(&ipmi_interfaces_mutex);
3684 /* After this point the interface is legal to use. */
3685 call_smi_watchers(i, intf->si_dev);
3690 ipmi_bmc_unregister(intf);
3692 if (intf->handlers->shutdown)
3693 intf->handlers->shutdown(intf->send_info);
3695 list_del_rcu(&intf->link);
3696 mutex_unlock(&ipmi_interfaces_mutex);
3697 synchronize_srcu(&ipmi_interfaces_srcu);
3698 cleanup_srcu_struct(&intf->users_srcu);
3699 kref_put(&intf->refcount, intf_free);
3703 EXPORT_SYMBOL(ipmi_add_smi);
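/*
 * Registration sketch for a lower-layer driver (handler names are
 * hypothetical; see ipmi_smi.h for the full set of callbacks):
 *
 *	static const struct ipmi_smi_handlers my_handlers = {
 *		.owner            = THIS_MODULE,
 *		.start_processing = my_start_processing,
 *		.sender           = my_sender,
 *		.request_events   = my_request_events,
 *		.shutdown         = my_shutdown,
 *	};
 *
 *	rv = ipmi_add_smi(THIS_MODULE, &my_handlers, my_info,
 *			  my_parent_dev, slave_addr);
 */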
3705 static void deliver_smi_err_response(struct ipmi_smi *intf,
3706 struct ipmi_smi_msg *msg,
3710 msg->rsp[0] = msg->data[0] | 4;
3711 msg->rsp[1] = msg->data[1];
3715 /* This will never requeue, but it may ask us to free the message. */
3716 rv = handle_one_recv_msg(intf, msg);
3718 ipmi_free_smi_msg(msg);
3721 static void cleanup_smi_msgs(struct ipmi_smi *intf)
3724 struct seq_table *ent;
3725 struct ipmi_smi_msg *msg;
3726 struct list_head *entry;
3727 struct list_head tmplist;
3729 /* Clear out our transmit queues and hold the messages. */
3730 INIT_LIST_HEAD(&tmplist);
3731 list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
3732 list_splice_tail(&intf->xmit_msgs, &tmplist);
3734 /* Current message first, to preserve order */
3735 while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
3736 /* Wait for the message to clear out. */
3737 schedule_timeout(1);
3740 /* No need for locks; the interface is down. */
3743 * Return errors for all pending messages in queue and in the
3744 * tables waiting for remote responses.
3746 while (!list_empty(&tmplist)) {
3747 entry = tmplist.next;
3749 msg = list_entry(entry, struct ipmi_smi_msg, link);
3750 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
3753 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
3754 ent = &intf->seq_table[i];
3757 deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
3761 void ipmi_unregister_smi(struct ipmi_smi *intf)
3763 struct ipmi_smi_watcher *w;
3764 int intf_num, index;
3768 intf_num = intf->intf_num;
3769 mutex_lock(&ipmi_interfaces_mutex);
3770 intf->intf_num = -1;
3771 intf->in_shutdown = true;
3772 list_del_rcu(&intf->link);
3773 mutex_unlock(&ipmi_interfaces_mutex);
3774 synchronize_srcu(&ipmi_interfaces_srcu);
3776 /* At this point no users can be added to the interface. */
3778 device_remove_file(intf->si_dev, &intf->nr_msgs_devattr);
3779 device_remove_file(intf->si_dev, &intf->nr_users_devattr);
3782 * Call all the watcher interfaces to tell them that
3783 * an interface is going away.
3785 mutex_lock(&smi_watchers_mutex);
3786 list_for_each_entry(w, &smi_watchers, link)
3787 w->smi_gone(intf_num);
3788 mutex_unlock(&smi_watchers_mutex);
3790 index = srcu_read_lock(&intf->users_srcu);
3791 while (!list_empty(&intf->users)) {
3792 struct ipmi_user *user =
3793 container_of(list_next_rcu(&intf->users),
3794 struct ipmi_user, link);
3796 _ipmi_destroy_user(user);
3798 srcu_read_unlock(&intf->users_srcu, index);
3800 if (intf->handlers->shutdown)
3801 intf->handlers->shutdown(intf->send_info);
3803 cleanup_smi_msgs(intf);
3805 ipmi_bmc_unregister(intf);
3807 cleanup_srcu_struct(&intf->users_srcu);
3808 kref_put(&intf->refcount, intf_free);
3810 EXPORT_SYMBOL(ipmi_unregister_smi);
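/*
 * For reference, the layout of a Get Message response carrying an
 * IPMB message, as consumed by the handlers below (offsets derived
 * from the code; the trailing byte is the second IPMB checksum):
 *
 *	rsp[0]: netfn << 2              rsp[1]: Get Message cmd
 *	rsp[2]: completion code         rsp[3]: channel (low nibble)
 *	rsp[4]: remote netfn/LUN        rsp[5]: checksum 1
 *	rsp[6]: remote slave address    rsp[7]: sequence/LUN
 *	rsp[8]: cmd                     rsp[9]..: data
 */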
3812 static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
3813 struct ipmi_smi_msg *msg)
3815 struct ipmi_ipmb_addr ipmb_addr;
3816 struct ipmi_recv_msg *recv_msg;
3819 * This is 11, not 10, because the response must contain a completion code.
3822 if (msg->rsp_size < 11) {
3823 /* Message not big enough, just ignore it. */
3824 ipmi_inc_stat(intf, invalid_ipmb_responses);
3828 if (msg->rsp[2] != 0) {
3829 /* An error getting the response, just ignore it. */
3833 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
3834 ipmb_addr.slave_addr = msg->rsp[6];
3835 ipmb_addr.channel = msg->rsp[3] & 0x0f;
3836 ipmb_addr.lun = msg->rsp[7] & 3;
3839 * It's a response from a remote entity. Look up the sequence
3840 * number and handle the response.
3842 if (intf_find_seq(intf,
3846 (msg->rsp[4] >> 2) & (~1),
3847 (struct ipmi_addr *) &ipmb_addr,
3850 * We were unable to find the sequence number,
3851 * so just nuke the message.
3853 ipmi_inc_stat(intf, unhandled_ipmb_responses);
3857 memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9);
3859 * The other fields matched, so no need to set them, except
3860 * for netfn, which needs to be the response that was
3861 * returned, not the request value.
3863 recv_msg->msg.netfn = msg->rsp[4] >> 2;
3864 recv_msg->msg.data = recv_msg->msg_data;
3865 recv_msg->msg.data_len = msg->rsp_size - 10;
3866 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3867 if (deliver_response(intf, recv_msg))
3868 ipmi_inc_stat(intf, unhandled_ipmb_responses);
3870 ipmi_inc_stat(intf, handled_ipmb_responses);
3875 static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
3876 struct ipmi_smi_msg *msg)
3878 struct cmd_rcvr *rcvr;
3880 unsigned char netfn;
3883 struct ipmi_user *user = NULL;
3884 struct ipmi_ipmb_addr *ipmb_addr;
3885 struct ipmi_recv_msg *recv_msg;
3887 if (msg->rsp_size < 10) {
3888 /* Message not big enough, just ignore it. */
3889 ipmi_inc_stat(intf, invalid_commands);
3893 if (msg->rsp[2] != 0) {
3894 /* An error getting the response, just ignore it. */
3898 netfn = msg->rsp[4] >> 2;
3900 chan = msg->rsp[3] & 0xf;
3903 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
3906 kref_get(&user->refcount);
3912 /* We didn't find a user, deliver an error response. */
3913 ipmi_inc_stat(intf, unhandled_commands);
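/*
 * Build a Send Message request that bounces an "invalid command"
 * completion code back to the originator over IPMB: the netfn is
 * bumped to the response netfn (netfn + 1) and the source and
 * destination addressing from the incoming request is reflected
 * back, checksummed as the code below shows.
 */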
3915 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
3916 msg->data[1] = IPMI_SEND_MSG_CMD;
3917 msg->data[2] = msg->rsp[3];
3918 msg->data[3] = msg->rsp[6];
3919 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
3920 msg->data[5] = ipmb_checksum(&msg->data[3], 2);
3921 msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
3923 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
3924 msg->data[8] = msg->rsp[8]; /* cmd */
3925 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
3926 msg->data[10] = ipmb_checksum(&msg->data[6], 4);
3927 msg->data_size = 11;
3929 dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
3930 msg->data_size, msg->data);
3933 if (!intf->in_shutdown) {
3934 smi_send(intf, intf->handlers, msg, 0);
3936 * We used the message, so return the value
3937 * that causes it to not be freed or queued.
3944 recv_msg = ipmi_alloc_recv_msg();
3947 * We couldn't allocate memory for the
3948 * message, so requeue it for handling later.
3952 kref_put(&user->refcount, free_user);
3954 /* Extract the source address from the data. */
3955 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
3956 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
3957 ipmb_addr->slave_addr = msg->rsp[6];
3958 ipmb_addr->lun = msg->rsp[7] & 3;
3959 ipmb_addr->channel = msg->rsp[3] & 0xf;
3962 * Extract the rest of the message information
3963 * from the IPMB header.
3965 recv_msg->user = user;
3966 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3967 recv_msg->msgid = msg->rsp[7] >> 2;
3968 recv_msg->msg.netfn = msg->rsp[4] >> 2;
3969 recv_msg->msg.cmd = msg->rsp[8];
3970 recv_msg->msg.data = recv_msg->msg_data;
3973 * We chop off 10, not 9, bytes because the checksum
3974 * at the end also needs to be removed.
3976 recv_msg->msg.data_len = msg->rsp_size - 10;
3977 memcpy(recv_msg->msg_data, &msg->rsp[9],
3978 msg->rsp_size - 10);
3979 if (deliver_response(intf, recv_msg))
3980 ipmi_inc_stat(intf, unhandled_commands);
3982 ipmi_inc_stat(intf, handled_commands);
3989 static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
3990 struct ipmi_smi_msg *msg)
3992 struct cmd_rcvr *rcvr;
3994 struct ipmi_user *user = NULL;
3995 struct ipmi_ipmb_direct_addr *daddr;
3996 struct ipmi_recv_msg *recv_msg;
3997 unsigned char netfn = msg->rsp[0] >> 2;
3998 unsigned char cmd = msg->rsp[3];
4001 /* We always use channel 0 for direct messages. */
4002 rcvr = find_cmd_rcvr(intf, netfn, cmd, 0);
4005 kref_get(&user->refcount);
4011 /* We didn't find a user, deliver an error response. */
4012 ipmi_inc_stat(intf, unhandled_commands);
4014 msg->data[0] = (netfn + 1) << 2;
4015 msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */
4016 msg->data[1] = msg->rsp[1]; /* Addr */
4017 msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */
4018 msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */
4020 msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
4024 if (!intf->in_shutdown) {
4025 smi_send(intf, intf->handlers, msg, 0);
4027 * We used the message, so return the value
4028 * that causes it to not be freed or queued.
4035 recv_msg = ipmi_alloc_recv_msg();
4038 * We couldn't allocate memory for the
4039 * message, so requeue it for handling later.
4043 kref_put(&user->refcount, free_user);
4045 /* Extract the source address from the data. */
4046 daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
4047 daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
4049 daddr->slave_addr = msg->rsp[1];
4050 daddr->rs_lun = msg->rsp[0] & 3;
4051 daddr->rq_lun = msg->rsp[2] & 3;
4054 * Extract the rest of the message information
4055 * from the IPMB header.
4057 recv_msg->user = user;
4058 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
4059 recv_msg->msgid = (msg->rsp[2] >> 2);
4060 recv_msg->msg.netfn = msg->rsp[0] >> 2;
4061 recv_msg->msg.cmd = msg->rsp[3];
4062 recv_msg->msg.data = recv_msg->msg_data;
4064 recv_msg->msg.data_len = msg->rsp_size - 4;
4065 memcpy(recv_msg->msg_data, msg->rsp + 4,
4067 if (deliver_response(intf, recv_msg))
4068 ipmi_inc_stat(intf, unhandled_commands);
4070 ipmi_inc_stat(intf, handled_commands);
4077 static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf,
4078 struct ipmi_smi_msg *msg)
4080 struct ipmi_recv_msg *recv_msg;
4081 struct ipmi_ipmb_direct_addr *daddr;
4083 recv_msg = msg->user_data;
4084 if (recv_msg == NULL) {
4085 dev_warn(intf->si_dev,
4086 "IPMI direct message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
4090 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4091 recv_msg->msgid = msg->msgid;
4092 daddr = (struct ipmi_ipmb_direct_addr *) &recv_msg->addr;
4093 daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
4095 daddr->slave_addr = msg->rsp[1];
4096 daddr->rq_lun = msg->rsp[0] & 3;
4097 daddr->rs_lun = msg->rsp[2] & 3;
4098 recv_msg->msg.netfn = msg->rsp[0] >> 2;
4099 recv_msg->msg.cmd = msg->rsp[3];
4100 memcpy(recv_msg->msg_data, &msg->rsp[4], msg->rsp_size - 4);
4101 recv_msg->msg.data = recv_msg->msg_data;
4102 recv_msg->msg.data_len = msg->rsp_size - 4;
4103 deliver_local_response(intf, recv_msg);
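/*
 * For reference, the layout of a Get Message response carrying a
 * LAN-bridged message, as consumed by the handlers below (offsets
 * derived from the code; the trailing byte is a checksum):
 *
 *	rsp[3]: channel (low nibble) / privilege (high nibble)
 *	rsp[4]: session handle          rsp[5]: local SWID
 *	rsp[6]: netfn/LUN               rsp[8]: remote SWID
 *	rsp[9]: sequence/LUN            rsp[10]: cmd
 *	rsp[11]..: data
 */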
4108 static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
4109 struct ipmi_smi_msg *msg)
4111 struct ipmi_lan_addr lan_addr;
4112 struct ipmi_recv_msg *recv_msg;
4116 * This is 13, not 12, because the response must contain a completion code.
4119 if (msg->rsp_size < 13) {
4120 /* Message not big enough, just ignore it. */
4121 ipmi_inc_stat(intf, invalid_lan_responses);
4125 if (msg->rsp[2] != 0) {
4126 /* An error getting the response, just ignore it. */
4130 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
4131 lan_addr.session_handle = msg->rsp[4];
4132 lan_addr.remote_SWID = msg->rsp[8];
4133 lan_addr.local_SWID = msg->rsp[5];
4134 lan_addr.channel = msg->rsp[3] & 0x0f;
4135 lan_addr.privilege = msg->rsp[3] >> 4;
4136 lan_addr.lun = msg->rsp[9] & 3;
4139 * It's a response from a remote entity. Look up the sequence
4140 * number and handle the response.
4142 if (intf_find_seq(intf,
4146 (msg->rsp[6] >> 2) & (~1),
4147 (struct ipmi_addr *) &lan_addr,
4150 * We were unable to find the sequence number,
4151 * so just nuke the message.
4153 ipmi_inc_stat(intf, unhandled_lan_responses);
4157 memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11);
4159 * The other fields matched, so no need to set them, except
4160 * for netfn, which needs to be the response that was
4161 * returned, not the request value.
4163 recv_msg->msg.netfn = msg->rsp[6] >> 2;
4164 recv_msg->msg.data = recv_msg->msg_data;
4165 recv_msg->msg.data_len = msg->rsp_size - 12;
4166 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4167 if (deliver_response(intf, recv_msg))
4168 ipmi_inc_stat(intf, unhandled_lan_responses);
4170 ipmi_inc_stat(intf, handled_lan_responses);
4175 static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
4176 struct ipmi_smi_msg *msg)
4178 struct cmd_rcvr *rcvr;
4180 unsigned char netfn;
4183 struct ipmi_user *user = NULL;
4184 struct ipmi_lan_addr *lan_addr;
4185 struct ipmi_recv_msg *recv_msg;
4187 if (msg->rsp_size < 12) {
4188 /* Message not big enough, just ignore it. */
4189 ipmi_inc_stat(intf, invalid_commands);
4193 if (msg->rsp[2] != 0) {
4194 /* An error getting the response, just ignore it. */
4198 netfn = msg->rsp[6] >> 2;
4200 chan = msg->rsp[3] & 0xf;
4203 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
4206 kref_get(&user->refcount);
4212 /* We didn't find a user, just give up. */
4213 ipmi_inc_stat(intf, unhandled_commands);
4216 * Don't do anything with these messages, just allow them to be freed.
4221 recv_msg = ipmi_alloc_recv_msg();
4224 * We couldn't allocate memory for the
4225 * message, so requeue it for handling later.
4228 kref_put(&user->refcount, free_user);
4230 /* Extract the source address from the data. */
4231 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
4232 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
4233 lan_addr->session_handle = msg->rsp[4];
4234 lan_addr->remote_SWID = msg->rsp[8];
4235 lan_addr->local_SWID = msg->rsp[5];
4236 lan_addr->lun = msg->rsp[9] & 3;
4237 lan_addr->channel = msg->rsp[3] & 0xf;
4238 lan_addr->privilege = msg->rsp[3] >> 4;
4241 * Extract the rest of the message information
4242 * from the IPMB header.
4244 recv_msg->user = user;
4245 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
4246 recv_msg->msgid = msg->rsp[9] >> 2;
4247 recv_msg->msg.netfn = msg->rsp[6] >> 2;
4248 recv_msg->msg.cmd = msg->rsp[10];
4249 recv_msg->msg.data = recv_msg->msg_data;
4252 * We chop off 12, not 11, bytes because the checksum
4253 * at the end also needs to be removed.
4255 recv_msg->msg.data_len = msg->rsp_size - 12;
4256 memcpy(recv_msg->msg_data, &msg->rsp[11],
4257 msg->rsp_size - 12);
4258 if (deliver_response(intf, recv_msg))
4259 ipmi_inc_stat(intf, unhandled_commands);
4261 ipmi_inc_stat(intf, handled_commands);
4269 * This routine will handle "Get Message" command responses with
4270 * channels that use an OEM Medium. The message format belongs to
4271 * the OEM. See IPMI 2.0 specification, Chapter 6 and
4272 * Chapter 22, sections 22.6 and 22.24 for more details.
4274 static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
4275 struct ipmi_smi_msg *msg)
4277 struct cmd_rcvr *rcvr;
4279 unsigned char netfn;
4282 struct ipmi_user *user = NULL;
4283 struct ipmi_system_interface_addr *smi_addr;
4284 struct ipmi_recv_msg *recv_msg;
4287 * We expect the OEM SW to perform error checking,
4288 * so we just do some basic sanity checks.
4290 if (msg->rsp_size < 4) {
4291 /* Message not big enough, just ignore it. */
4292 ipmi_inc_stat(intf, invalid_commands);
4296 if (msg->rsp[2] != 0) {
4297 /* An error getting the response, just ignore it. */
4302 * This is an OEM Message so the OEM needs to know how
4303 * to handle the message. We do no interpretation.
4305 netfn = msg->rsp[0] >> 2;
4307 chan = msg->rsp[3] & 0xf;
4310 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
4313 kref_get(&user->refcount);
4319 /* We didn't find a user, just give up. */
4320 ipmi_inc_stat(intf, unhandled_commands);
4323 * Don't do anything with these messages, just allow them to be freed.
4329 recv_msg = ipmi_alloc_recv_msg();
4332 * We couldn't allocate memory for the
4333 * message, so requeue it for handling later.
4337 kref_put(&user->refcount, free_user);
4340 * OEM Messages are expected to be delivered via
4341 * the system interface to SMS software. We might
4342 * need to visit this again depending on OEM requirements.
4345 smi_addr = ((struct ipmi_system_interface_addr *)
4347 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4348 smi_addr->channel = IPMI_BMC_CHANNEL;
4349 smi_addr->lun = msg->rsp[0] & 3;
4351 recv_msg->user = user;
4352 recv_msg->user_msg_data = NULL;
4353 recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
4354 recv_msg->msg.netfn = msg->rsp[0] >> 2;
4355 recv_msg->msg.cmd = msg->rsp[1];
4356 recv_msg->msg.data = recv_msg->msg_data;
4359 * The message starts at byte 4, which follows the
4360 * Channel Byte in the "GET MESSAGE" command
4362 recv_msg->msg.data_len = msg->rsp_size - 4;
4363 memcpy(recv_msg->msg_data, &msg->rsp[4],
4365 if (deliver_response(intf, recv_msg))
4366 ipmi_inc_stat(intf, unhandled_commands);
4368 ipmi_inc_stat(intf, handled_commands);
4375 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
4376 struct ipmi_smi_msg *msg)
4378 struct ipmi_system_interface_addr *smi_addr;
4380 recv_msg->msgid = 0;
4381 smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
4382 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4383 smi_addr->channel = IPMI_BMC_CHANNEL;
4384 smi_addr->lun = msg->rsp[0] & 3;
4385 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
4386 recv_msg->msg.netfn = msg->rsp[0] >> 2;
4387 recv_msg->msg.cmd = msg->rsp[1];
4388 memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
4389 recv_msg->msg.data = recv_msg->msg_data;
4390 recv_msg->msg.data_len = msg->rsp_size - 3;
4393 static int handle_read_event_rsp(struct ipmi_smi *intf,
4394 struct ipmi_smi_msg *msg)
4396 struct ipmi_recv_msg *recv_msg, *recv_msg2;
4397 struct list_head msgs;
4398 struct ipmi_user *user;
4399 int rv = 0, deliver_count = 0, index;
4400 unsigned long flags;
4402 if (msg->rsp_size < 19) {
4403 /* Message is too small to be an IPMB event. */
4404 ipmi_inc_stat(intf, invalid_events);
4408 if (msg->rsp[2] != 0) {
4409 /* An error getting the event, just ignore it. */
4413 INIT_LIST_HEAD(&msgs);
4415 spin_lock_irqsave(&intf->events_lock, flags);
4417 ipmi_inc_stat(intf, events);
4420 * Allocate and fill in one message for every user that is getting events.
4423 index = srcu_read_lock(&intf->users_srcu);
4424 list_for_each_entry_rcu(user, &intf->users, link) {
4425 if (!user->gets_events)
4428 recv_msg = ipmi_alloc_recv_msg();
4431 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
4433 list_del(&recv_msg->link);
4434 ipmi_free_recv_msg(recv_msg);
4437 * We couldn't allocate memory for the
4438 * message, so requeue it for handling later.
4447 copy_event_into_recv_msg(recv_msg, msg);
4448 recv_msg->user = user;
4449 kref_get(&user->refcount);
4450 list_add_tail(&recv_msg->link, &msgs);
4452 srcu_read_unlock(&intf->users_srcu, index);
4454 if (deliver_count) {
4455 /* Now deliver all the messages. */
4456 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
4457 list_del(&recv_msg->link);
4458 deliver_local_response(intf, recv_msg);
4460 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
4462 * No one to receive the message; put it in the queue if there
4463 * are not already too many things in the queue.
4465 recv_msg = ipmi_alloc_recv_msg();
4468 * We couldn't allocate memory for the
4469 * message, so requeue it for handling later.
4476 copy_event_into_recv_msg(recv_msg, msg);
4477 list_add_tail(&recv_msg->link, &intf->waiting_events);
4478 intf->waiting_events_count++;
4479 } else if (!intf->event_msg_printed) {
4481 * There are too many things in the queue; discard this message.
4484 dev_warn(intf->si_dev,
4485 "Event queue full, discarding incoming events\n");
4486 intf->event_msg_printed = 1;
4490 spin_unlock_irqrestore(&intf->events_lock, flags);
4495 static int handle_bmc_rsp(struct ipmi_smi *intf,
4496 struct ipmi_smi_msg *msg)
4498 struct ipmi_recv_msg *recv_msg;
4499 struct ipmi_system_interface_addr *smi_addr;
4501 recv_msg = msg->user_data;
4502 if (recv_msg == NULL) {
4503 dev_warn(intf->si_dev,
4504 "IPMI SMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
4508 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4509 recv_msg->msgid = msg->msgid;
4510 smi_addr = ((struct ipmi_system_interface_addr *)
4512 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4513 smi_addr->channel = IPMI_BMC_CHANNEL;
4514 smi_addr->lun = msg->rsp[0] & 3;
4515 recv_msg->msg.netfn = msg->rsp[0] >> 2;
4516 recv_msg->msg.cmd = msg->rsp[1];
4517 memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
4518 recv_msg->msg.data = recv_msg->msg_data;
4519 recv_msg->msg.data_len = msg->rsp_size - 2;
4520 deliver_local_response(intf, recv_msg);
4526 * Handle a received message. Return 1 if the message should be requeued,
4527 * 0 if the message should be freed, or -1 if the message should not
4528 * be freed or requeued.
4530 static int handle_one_recv_msg(struct ipmi_smi *intf,
4531 struct ipmi_smi_msg *msg)
4536 bool is_cmd = !((msg->rsp[0] >> 2) & 1);
4538 dev_dbg(intf->si_dev, "Recv: %*ph\n", msg->rsp_size, msg->rsp);
4540 if (msg->rsp_size < 2) {
4541 /* Message is too small to be correct. */
4542 dev_warn(intf->si_dev,
4543 "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
4544 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
4547 /* Generate an error response for the message. */
4548 msg->rsp[0] = msg->data[0] | (1 << 2);
4549 msg->rsp[1] = msg->data[1];
4550 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4552 } else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
4553 /* Commands must have at least 4 bytes, responses 5. */
4554 if (is_cmd && (msg->rsp_size < 4)) {
4555 ipmi_inc_stat(intf, invalid_commands);
4558 if (!is_cmd && (msg->rsp_size < 5)) {
4559 ipmi_inc_stat(intf, invalid_ipmb_responses);
4560 /* Construct a valid error response. */
4561 msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */
4562 msg->rsp[0] |= (1 << 2); /* Make it a response */
4563 msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */
4564 msg->rsp[1] = msg->data[1]; /* Addr */
4565 msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */
4566 msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */
4567 msg->rsp[3] = msg->data[3]; /* Cmd */
4568 msg->rsp[4] = IPMI_ERR_UNSPECIFIED;
4571 } else if ((msg->data_size >= 2)
4572 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
4573 && (msg->data[1] == IPMI_SEND_MSG_CMD)
4574 && (msg->user_data == NULL)) {
4576 if (intf->in_shutdown)
4580 * This is the local response to a command send; start
4581 * the timer for these. The user_data will not be
4582 * NULL if this is a response send, and we will let
4583 * response sends just go through.
4587 * Check for errors: if we get certain errors (ones
4588 * that mean basically we can try again later), we
4589 * ignore them and start the timer. Otherwise we
4590 * report the error immediately.
4592 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
4593 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
4594 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
4595 && (msg->rsp[2] != IPMI_BUS_ERR)
4596 && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
4597 int ch = msg->rsp[3] & 0xf;
4598 struct ipmi_channel *chans;
4600 /* Got an error sending the message, handle it. */
4602 chans = READ_ONCE(intf->channel_list)->c;
4603 if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
4604 || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
4605 ipmi_inc_stat(intf, sent_lan_command_errs);
4607 ipmi_inc_stat(intf, sent_ipmb_command_errs);
4608 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
4610 /* The message was sent, start the timer. */
4611 intf_start_seq_timer(intf, msg->msgid);
4614 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
4615 || (msg->rsp[1] != msg->data[1])) {
4617 * The NetFN and Command in the response are not even
4618 * marginally correct.
4620 dev_warn(intf->si_dev,
4621 "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
4622 (msg->data[0] >> 2) | 1, msg->data[1],
4623 msg->rsp[0] >> 2, msg->rsp[1]);
4625 goto return_unspecified;
4628 if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
4629 if ((msg->data[0] >> 2) & 1) {
4630 /* It's a response to a sent response. */
4633 goto process_response_response;
4636 requeue = handle_ipmb_direct_rcv_cmd(intf, msg);
4638 requeue = handle_ipmb_direct_rcv_rsp(intf, msg);
4639 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4640 && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
4641 && (msg->user_data != NULL)) {
4643 * It's a response to a response we sent. For this we
4644 * deliver a send message response to the user.
4646 struct ipmi_recv_msg *recv_msg;
4648 chan = msg->data[2] & 0x0f;
4649 if (chan >= IPMI_MAX_CHANNELS)
4650 /* Invalid channel number */
4654 process_response_response:
4655 recv_msg = msg->user_data;
4661 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
4662 recv_msg->msg.data = recv_msg->msg_data;
4663 recv_msg->msg_data[0] = cc;
4664 recv_msg->msg.data_len = 1;
4665 deliver_local_response(intf, recv_msg);
4666 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4667 && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
4668 struct ipmi_channel *chans;
4670 /* It's from the receive queue. */
4671 chan = msg->rsp[3] & 0xf;
4672 if (chan >= IPMI_MAX_CHANNELS) {
4673 /* Invalid channel number */
4679 * We need to make sure the channels have been initialized.
4680 * The channel_handler routine will set the "curr_channel"
4681 * equal to or greater than IPMI_MAX_CHANNELS when all the
4682 * channels for this interface have been initialized.
4684 if (!intf->channels_ready) {
4685 requeue = 0; /* Throw the message away */
4689 chans = READ_ONCE(intf->channel_list)->c;
4691 switch (chans[chan].medium) {
4692 case IPMI_CHANNEL_MEDIUM_IPMB:
4693 if (msg->rsp[4] & 0x04) {
4695 * It's a response, so find the
4696 * requesting message and send it up.
4698 requeue = handle_ipmb_get_msg_rsp(intf, msg);
4701 * It's a command to the SMS from some other
4702 * entity. Handle that.
4704 requeue = handle_ipmb_get_msg_cmd(intf, msg);
4708 case IPMI_CHANNEL_MEDIUM_8023LAN:
4709 case IPMI_CHANNEL_MEDIUM_ASYNC:
4710 if (msg->rsp[6] & 0x04) {
4712 * It's a response, so find the
4713 * requesting message and send it up.
4715 requeue = handle_lan_get_msg_rsp(intf, msg);
4718 * It's a command to the SMS from some other
4719 * entity. Handle that.
4721 requeue = handle_lan_get_msg_cmd(intf, msg);
4726 /* Check for OEM Channels. Clients had better
4727 register for these commands. */
4728 if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
4729 && (chans[chan].medium
4730 <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
4731 requeue = handle_oem_get_msg_cmd(intf, msg);
4734 * We don't handle the channel type, so just free the message.
4741 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4742 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
4743 /* It's an asynchronous event. */
4744 requeue = handle_read_event_rsp(intf, msg);
4746 /* It's a response from the local BMC. */
4747 requeue = handle_bmc_rsp(intf, msg);
4755 * If there are messages in the queue or pretimeouts, handle them.
4757 static void handle_new_recv_msgs(struct ipmi_smi *intf)
4759 struct ipmi_smi_msg *smi_msg;
4760 unsigned long flags = 0;
4762 int run_to_completion = intf->run_to_completion;
4764 /* See if any waiting messages need to be processed. */
4765 if (!run_to_completion)
4766 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4767 while (!list_empty(&intf->waiting_rcv_msgs)) {
4768 smi_msg = list_entry(intf->waiting_rcv_msgs.next,
4769 struct ipmi_smi_msg, link);
4770 list_del(&smi_msg->link);
4771 if (!run_to_completion)
4772 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4774 rv = handle_one_recv_msg(intf, smi_msg);
4775 if (!run_to_completion)
4776 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4779 * To preserve message order, quit if we
4780 * can't handle a message. Add the message
4781 * back at the head; this is safe because this
4782 * tasklet is the only thing that pulls the messages.
4785 list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
4789 /* Message handled */
4790 ipmi_free_smi_msg(smi_msg);
4791 /* If rv < 0, fatal error; delete but don't free. */
4794 if (!run_to_completion)
4795 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
4798 * If the pretimeout count is non-zero, decrement one from it and
4799 * deliver pretimeouts to all the users.
4801 if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
4802 struct ipmi_user *user;
4805 index = srcu_read_lock(&intf->users_srcu);
4806 list_for_each_entry_rcu(user, &intf->users, link) {
4807 if (user->handler->ipmi_watchdog_pretimeout)
4808 user->handler->ipmi_watchdog_pretimeout(
4809 user->handler_data);
4811 srcu_read_unlock(&intf->users_srcu, index);
static void smi_recv_tasklet(struct tasklet_struct *t)
{
	unsigned long flags = 0; /* keep us warning-free. */
	struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
	int run_to_completion = intf->run_to_completion;
	struct ipmi_smi_msg *newmsg = NULL;

	/*
	 * Start the next message if available.
	 *
	 * Do this here, not in the actual receiver, because we may deadlock:
	 * the lower layer is allowed to hold locks while calling message
	 * delivery.
	 */
	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	if (intf->curr_msg == NULL && !intf->in_shutdown) {
		struct list_head *entry = NULL;

		/* Pick the high priority queue first. */
		if (!list_empty(&intf->hp_xmit_msgs))
			entry = intf->hp_xmit_msgs.next;
		else if (!list_empty(&intf->xmit_msgs))
			entry = intf->xmit_msgs.next;

		if (entry) {
			list_del(entry);
			newmsg = list_entry(entry, struct ipmi_smi_msg, link);
			intf->curr_msg = newmsg;
		}
	}

	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
	if (newmsg)
		intf->handlers->sender(intf->send_info, newmsg);

	handle_new_recv_msgs(intf);
}
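/*
 * Note: the sender callback above is deliberately invoked only after
 * xmit_msgs_lock has been dropped (or never taken, in run-to-completion
 * mode), since the lower layer may hold its own locks while calling
 * back into message delivery.
 */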
/* Handle a new message from the lower layer. */
void ipmi_smi_msg_received(struct ipmi_smi *intf,
			   struct ipmi_smi_msg *msg)
{
	unsigned long flags = 0; /* keep us warning-free. */
	int run_to_completion = intf->run_to_completion;

	/*
	 * To preserve message order, we keep a queue and deliver from
	 * a tasklet.
	 */
	if (!run_to_completion)
		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
	list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
				       flags);
	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	/*
	 * We can get an asynchronous event or receive message in addition
	 * to commands we send.
	 */
	if (msg == intf->curr_msg)
		intf->curr_msg = NULL;
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (run_to_completion)
		smi_recv_tasklet(&intf->recv_tasklet);
	else
		tasklet_schedule(&intf->recv_tasklet);
}
EXPORT_SYMBOL(ipmi_smi_msg_received);
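/*
 * Illustrative sketch (hypothetical lower-layer code, not part of this
 * file): an SMI driver that has finished receiving a response would
 * typically fill in the response buffer and hand the message back:
 *
 *	memcpy(msg->rsp, buf, len);
 *	msg->rsp_size = len;
 *	ipmi_smi_msg_received(intf, msg);
 */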
void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
{
	if (intf->in_shutdown)
		return;

	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
	tasklet_schedule(&intf->recv_tasklet);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
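/*
 * Note that atomic_set(..., 1) coalesces pretimeouts: no matter how
 * many times this is called before the tasklet runs, the users see a
 * single pretimeout callback.
 */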
static struct ipmi_smi_msg *
smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
		  unsigned char seq, long seqid)
{
	struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();

	if (!smi_msg)
		/*
		 * If we can't allocate the message, then just return; we
		 * get 4 retries, so this should be OK.
		 */
		return NULL;

	memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
	smi_msg->data_size = recv_msg->msg.data_len;
	smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);

	dev_dbg(intf->si_dev, "Resend: %*ph\n",
		smi_msg->data_size, smi_msg->data);

	return smi_msg;
}
static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
			      struct list_head *timeouts,
			      unsigned long timeout_period,
			      int slot, unsigned long *flags,
			      bool *need_timer)
{
	struct ipmi_recv_msg *msg;

	if (intf->in_shutdown)
		return;

	if (!ent->inuse)
		return;

	if (timeout_period < ent->timeout) {
		ent->timeout -= timeout_period;
		*need_timer = true;
		return;
	}

	if (ent->retries_left == 0) {
		/* The message has used all its retries. */
		ent->inuse = 0;
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		msg = ent->recv_msg;
		list_add_tail(&msg->link, timeouts);
		if (ent->broadcast)
			ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
		else if (is_lan_addr(&ent->recv_msg->addr))
			ipmi_inc_stat(intf, timed_out_lan_commands);
		else
			ipmi_inc_stat(intf, timed_out_ipmb_commands);
	} else {
		struct ipmi_smi_msg *smi_msg;
		/* More retries, send again. */

		*need_timer = true;

		/*
		 * Start with the max timer, set to the normal timer after
		 * the message is sent.
		 */
		ent->timeout = MAX_MSG_TIMEOUT;
		ent->retries_left--;
		smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
					    ent->seqid);
		if (!smi_msg) {
			if (is_lan_addr(&ent->recv_msg->addr))
				ipmi_inc_stat(intf,
					      dropped_rexmit_lan_commands);
			else
				ipmi_inc_stat(intf,
					      dropped_rexmit_ipmb_commands);
			return;
		}

		spin_unlock_irqrestore(&intf->seq_lock, *flags);

		/*
		 * Send the new message.  We send with a zero
		 * priority.  It timed out, I doubt time is that
		 * critical now, and high priority messages are really
		 * only for messages to the local MC, which don't get
		 * resent.
		 */
		if (intf->handlers) {
			if (is_lan_addr(&ent->recv_msg->addr))
				ipmi_inc_stat(intf,
					      retransmitted_lan_commands);
			else
				ipmi_inc_stat(intf,
					      retransmitted_ipmb_commands);

			smi_send(intf, intf->handlers, smi_msg, 0);
		} else
			ipmi_free_smi_msg(smi_msg);

		spin_lock_irqsave(&intf->seq_lock, *flags);
	}
}
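/*
 * Worked example of the timeout arithmetic above (assuming a
 * hypothetical 1000 ms timer period): a message whose ent->timeout is
 * 2500 ms survives two calls (2500 -> 1500 -> 500); on the third call
 * timeout_period >= ent->timeout, so the message is either retransmitted
 * with ent->timeout reset to MAX_MSG_TIMEOUT or, with no retries left,
 * queued on the timeouts list for an error response.
 */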
static bool ipmi_timeout_handler(struct ipmi_smi *intf,
				 unsigned long timeout_period)
{
	struct list_head timeouts;
	struct ipmi_recv_msg *msg, *msg2;
	unsigned long flags;
	int i;
	bool need_timer = false;

	if (!intf->bmc_registered) {
		kref_get(&intf->refcount);
		if (!schedule_work(&intf->bmc_reg_work)) {
			kref_put(&intf->refcount, intf_free);
			need_timer = true;
		}
	}
	/*
	 * Go through the seq table and find any messages that
	 * have timed out, putting them in the timeouts
	 * list.
	 */
	INIT_LIST_HEAD(&timeouts);
	spin_lock_irqsave(&intf->seq_lock, flags);
	if (intf->ipmb_maintenance_mode_timeout) {
		if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
			intf->ipmb_maintenance_mode_timeout = 0;
		else
			intf->ipmb_maintenance_mode_timeout -= timeout_period;
	}
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
		check_msg_timeout(intf, &intf->seq_table[i],
				  &timeouts, timeout_period, i,
				  &flags, &need_timer);
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	list_for_each_entry_safe(msg, msg2, &timeouts, link)
		deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
	/*
	 * Maintenance mode handling.  Check the timeout
	 * optimistically before we claim the lock.  It may
	 * mean a timeout gets missed occasionally, but that
	 * only means the timeout gets extended by one period
	 * in that case.  No big deal, and it avoids the lock
	 * most of the time.
	 */
	if (intf->auto_maintenance_timeout > 0) {
		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		if (intf->auto_maintenance_timeout > 0) {
			intf->auto_maintenance_timeout -= timeout_period;
			if (!intf->maintenance_mode
			    && (intf->auto_maintenance_timeout <= 0)) {
				intf->maintenance_mode_enable = false;
				maintenance_mode_update(intf);
			}
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	tasklet_schedule(&intf->recv_tasklet);

	return need_timer;
}
static void ipmi_request_event(struct ipmi_smi *intf)
{
	/* No event requests when in maintenance mode. */
	if (intf->maintenance_mode_enable)
		return;

	if (!intf->in_shutdown)
		intf->handlers->request_events(intf->send_info);
}
static struct timer_list ipmi_timer;

static atomic_t stop_operation;
static void ipmi_timeout(struct timer_list *unused)
{
	struct ipmi_smi *intf;
	bool need_timer = false;
	int index;

	if (atomic_read(&stop_operation))
		return;
	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (atomic_read(&intf->event_waiters)) {
			intf->ticks_to_req_ev--;
			if (intf->ticks_to_req_ev == 0) {
				ipmi_request_event(intf);
				intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
			}
			need_timer = true;
		}

		need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	if (need_timer)
		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}
static void need_waiter(struct ipmi_smi *intf)
{
	/* Racy, but worst case we start the timer twice. */
	if (!timer_pending(&ipmi_timer))
		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}
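/*
 * The race noted above is harmless because mod_timer() on a timer that
 * just became pending simply re-arms it; the worst case is one
 * redundant re-arm to the same expiry.
 */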
static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
static void free_smi_msg(struct ipmi_smi_msg *msg)
{
	atomic_dec(&smi_msg_inuse_count);
	/* Try to keep as much stuff out of the panic path as possible. */
	if (!oops_in_progress)
		kfree(msg);
}
struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
{
	struct ipmi_smi_msg *rv;

	rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
	if (rv) {
		rv->done = free_smi_msg;
		rv->user_data = NULL;
		rv->type = IPMI_SMI_MSG_TYPE_NORMAL;
		atomic_inc(&smi_msg_inuse_count);
	}
	return rv;
}
EXPORT_SYMBOL(ipmi_alloc_smi_msg);
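/*
 * GFP_ATOMIC is used here because messages may be allocated from
 * interrupt or timer context.  The inuse counters incremented by the
 * allocators are purely diagnostic; cleanup_ipmi() reports any nonzero
 * count at module exit as a leak.
 */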
static void free_recv_msg(struct ipmi_recv_msg *msg)
{
	atomic_dec(&recv_msg_inuse_count);
	/* Try to keep as much stuff out of the panic path as possible. */
	if (!oops_in_progress)
		kfree(msg);
}
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
{
	struct ipmi_recv_msg *rv;

	rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
	if (rv) {
		rv->user = NULL;
		rv->done = free_recv_msg;
		atomic_inc(&recv_msg_inuse_count);
	}
	return rv;
}
void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
	if (msg->user && !oops_in_progress)
		kref_put(&msg->user->refcount, free_user);
	msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);
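/*
 * For messages that came from ipmi_alloc_recv_msg(), msg->done points
 * at free_recv_msg(), which skips kfree() while a panic is in progress
 * so the panic path never calls into the allocator.
 */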
static atomic_t panic_done_count = ATOMIC_INIT(0);
static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
	atomic_dec(&panic_done_count);
}
static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
	atomic_dec(&panic_done_count);
}
/*
 * Inside a panic, send a message and wait for a response.
 */
static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
					struct ipmi_addr *addr,
					struct kernel_ipmi_msg *msg)
{
	struct ipmi_smi_msg smi_msg;
	struct ipmi_recv_msg recv_msg;
	int rv;

	smi_msg.done = dummy_smi_done_handler;
	recv_msg.done = dummy_recv_done_handler;
	atomic_add(2, &panic_done_count);
	rv = i_ipmi_request(NULL,
			    intf,
			    addr,
			    0,
			    msg,
			    intf,
			    &smi_msg,
			    &recv_msg,
			    0,
			    intf->addrinfo[0].address,
			    intf->addrinfo[0].lun,
			    0, 1); /* Don't retry, and don't wait. */
	if (rv)
		atomic_sub(2, &panic_done_count);
	else if (intf->handlers->flush_messages)
		intf->handlers->flush_messages(intf->send_info);

	while (atomic_read(&panic_done_count) != 0)
		ipmi_poll(intf);
}
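/*
 * Note that smi_msg and recv_msg above live on the stack; the dummy
 * done handlers only decrement panic_done_count and never kfree(), so
 * completion here is just the counter draining while the interface is
 * polled by hand (interrupts may be dead during a panic).
 */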
static void event_receiver_fetcher(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/* A get event receiver command, save it. */
		intf->event_receiver = msg->msg.data[1];
		intf->event_receiver_lun = msg->msg.data[2] & 0x3;
	}
}
static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/*
		 * A get device id command, save if we are an event
		 * receiver or generator.
		 */
		intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
		intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
	}
}
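/*
 * The bit tests above decode the "additional device support" byte of
 * the Get Device ID response: bit 2 is "SEL device" and bit 5 is "IPMB
 * event generator" (per the IPMI specification's Get Device ID command).
 */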
static void send_panic_events(struct ipmi_smi *intf, char *str)
{
	struct kernel_ipmi_msg msg;
	unsigned char data[16];
	struct ipmi_system_interface_addr *si;
	struct ipmi_addr addr;
	char *p = str;
	struct ipmi_ipmb_addr *ipmb;
	int j;

	if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
		return;

	si = (struct ipmi_system_interface_addr *) &addr;
	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si->channel = IPMI_BMC_CHANNEL;
	si->lun = 0;
	/* Fill in an event telling that we have failed. */
	msg.netfn = 0x04; /* Sensor or Event. */
	msg.cmd = 2; /* Platform event command. */
	msg.data = data;
	msg.data_len = 8;
	data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
	data[1] = 0x03; /* This is for IPMI 1.0. */
	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */

	/*
	 * Put a few breadcrumbs in.  Hopefully later we can add more things
	 * to make the panic events more useful.
	 */
	if (str) {
		data[3] = str[0];
		data[6] = str[1];
		data[7] = str[2];
	}
	/* Send the event announcing the panic. */
	ipmi_panic_request_and_wait(intf, &addr, &msg);

	/*
	 * On every interface, dump a bunch of OEM events holding the
	 * panic string.
	 */
	if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
		return;

	/*
	 * intf_num is used as a marker to tell if the
	 * interface is valid.  Thus we need a read barrier to
	 * make sure data fetched before checking intf_num
	 * won't be used.
	 */
	smp_rmb();

	/*
	 * First job here is to figure out where to send the
	 * OEM events.  There's no way in IPMI to send OEM
	 * events using an event send command, so we have to
	 * find the SEL to put them in and stick them in
	 * there.
	 */

	/* Get capabilities from the get device id. */
	intf->local_sel_device = 0;
	intf->local_event_generator = 0;
	intf->event_receiver = 0;
	/* Request the device info from the local MC. */
	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
	msg.data = NULL;
	msg.data_len = 0;
	intf->null_user_handler = device_id_fetcher;
	ipmi_panic_request_and_wait(intf, &addr, &msg);

	if (intf->local_event_generator) {
		/* Request the event receiver from the local MC. */
		msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
		msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
		msg.data = NULL;
		msg.data_len = 0;
		intf->null_user_handler = event_receiver_fetcher;
		ipmi_panic_request_and_wait(intf, &addr, &msg);
	}
	intf->null_user_handler = NULL;
	/*
	 * Validate the event receiver.  The low bit must not
	 * be 1 (it must be a valid IPMB address), it cannot
	 * be zero, and it must not be my address.
	 */
	if (((intf->event_receiver & 1) == 0)
	    && (intf->event_receiver != 0)
	    && (intf->event_receiver != intf->addrinfo[0].address)) {
		/*
		 * The event receiver is valid, send an IPMB
		 * message.
		 */
		ipmb = (struct ipmi_ipmb_addr *) &addr;
		ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
		ipmb->channel = 0; /* FIXME - is this right? */
		ipmb->lun = intf->event_receiver_lun;
		ipmb->slave_addr = intf->event_receiver;
	} else if (intf->local_sel_device) {
		/*
		 * The event receiver was not valid (or was
		 * me), but I am an SEL device, just dump it
		 * in my SEL.
		 */
		si = (struct ipmi_system_interface_addr *) &addr;
		si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
		si->channel = IPMI_BMC_CHANNEL;
		si->lun = 0;
	} else
		return; /* Nowhere to send the event. */
	msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
	msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
	msg.data = data;
	msg.data_len = 16;

	j = 0;
	while (*p) {
		int size = strnlen(p, 11);

		data[0] = 0;
		data[1] = 0;
		data[2] = 0xf0; /* OEM event without timestamp. */
		data[3] = intf->addrinfo[0].address;
		data[4] = j++; /* sequence # */

		memcpy_and_pad(data+5, 11, p, size, '\0');
		p += size;

		ipmi_panic_request_and_wait(intf, &addr, &msg);
	}
}
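/*
 * Record layout sketch for the loop above: each Add SEL Entry request
 * is a 16-byte OEM record (type 0xf0, no timestamp), with 11 bytes of
 * panic string per record starting at data[5], hence strnlen(p, 11)
 * and the sequence number in data[4] to keep the chunks ordered.
 */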
static int has_panicked;
static int panic_event(struct notifier_block *this,
		       unsigned long event,
		       void *ptr)
{
	struct ipmi_smi *intf;
	struct ipmi_user *user;

	if (has_panicked)
		return NOTIFY_DONE;
	has_panicked = 1;
	/* For every registered interface, set it to run to completion. */
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (!intf->handlers || intf->intf_num == -1)
			/* Interface is not ready. */
			continue;

		if (!intf->handlers->poll)
			continue;

		/*
		 * If we were interrupted while locking xmit_msgs_lock or
		 * waiting_rcv_msgs_lock, the corresponding list may be
		 * corrupted.  In this case, drop items on the list for
		 * safety.
		 */
		if (!spin_trylock(&intf->xmit_msgs_lock)) {
			INIT_LIST_HEAD(&intf->xmit_msgs);
			INIT_LIST_HEAD(&intf->hp_xmit_msgs);
		} else
			spin_unlock(&intf->xmit_msgs_lock);

		if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
			INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
		else
			spin_unlock(&intf->waiting_rcv_msgs_lock);
		intf->run_to_completion = 1;
		if (intf->handlers->set_run_to_completion)
			intf->handlers->set_run_to_completion(intf->send_info,
							      1);

		list_for_each_entry_rcu(user, &intf->users, link) {
			if (user->handler->ipmi_panic_handler)
				user->handler->ipmi_panic_handler(
					user->handler_data);
		}

		send_panic_events(intf, ptr);
	}

	return NOTIFY_DONE;
}
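/*
 * spin_trylock() is used above instead of spin_lock() because the
 * panic may have interrupted a holder of these locks on this CPU;
 * blocking would deadlock, so a busy lock is treated as a corrupted
 * list and the list is simply reinitialized.
 */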
/* Must be called with ipmi_interfaces_mutex held. */
static int ipmi_register_driver(void)
{
	int rv;

	if (drvregistered)
		return 0;

	rv = driver_register(&ipmidriver.driver);
	if (rv)
		pr_err("Could not register IPMI driver\n");
	else
		drvregistered = true;

	return rv;
}
static struct notifier_block panic_block = {
	.notifier_call	= panic_event,
	.next		= NULL,
	.priority	= 200	/* priority: INT_MAX >= x >= 0 */
};
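/*
 * Priority 200 makes this notifier run before default (priority 0)
 * panic notifiers, so the event log is written while the system state
 * is still relatively intact.
 */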
static int ipmi_init_msghandler(void)
{
	int rv;

	mutex_lock(&ipmi_interfaces_mutex);
	rv = ipmi_register_driver();
	if (rv)
		goto out;
	if (initialized)
		goto out;

	rv = init_srcu_struct(&ipmi_interfaces_srcu);
	if (rv)
		goto out;

	remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
	if (!remove_work_wq) {
		pr_err("unable to create ipmi-msghandler-remove-wq workqueue\n");
		rv = -ENOMEM;
		goto out_wq;
	}

	timer_setup(&ipmi_timer, ipmi_timeout, 0);
	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);

	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

	initialized = true;

out_wq:
	if (rv)
		cleanup_srcu_struct(&ipmi_interfaces_srcu);
out:
	mutex_unlock(&ipmi_interfaces_mutex);
	return rv;
}
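/*
 * Note the two-stage initialization: module load below only registers
 * the driver, while ipmi_init_msghandler() above does the heavy setup
 * (SRCU, workqueue, timer, panic notifier) and is invoked on demand
 * when the first interface is registered.
 */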
static int __init ipmi_init_msghandler_mod(void)
{
	int rv;

	pr_info("version " IPMI_DRIVER_VERSION "\n");

	mutex_lock(&ipmi_interfaces_mutex);
	rv = ipmi_register_driver();
	mutex_unlock(&ipmi_interfaces_mutex);

	return rv;
}
static void __exit cleanup_ipmi(void)
{
	int count;

	if (initialized) {
		destroy_workqueue(remove_work_wq);

		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &panic_block);
		/*
		 * This can't be called if any interfaces exist, so no worry
		 * about shutting down the interfaces.
		 */
		/*
		 * Tell the timer to stop, then wait for it to stop.  This
		 * avoids problems with race conditions removing the timer
		 * here.
		 */
		atomic_set(&stop_operation, 1);
		del_timer_sync(&ipmi_timer);

		initialized = false;
		/* Check for buffer leaks. */
		count = atomic_read(&smi_msg_inuse_count);
		if (count != 0)
			pr_warn("SMI message count %d at exit\n", count);
		count = atomic_read(&recv_msg_inuse_count);
		if (count != 0)
			pr_warn("recv message count %d at exit\n", count);

		cleanup_srcu_struct(&ipmi_interfaces_srcu);
	}
	if (drvregistered)
		driver_unregister(&ipmidriver.driver);
}
module_exit(cleanup_ipmi);

module_init(ipmi_init_msghandler_mod);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
MODULE_VERSION(IPMI_DRIVER_VERSION);
MODULE_SOFTDEP("post: ipmi_devintf");