--- zzzz-none-000/linux-3.10.107/drivers/char/ipmi/ipmi_si_intf.c 2017-06-27 09:49:32.000000000 +0000 +++ scorpion-7490-727/linux-3.10.107/drivers/char/ipmi/ipmi_si_intf.c 2021-02-04 17:41:59.000000000 +0000 @@ -61,16 +61,19 @@ #include #include #include "ipmi_si_sm.h" -#include #include #include #include -#include #include #include #include #include +#ifdef CONFIG_PARISC +#include /* for register_parisc_driver() stuff */ +#include +#endif + #define PFX "ipmi_si: " /* Measure times between events in the driver. */ @@ -88,12 +91,9 @@ SI_GETTING_FLAGS, SI_GETTING_EVENTS, SI_CLEARING_FLAGS, - SI_CLEARING_FLAGS_THEN_SET_IRQ, SI_GETTING_MESSAGES, - SI_ENABLE_INTERRUPTS1, - SI_ENABLE_INTERRUPTS2, - SI_DISABLE_INTERRUPTS1, - SI_DISABLE_INTERRUPTS2 + SI_CHECKING_ENABLES, + SI_SETTING_ENABLES /* FIXME - add watchdog stuff. */ }; @@ -107,10 +107,6 @@ }; static char *si_to_str[] = { "kcs", "smic", "bt" }; -static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI", - "ACPI", "SMBIOS", "PCI", - "device-tree", "default" }; - #define DEVICE_NAME "ipmi_si" static struct platform_driver ipmi_driver; @@ -167,11 +163,10 @@ int intf_num; ipmi_smi_t intf; struct si_sm_data *si_sm; - struct si_sm_handlers *handlers; + const struct si_sm_handlers *handlers; enum si_type si_type; spinlock_t si_lock; - struct list_head xmit_msgs; - struct list_head hp_xmit_msgs; + struct ipmi_smi_msg *waiting_msg; struct ipmi_smi_msg *curr_msg; enum si_intf_state si_state; @@ -213,7 +208,7 @@ unsigned char msg_flags; /* Does the BMC have an event buffer? */ - char has_event_buffer; + bool has_event_buffer; /* * If set to true, this will request events the next time the @@ -226,7 +221,7 @@ * call. Generally used after a panic to make sure stuff goes * out. */ - int run_to_completion; + bool run_to_completion; /* The I/O port of an SI interface. */ int port; @@ -250,8 +245,8 @@ /* The time (in jiffies) the last timeout occurred at. */ unsigned long last_timeout_jiffies; - /* Used to gracefully stop the timer without race conditions. */ - atomic_t stop_operation; + /* Are we waiting for the events, pretimeouts, received msgs? */ + atomic_t need_watch; /* * The driver will disable interrupts when it gets into a @@ -259,7 +254,34 @@ * memory. Once that situation clears up, it will re-enable * interrupts. */ - int interrupt_disabled; + bool interrupt_disabled; + + /* + * Does the BMC support events? + */ + bool supports_event_msg_buff; + + /* + * Can we disable interrupts the global enables receive irq + * bit? There are currently two forms of brokenness, some + * systems cannot disable the bit (which is technically within + * the spec but a bad idea) and some systems have the bit + * forced to zero even though interrupts work (which is + * clearly outside the spec). The next bool tells which form + * of brokenness is present. + */ + bool cannot_disable_irq; + + /* + * Some systems are broken and cannot set the irq enable + * bit, even if they support interrupts. + */ + bool irq_enable_broken; + + /* + * Did we get an attention that we did not handle? + */ + bool got_attn; /* From the get device id response... */ struct ipmi_device_id device_id; @@ -272,7 +294,7 @@ * True if we allocated the device, false if it came from * someplace else (like PCI). */ - int dev_registered; + bool dev_registered; /* Slave address, could be reported from DMI. 
*/ unsigned char slave_addr; @@ -296,22 +318,34 @@ static int force_kipmid[SI_MAX_PARMS]; static int num_force_kipmid; #ifdef CONFIG_PCI -static int pci_registered; +static bool pci_registered; #endif -#ifdef CONFIG_ACPI -static int pnp_registered; +#ifdef CONFIG_PARISC +static bool parisc_registered; #endif static unsigned int kipmid_max_busy_us[SI_MAX_PARMS]; static int num_max_busy_us; -static int unload_when_empty = 1; +static bool unload_when_empty = true; static int add_smi(struct smi_info *smi); static int try_smi_init(struct smi_info *smi); static void cleanup_one_si(struct smi_info *to_clean); static void cleanup_ipmi_si(void); +#ifdef DEBUG_TIMING +void debug_timestamp(char *msg) +{ + struct timespec64 t; + + getnstimeofday64(&t); + pr_debug("**%s: %lld.%9.9ld\n", msg, (long long) t.tv_sec, t.tv_nsec); +} +#else +#define debug_timestamp(x) +#endif + static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); static int register_xaction_notifier(struct notifier_block *nb) { @@ -322,7 +356,10 @@ struct ipmi_smi_msg *msg) { /* Deliver the message to the upper layer. */ - ipmi_smi_msg_received(smi_info->intf, msg); + if (smi_info->intf) + ipmi_smi_msg_received(smi_info->intf, msg); + else + ipmi_free_smi_msg(msg); } static void return_hosed_msg(struct smi_info *smi_info, int cCode) @@ -346,32 +383,16 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info) { int rv; - struct list_head *entry = NULL; -#ifdef DEBUG_TIMING - struct timeval t; -#endif - /* Pick the high priority queue first. */ - if (!list_empty(&(smi_info->hp_xmit_msgs))) { - entry = smi_info->hp_xmit_msgs.next; - } else if (!list_empty(&(smi_info->xmit_msgs))) { - entry = smi_info->xmit_msgs.next; - } - - if (!entry) { + if (!smi_info->waiting_msg) { smi_info->curr_msg = NULL; rv = SI_SM_IDLE; } else { int err; - list_del(entry); - smi_info->curr_msg = list_entry(entry, - struct ipmi_smi_msg, - link); -#ifdef DEBUG_TIMING - do_gettimeofday(&t); - printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec); -#endif + smi_info->curr_msg = smi_info->waiting_msg; + smi_info->waiting_msg = NULL; + debug_timestamp("Start2"); err = atomic_notifier_call_chain(&xaction_notifier_list, 0, smi_info); if (err & NOTIFY_STOP_MASK) { @@ -391,33 +412,42 @@ return rv; } -static void start_enable_irq(struct smi_info *smi_info) +static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) { - unsigned char msg[2]; + smi_info->last_timeout_jiffies = jiffies; + mod_timer(&smi_info->si_timer, new_val); + smi_info->timer_running = true; +} - /* - * If we are enabling interrupts, we have to tell the - * BMC to use them. - */ - msg[0] = (IPMI_NETFN_APP_REQUEST << 2); - msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; +/* + * Start a new message and (re)start the timer and thread. 
+ */ +static void start_new_msg(struct smi_info *smi_info, unsigned char *msg, + unsigned int size) +{ + smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES); - smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); - smi_info->si_state = SI_ENABLE_INTERRUPTS1; + if (smi_info->thread) + wake_up_process(smi_info->thread); + + smi_info->handlers->start_transaction(smi_info->si_sm, msg, size); } -static void start_disable_irq(struct smi_info *smi_info) +static void start_check_enables(struct smi_info *smi_info, bool start_timer) { unsigned char msg[2]; msg[0] = (IPMI_NETFN_APP_REQUEST << 2); msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; - smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); - smi_info->si_state = SI_DISABLE_INTERRUPTS1; + if (start_timer) + start_new_msg(smi_info, msg, 2); + else + smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); + smi_info->si_state = SI_CHECKING_ENABLES; } -static void start_clear_flags(struct smi_info *smi_info) +static void start_clear_flags(struct smi_info *smi_info, bool start_timer) { unsigned char msg[3]; @@ -426,15 +456,33 @@ msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; msg[2] = WDT_PRE_TIMEOUT_INT; - smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); + if (start_timer) + start_new_msg(smi_info, msg, 3); + else + smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); smi_info->si_state = SI_CLEARING_FLAGS; } -static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) +static void start_getting_msg_queue(struct smi_info *smi_info) { - smi_info->last_timeout_jiffies = jiffies; - mod_timer(&smi_info->si_timer, new_val); - smi_info->timer_running = true; + smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); + smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD; + smi_info->curr_msg->data_size = 2; + + start_new_msg(smi_info, smi_info->curr_msg->data, + smi_info->curr_msg->data_size); + smi_info->si_state = SI_GETTING_MESSAGES; +} + +static void start_getting_events(struct smi_info *smi_info) +{ + smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); + smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; + smi_info->curr_msg->data_size = 2; + + start_new_msg(smi_info, smi_info->curr_msg->data, + smi_info->curr_msg->data_size); + smi_info->si_state = SI_GETTING_EVENTS; } /* @@ -442,23 +490,49 @@ * allocate messages, we just leave them in the BMC and run the system * polled until we can allocate some memory. Once we have some * memory, we will re-enable the interrupt. + * + * Note that we cannot just use disable_irq(), since the interrupt may + * be shared. */ -static inline void disable_si_irq(struct smi_info *smi_info) +static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer) { if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { - start_disable_irq(smi_info); - smi_info->interrupt_disabled = 1; - if (!atomic_read(&smi_info->stop_operation)) - smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES); + smi_info->interrupt_disabled = true; + start_check_enables(smi_info, start_timer); + return true; } + return false; } -static inline void enable_si_irq(struct smi_info *smi_info) +static inline bool enable_si_irq(struct smi_info *smi_info) { if ((smi_info->irq) && (smi_info->interrupt_disabled)) { - start_enable_irq(smi_info); - smi_info->interrupt_disabled = 0; + smi_info->interrupt_disabled = false; + start_check_enables(smi_info, true); + return true; } + return false; +} + +/* + * Allocate a message. 
If unable to allocate, start the interrupt + * disable process and return NULL. If able to allocate but + * interrupts are disabled, free the message and return NULL after + * starting the interrupt enable process. + */ +static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info) +{ + struct ipmi_smi_msg *msg; + + msg = ipmi_alloc_smi_msg(); + if (!msg) { + if (!disable_si_irq(smi_info, true)) + smi_info->si_state = SI_NORMAL; + } else if (enable_si_irq(smi_info)) { + ipmi_free_smi_msg(msg); + msg = NULL; + } + return msg; } static void handle_flags(struct smi_info *smi_info) @@ -468,47 +542,24 @@ /* Watchdog pre-timeout */ smi_inc_stat(smi_info, watchdog_pretimeouts); - start_clear_flags(smi_info); + start_clear_flags(smi_info, true); smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; - ipmi_smi_watchdog_pretimeout(smi_info->intf); + if (smi_info->intf) + ipmi_smi_watchdog_pretimeout(smi_info->intf); } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) { /* Messages available. */ - smi_info->curr_msg = ipmi_alloc_smi_msg(); - if (!smi_info->curr_msg) { - disable_si_irq(smi_info); - smi_info->si_state = SI_NORMAL; + smi_info->curr_msg = alloc_msg_handle_irq(smi_info); + if (!smi_info->curr_msg) return; - } - enable_si_irq(smi_info); - - smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); - smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD; - smi_info->curr_msg->data_size = 2; - smi_info->handlers->start_transaction( - smi_info->si_sm, - smi_info->curr_msg->data, - smi_info->curr_msg->data_size); - smi_info->si_state = SI_GETTING_MESSAGES; + start_getting_msg_queue(smi_info); } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) { /* Events available. */ - smi_info->curr_msg = ipmi_alloc_smi_msg(); - if (!smi_info->curr_msg) { - disable_si_irq(smi_info); - smi_info->si_state = SI_NORMAL; + smi_info->curr_msg = alloc_msg_handle_irq(smi_info); + if (!smi_info->curr_msg) return; - } - enable_si_irq(smi_info); - - smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); - smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; - smi_info->curr_msg->data_size = 2; - smi_info->handlers->start_transaction( - smi_info->si_sm, - smi_info->curr_msg->data, - smi_info->curr_msg->data_size); - smi_info->si_state = SI_GETTING_EVENTS; + start_getting_events(smi_info); } else if (smi_info->msg_flags & OEM_DATA_AVAIL && smi_info->oem_data_avail_handler) { if (smi_info->oem_data_avail_handler(smi_info)) @@ -517,15 +568,56 @@ smi_info->si_state = SI_NORMAL; } +/* + * Global enables we care about. 
+ */ +#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \ + IPMI_BMC_EVT_MSG_INTR) + +static u8 current_global_enables(struct smi_info *smi_info, u8 base, + bool *irq_on) +{ + u8 enables = 0; + + if (smi_info->supports_event_msg_buff) + enables |= IPMI_BMC_EVT_MSG_BUFF; + + if (((smi_info->irq && !smi_info->interrupt_disabled) || + smi_info->cannot_disable_irq) && + !smi_info->irq_enable_broken) + enables |= IPMI_BMC_RCV_MSG_INTR; + + if (smi_info->supports_event_msg_buff && + smi_info->irq && !smi_info->interrupt_disabled && + !smi_info->irq_enable_broken) + enables |= IPMI_BMC_EVT_MSG_INTR; + + *irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR); + + return enables; +} + +static void check_bt_irq(struct smi_info *smi_info, bool irq_on) +{ + u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG); + + irqstate &= IPMI_BT_INTMASK_ENABLE_IRQ_BIT; + + if ((bool)irqstate == irq_on) + return; + + if (irq_on) + smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, + IPMI_BT_INTMASK_ENABLE_IRQ_BIT); + else + smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0); +} + static void handle_transaction_done(struct smi_info *smi_info) { struct ipmi_smi_msg *msg; -#ifdef DEBUG_TIMING - struct timeval t; - do_gettimeofday(&t); - printk(KERN_DEBUG "**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec); -#endif + debug_timestamp("Done"); switch (smi_info->si_state) { case SI_NORMAL: if (!smi_info->curr_msg) @@ -571,7 +663,6 @@ } case SI_CLEARING_FLAGS: - case SI_CLEARING_FLAGS_THEN_SET_IRQ: { unsigned char msg[3]; @@ -582,10 +673,7 @@ dev_warn(smi_info->dev, "Error clearing flags: %2.2x\n", msg[2]); } - if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ) - start_enable_irq(smi_info); - else - smi_info->si_state = SI_NORMAL; + smi_info->si_state = SI_NORMAL; break; } @@ -665,9 +753,11 @@ break; } - case SI_ENABLE_INTERRUPTS1: + case SI_CHECKING_ENABLES: { unsigned char msg[4]; + u8 enables; + bool irq_on; /* We got the flags from the SMI, now handle them. */ smi_info->handlers->get_result(smi_info->si_sm, msg, 4); @@ -677,70 +767,53 @@ dev_warn(smi_info->dev, "Maybe ok, but ipmi might run very slowly.\n"); smi_info->si_state = SI_NORMAL; - } else { + break; + } + enables = current_global_enables(smi_info, 0, &irq_on); + if (smi_info->si_type == SI_BT) + /* BT has its own interrupt enable bit. */ + check_bt_irq(smi_info, irq_on); + if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) { + /* Enables are not correct, fix them. */ msg[0] = (IPMI_NETFN_APP_REQUEST << 2); msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; - msg[2] = (msg[3] | - IPMI_BMC_RCV_MSG_INTR | - IPMI_BMC_EVT_MSG_INTR); + msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK); smi_info->handlers->start_transaction( smi_info->si_sm, msg, 3); - smi_info->si_state = SI_ENABLE_INTERRUPTS2; + smi_info->si_state = SI_SETTING_ENABLES; + } else if (smi_info->supports_event_msg_buff) { + smi_info->curr_msg = ipmi_alloc_smi_msg(); + if (!smi_info->curr_msg) { + smi_info->si_state = SI_NORMAL; + break; + } + start_getting_msg_queue(smi_info); + } else { + smi_info->si_state = SI_NORMAL; } break; } - case SI_ENABLE_INTERRUPTS2: + case SI_SETTING_ENABLES: { unsigned char msg[4]; - /* We got the flags from the SMI, now handle them. 
*/ smi_info->handlers->get_result(smi_info->si_sm, msg, 4); - if (msg[2] != 0) { + if (msg[2] != 0) dev_warn(smi_info->dev, - "Couldn't set irq info: %x.\n", msg[2]); - dev_warn(smi_info->dev, - "Maybe ok, but ipmi might run very slowly.\n"); - } else - smi_info->interrupt_disabled = 0; - smi_info->si_state = SI_NORMAL; - break; - } + "Could not set the global enables: 0x%x.\n", + msg[2]); - case SI_DISABLE_INTERRUPTS1: - { - unsigned char msg[4]; - - /* We got the flags from the SMI, now handle them. */ - smi_info->handlers->get_result(smi_info->si_sm, msg, 4); - if (msg[2] != 0) { - dev_warn(smi_info->dev, "Could not disable interrupts" - ", failed get.\n"); - smi_info->si_state = SI_NORMAL; + if (smi_info->supports_event_msg_buff) { + smi_info->curr_msg = ipmi_alloc_smi_msg(); + if (!smi_info->curr_msg) { + smi_info->si_state = SI_NORMAL; + break; + } + start_getting_msg_queue(smi_info); } else { - msg[0] = (IPMI_NETFN_APP_REQUEST << 2); - msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; - msg[2] = (msg[3] & - ~(IPMI_BMC_RCV_MSG_INTR | - IPMI_BMC_EVT_MSG_INTR)); - smi_info->handlers->start_transaction( - smi_info->si_sm, msg, 3); - smi_info->si_state = SI_DISABLE_INTERRUPTS2; - } - break; - } - - case SI_DISABLE_INTERRUPTS2: - { - unsigned char msg[4]; - - /* We got the flags from the SMI, now handle them. */ - smi_info->handlers->get_result(smi_info->si_sm, msg, 4); - if (msg[2] != 0) { - dev_warn(smi_info->dev, "Could not disable interrupts" - ", failed set.\n"); + smi_info->si_state = SI_NORMAL; } - smi_info->si_state = SI_NORMAL; break; } } @@ -798,25 +871,34 @@ * We prefer handling attn over new messages. But don't do * this if there is not yet an upper layer to handle anything. */ - if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN) { + if (likely(smi_info->intf) && + (si_sm_result == SI_SM_ATTN || smi_info->got_attn)) { unsigned char msg[2]; - smi_inc_stat(smi_info, attentions); + if (smi_info->si_state != SI_NORMAL) { + /* + * We got an ATTN, but we are doing something else. + * Handle the ATTN later. + */ + smi_info->got_attn = true; + } else { + smi_info->got_attn = false; + smi_inc_stat(smi_info, attentions); - /* - * Got a attn, send down a get message flags to see - * what's causing it. It would be better to handle - * this in the upper layer, but due to the way - * interrupts work with the SMI, that's not really - * possible. - */ - msg[0] = (IPMI_NETFN_APP_REQUEST << 2); - msg[1] = IPMI_GET_MSG_FLAGS_CMD; + /* + * Got a attn, send down a get message flags to see + * what's causing it. It would be better to handle + * this in the upper layer, but due to the way + * interrupts work with the SMI, that's not really + * possible. + */ + msg[0] = (IPMI_NETFN_APP_REQUEST << 2); + msg[1] = IPMI_GET_MSG_FLAGS_CMD; - smi_info->handlers->start_transaction( - smi_info->si_sm, msg, 2); - smi_info->si_state = SI_GETTING_FLAGS; - goto restart; + start_new_msg(smi_info, msg, 2); + smi_info->si_state = SI_GETTING_FLAGS; + goto restart; + } } /* If we are currently idle, try to start the next message. */ @@ -836,104 +918,101 @@ */ atomic_set(&smi_info->req_events, 0); - smi_info->curr_msg = ipmi_alloc_smi_msg(); - if (!smi_info->curr_msg) - goto out; - - smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); - smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; - smi_info->curr_msg->data_size = 2; + /* + * Take this opportunity to check the interrupt and + * message enable state for the BMC. 
The BMC can be + * asynchronously reset, and may thus get interrupts + * disable and messages disabled. + */ + if (smi_info->supports_event_msg_buff || smi_info->irq) { + start_check_enables(smi_info, true); + } else { + smi_info->curr_msg = alloc_msg_handle_irq(smi_info); + if (!smi_info->curr_msg) + goto out; - smi_info->handlers->start_transaction( - smi_info->si_sm, - smi_info->curr_msg->data, - smi_info->curr_msg->data_size); - smi_info->si_state = SI_GETTING_EVENTS; + start_getting_events(smi_info); + } goto restart; } + + if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) { + /* Ok it if fails, the timer will just go off. */ + if (del_timer(&smi_info->si_timer)) + smi_info->timer_running = false; + } + out: return si_sm_result; } -static void sender(void *send_info, - struct ipmi_smi_msg *msg, - int priority) +static void check_start_timer_thread(struct smi_info *smi_info) { - struct smi_info *smi_info = send_info; + if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) { + smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES); + + if (smi_info->thread) + wake_up_process(smi_info->thread); + + start_next_msg(smi_info); + smi_event_handler(smi_info, 0); + } +} + +static void flush_messages(void *send_info) +{ + struct smi_info *smi_info = send_info; enum si_sm_result result; - unsigned long flags; -#ifdef DEBUG_TIMING - struct timeval t; -#endif - if (atomic_read(&smi_info->stop_operation)) { - msg->rsp[0] = msg->data[0] | 4; - msg->rsp[1] = msg->data[1]; - msg->rsp[2] = IPMI_ERR_UNSPECIFIED; - msg->rsp_size = 3; - deliver_recv_msg(smi_info, msg); - return; + /* + * Currently, this function is called only in run-to-completion + * mode. This means we are single-threaded, no need for locks. + */ + result = smi_event_handler(smi_info, 0); + while (result != SI_SM_IDLE) { + udelay(SI_SHORT_TIMEOUT_USEC); + result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC); } +} -#ifdef DEBUG_TIMING - do_gettimeofday(&t); - printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); -#endif +static void sender(void *send_info, + struct ipmi_smi_msg *msg) +{ + struct smi_info *smi_info = send_info; + unsigned long flags; - if (smi_info->run_to_completion) { - /* - * If we are running to completion, then throw it in - * the list and run transactions until everything is - * clear. Priority doesn't matter here. - */ + debug_timestamp("Enqueue"); + if (smi_info->run_to_completion) { /* - * Run to completion means we are single-threaded, no - * need for locks. + * If we are running to completion, start it. Upper + * layer will call flush_messages to clear it out. */ - list_add_tail(&(msg->link), &(smi_info->xmit_msgs)); - - result = smi_event_handler(smi_info, 0); - while (result != SI_SM_IDLE) { - udelay(SI_SHORT_TIMEOUT_USEC); - result = smi_event_handler(smi_info, - SI_SHORT_TIMEOUT_USEC); - } + smi_info->waiting_msg = msg; return; } spin_lock_irqsave(&smi_info->si_lock, flags); - if (priority > 0) - list_add_tail(&msg->link, &smi_info->hp_xmit_msgs); - else - list_add_tail(&msg->link, &smi_info->xmit_msgs); - - if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) { - smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES); - - if (smi_info->thread) - wake_up_process(smi_info->thread); - - start_next_msg(smi_info); - smi_event_handler(smi_info, 0); - } + /* + * The following two lines don't need to be under the lock for + * the lock's sake, but they do need SMP memory barriers to + * avoid getting things out of order. 
We are already claiming + * the lock, anyway, so just do it under the lock to avoid the + * ordering problem. + */ + BUG_ON(smi_info->waiting_msg); + smi_info->waiting_msg = msg; + check_start_timer_thread(smi_info); spin_unlock_irqrestore(&smi_info->si_lock, flags); } -static void set_run_to_completion(void *send_info, int i_run_to_completion) +static void set_run_to_completion(void *send_info, bool i_run_to_completion) { struct smi_info *smi_info = send_info; - enum si_sm_result result; smi_info->run_to_completion = i_run_to_completion; - if (i_run_to_completion) { - result = smi_event_handler(smi_info, 0); - while (result != SI_SM_IDLE) { - udelay(SI_SHORT_TIMEOUT_USEC); - result = smi_event_handler(smi_info, - SI_SHORT_TIMEOUT_USEC); - } - } + if (i_run_to_completion) + flush_messages(smi_info); } /* @@ -941,18 +1020,18 @@ * we are spinning in kipmid looking for something and not delaying * between checks */ -static inline void ipmi_si_set_not_busy(struct timespec *ts) +static inline void ipmi_si_set_not_busy(struct timespec64 *ts) { ts->tv_nsec = -1; } -static inline int ipmi_si_is_busy(struct timespec *ts) +static inline int ipmi_si_is_busy(struct timespec64 *ts) { return ts->tv_nsec != -1; } -static int ipmi_thread_busy_wait(enum si_sm_result smi_result, - const struct smi_info *smi_info, - struct timespec *busy_until) +static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result, + const struct smi_info *smi_info, + struct timespec64 *busy_until) { unsigned int max_busy_us = 0; @@ -961,12 +1040,13 @@ if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY) ipmi_si_set_not_busy(busy_until); else if (!ipmi_si_is_busy(busy_until)) { - getnstimeofday(busy_until); - timespec_add_ns(busy_until, max_busy_us*NSEC_PER_USEC); + getnstimeofday64(busy_until); + timespec64_add_ns(busy_until, max_busy_us*NSEC_PER_USEC); } else { - struct timespec now; - getnstimeofday(&now); - if (unlikely(timespec_compare(&now, busy_until) > 0)) { + struct timespec64 now; + + getnstimeofday64(&now); + if (unlikely(timespec64_compare(&now, busy_until) > 0)) { ipmi_si_set_not_busy(busy_until); return 0; } @@ -989,10 +1069,10 @@ struct smi_info *smi_info = data; unsigned long flags; enum si_sm_result smi_result; - struct timespec busy_until; + struct timespec64 busy_until; ipmi_si_set_not_busy(&busy_until); - set_user_nice(current, 19); + set_user_nice(current, MAX_NICE); while (!kthread_should_stop()) { int busy_wait; @@ -1016,9 +1096,15 @@ ; /* do nothing */ else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) schedule(); - else if (smi_result == SI_SM_IDLE) - schedule_timeout_interruptible(100); - else + else if (smi_result == SI_SM_IDLE) { + if (atomic_read(&smi_info->need_watch)) { + schedule_timeout_interruptible(100); + } else { + /* Wait to be woken up when we are needed. 
*/ + __set_current_state(TASK_INTERRUPTIBLE); + schedule(); + } + } else schedule_timeout_interruptible(1); } return 0; @@ -1029,7 +1115,7 @@ { struct smi_info *smi_info = send_info; unsigned long flags = 0; - int run_to_completion = smi_info->run_to_completion; + bool run_to_completion = smi_info->run_to_completion; /* * Make sure there is some delay in the poll loop so we can @@ -1047,13 +1133,23 @@ { struct smi_info *smi_info = send_info; - if (atomic_read(&smi_info->stop_operation) || - !smi_info->has_event_buffer) + if (!smi_info->has_event_buffer) return; atomic_set(&smi_info->req_events, 1); } +static void set_need_watch(void *send_info, bool enable) +{ + struct smi_info *smi_info = send_info; + unsigned long flags; + + atomic_set(&smi_info->need_watch, enable); + spin_lock_irqsave(&smi_info->si_lock, flags); + check_start_timer_thread(smi_info); + spin_unlock_irqrestore(&smi_info->si_lock, flags); +} + static int initialized; static void smi_timeout(unsigned long data) @@ -1064,15 +1160,10 @@ unsigned long jiffies_now; long time_diff; long timeout; -#ifdef DEBUG_TIMING - struct timeval t; -#endif spin_lock_irqsave(&(smi_info->si_lock), flags); -#ifdef DEBUG_TIMING - do_gettimeofday(&t); - printk(KERN_DEBUG "**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec); -#endif + debug_timestamp("Timer"); + jiffies_now = jiffies; time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) * SI_USEC_PER_JIFFY); @@ -1109,18 +1200,13 @@ { struct smi_info *smi_info = data; unsigned long flags; -#ifdef DEBUG_TIMING - struct timeval t; -#endif spin_lock_irqsave(&(smi_info->si_lock), flags); smi_inc_stat(smi_info, interrupts); -#ifdef DEBUG_TIMING - do_gettimeofday(&t); - printk(KERN_DEBUG "**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec); -#endif + debug_timestamp("Interrupt"); + smi_event_handler(smi_info, 0); spin_unlock_irqrestore(&(smi_info->si_lock), flags); return IRQ_HANDLED; @@ -1191,7 +1277,7 @@ return 0; } -static void set_maintenance_mode(void *send_info, int enable) +static void set_maintenance_mode(void *send_info, bool enable) { struct smi_info *smi_info = send_info; @@ -1199,14 +1285,16 @@ atomic_set(&smi_info->req_events, 0); } -static struct ipmi_smi_handlers handlers = { +static const struct ipmi_smi_handlers handlers = { .owner = THIS_MODULE, .start_processing = smi_start_processing, .get_smi_info = get_smi_info, .sender = sender, .request_events = request_events, + .set_need_watch = set_need_watch, .set_maintenance_mode = set_maintenance_mode, .set_run_to_completion = set_run_to_completion, + .flush_messages = flush_messages, .poll = poll, }; @@ -1223,16 +1311,16 @@ #define DEFAULT_REGSIZE 1 #ifdef CONFIG_ACPI -static bool si_tryacpi = 1; +static bool si_tryacpi = true; #endif #ifdef CONFIG_DMI -static bool si_trydmi = 1; +static bool si_trydmi = true; #endif -static bool si_tryplatform = 1; +static bool si_tryplatform = true; #ifdef CONFIG_PCI -static bool si_trypci = 1; +static bool si_trypci = true; #endif -static bool si_trydefaults = 1; +static bool si_trydefaults = IS_ENABLED(CONFIG_IPMI_SI_PROBE_DEFAULTS); static char *si_type[SI_MAX_PARMS]; #define MAX_SI_TYPE_STR 30 static char si_type_str[MAX_SI_TYPE_STR]; @@ -1331,7 +1419,7 @@ MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or" " disabled(0). 
Normally the IPMI driver auto-detects" " this, but the value may be overridden by this parm."); -module_param(unload_when_empty, int, 0); +module_param(unload_when_empty, bool, 0); MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are" " specified or found, default is 1. Setting to 0" " is useful for hot add of devices using hotmod."); @@ -1360,7 +1448,7 @@ if (info->si_type == SI_BT) { rv = request_irq(info->irq, si_bt_irq_handler, - IRQF_SHARED | IRQF_DISABLED, + IRQF_SHARED, DEVICE_NAME, info); if (!rv) @@ -1370,7 +1458,7 @@ } else rv = request_irq(info->irq, si_irq_handler, - IRQF_SHARED | IRQF_DISABLED, + IRQF_SHARED, DEVICE_NAME, info); if (rv) { @@ -1386,14 +1474,14 @@ return rv; } -static unsigned char port_inb(struct si_sm_io *io, unsigned int offset) +static unsigned char port_inb(const struct si_sm_io *io, unsigned int offset) { unsigned int addr = io->addr_data; return inb(addr + (offset * io->regspacing)); } -static void port_outb(struct si_sm_io *io, unsigned int offset, +static void port_outb(const struct si_sm_io *io, unsigned int offset, unsigned char b) { unsigned int addr = io->addr_data; @@ -1401,14 +1489,14 @@ outb(b, addr + (offset * io->regspacing)); } -static unsigned char port_inw(struct si_sm_io *io, unsigned int offset) +static unsigned char port_inw(const struct si_sm_io *io, unsigned int offset) { unsigned int addr = io->addr_data; return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff; } -static void port_outw(struct si_sm_io *io, unsigned int offset, +static void port_outw(const struct si_sm_io *io, unsigned int offset, unsigned char b) { unsigned int addr = io->addr_data; @@ -1416,14 +1504,14 @@ outw(b << io->regshift, addr + (offset * io->regspacing)); } -static unsigned char port_inl(struct si_sm_io *io, unsigned int offset) +static unsigned char port_inl(const struct si_sm_io *io, unsigned int offset) { unsigned int addr = io->addr_data; return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff; } -static void port_outl(struct si_sm_io *io, unsigned int offset, +static void port_outl(const struct si_sm_io *io, unsigned int offset, unsigned char b) { unsigned int addr = io->addr_data; @@ -1496,49 +1584,52 @@ return 0; } -static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset) +static unsigned char intf_mem_inb(const struct si_sm_io *io, + unsigned int offset) { return readb((io->addr)+(offset * io->regspacing)); } -static void intf_mem_outb(struct si_sm_io *io, unsigned int offset, - unsigned char b) +static void intf_mem_outb(const struct si_sm_io *io, unsigned int offset, + unsigned char b) { writeb(b, (io->addr)+(offset * io->regspacing)); } -static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset) +static unsigned char intf_mem_inw(const struct si_sm_io *io, + unsigned int offset) { return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; } -static void intf_mem_outw(struct si_sm_io *io, unsigned int offset, - unsigned char b) +static void intf_mem_outw(const struct si_sm_io *io, unsigned int offset, + unsigned char b) { writeb(b << io->regshift, (io->addr)+(offset * io->regspacing)); } -static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset) +static unsigned char intf_mem_inl(const struct si_sm_io *io, + unsigned int offset) { return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; } -static void intf_mem_outl(struct si_sm_io *io, unsigned int offset, - unsigned char b) +static void intf_mem_outl(const 
struct si_sm_io *io, unsigned int offset, + unsigned char b) { writel(b << io->regshift, (io->addr)+(offset * io->regspacing)); } #ifdef readq -static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset) +static unsigned char mem_inq(const struct si_sm_io *io, unsigned int offset) { return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; } -static void mem_outq(struct si_sm_io *io, unsigned int offset, +static void mem_outq(const struct si_sm_io *io, unsigned int offset, unsigned char b) { writeq(b << io->regshift, (io->addr)+(offset * io->regspacing)); @@ -1664,7 +1755,7 @@ } *s = '\0'; s++; - for (i = 0; hotmod_ops[i].name; i++) { + for (i = 0; v[i].name; i++) { if (strcmp(*curr, v[i].name) == 0) { *val = v[i].val; *curr = s; @@ -1851,11 +1942,15 @@ info->irq_setup = std_irq_setup; info->slave_addr = ipmb; - if (!add_smi(info)) { - if (try_smi_init(info)) - cleanup_one_si(info); - } else { + rv = add_smi(info); + if (rv) { kfree(info); + goto out; + } + rv = try_smi_init(info); + if (rv) { + cleanup_one_si(info); + goto out; } } else { /* remove */ @@ -1969,18 +2064,13 @@ { struct smi_info *smi_info = context; unsigned long flags; -#ifdef DEBUG_TIMING - struct timeval t; -#endif spin_lock_irqsave(&(smi_info->si_lock), flags); smi_inc_stat(smi_info, interrupts); -#ifdef DEBUG_TIMING - do_gettimeofday(&t); - printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec); -#endif + debug_timestamp("ACPI_GPE"); + smi_event_handler(smi_info, 0); spin_unlock_irqrestore(&(smi_info->si_lock), flags); @@ -2002,7 +2092,6 @@ if (!info->irq) return 0; - /* FIXME - is level triggered right? */ status = acpi_install_gpe_handler(NULL, info->irq, ACPI_GPE_LEVEL_TRIGGERED, @@ -2069,6 +2158,7 @@ static int try_init_spmi(struct SPMITable *spmi) { struct smi_info *info; + int rv; if (spmi->IPMIlegacy != 1) { printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy); @@ -2095,6 +2185,9 @@ case 3: /* BT */ info->si_type = SI_BT; break; + case 4: /* SSIF, just ignore */ + kfree(info); + return -EIO; default: printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n", spmi->InterfaceType); @@ -2143,10 +2236,11 @@ info->io.addr_data, info->io.regsize, info->io.regspacing, info->irq); - if (add_smi(info)) + rv = add_smi(info); + if (rv) kfree(info); - return 0; + return rv; } static void spmi_find_bmc(void) @@ -2170,125 +2264,6 @@ try_init_spmi(spmi); } } - -static int ipmi_pnp_probe(struct pnp_dev *dev, - const struct pnp_device_id *dev_id) -{ - struct acpi_device *acpi_dev; - struct smi_info *info; - struct resource *res, *res_second; - acpi_handle handle; - acpi_status status; - unsigned long long tmp; - - acpi_dev = pnp_acpi_device(dev); - if (!acpi_dev) - return -ENODEV; - - info = smi_info_alloc(); - if (!info) - return -ENOMEM; - - info->addr_source = SI_ACPI; - printk(KERN_INFO PFX "probing via ACPI\n"); - - handle = acpi_dev->handle; - info->addr_info.acpi_info.acpi_handle = handle; - - /* _IFT tells us the interface type: KCS, BT, etc */ - status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp); - if (ACPI_FAILURE(status)) - goto err_free; - - switch (tmp) { - case 1: - info->si_type = SI_KCS; - break; - case 2: - info->si_type = SI_SMIC; - break; - case 3: - info->si_type = SI_BT; - break; - default: - dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp); - goto err_free; - } - - res = pnp_get_resource(dev, IORESOURCE_IO, 0); - if (res) { - info->io_setup = port_setup; - info->io.addr_type = IPMI_IO_ADDR_SPACE; - } else { - res = pnp_get_resource(dev, IORESOURCE_MEM, 0); - if 
(res) { - info->io_setup = mem_setup; - info->io.addr_type = IPMI_MEM_ADDR_SPACE; - } - } - if (!res) { - dev_err(&dev->dev, "no I/O or memory address\n"); - goto err_free; - } - info->io.addr_data = res->start; - - info->io.regspacing = DEFAULT_REGSPACING; - res_second = pnp_get_resource(dev, - (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? - IORESOURCE_IO : IORESOURCE_MEM, - 1); - if (res_second) { - if (res_second->start > info->io.addr_data) - info->io.regspacing = res_second->start - info->io.addr_data; - } - info->io.regsize = DEFAULT_REGSPACING; - info->io.regshift = 0; - - /* If _GPE exists, use it; otherwise use standard interrupts */ - status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp); - if (ACPI_SUCCESS(status)) { - info->irq = tmp; - info->irq_setup = acpi_gpe_irq_setup; - } else if (pnp_irq_valid(dev, 0)) { - info->irq = pnp_irq(dev, 0); - info->irq_setup = std_irq_setup; - } - - info->dev = &dev->dev; - pnp_set_drvdata(dev, info); - - dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n", - res, info->io.regsize, info->io.regspacing, - info->irq); - - if (add_smi(info)) - goto err_free; - - return 0; - -err_free: - kfree(info); - return -EINVAL; -} - -static void ipmi_pnp_remove(struct pnp_dev *dev) -{ - struct smi_info *info = pnp_get_drvdata(dev); - - cleanup_one_si(info); -} - -static const struct pnp_device_id pnp_dev_table[] = { - {"IPI0001", 0}, - {"", 0}, -}; - -static struct pnp_driver ipmi_pnp_driver = { - .name = DEVICE_NAME, - .probe = ipmi_pnp_probe, - .remove = ipmi_pnp_remove, - .id_table = pnp_dev_table, -}; #endif #ifdef CONFIG_DMI @@ -2566,19 +2541,23 @@ &pdev->resource[0], info->io.regsize, info->io.regspacing, info->irq); - if (add_smi(info)) + rv = add_smi(info); + if (rv) { kfree(info); + pci_disable_device(pdev); + } - return 0; + return rv; } static void ipmi_pci_remove(struct pci_dev *pdev) { struct smi_info *info = pci_get_drvdata(pdev); cleanup_one_si(info); + pci_disable_device(pdev); } -static struct pci_device_id ipmi_pci_devices[] = { +static const struct pci_device_id ipmi_pci_devices[] = { { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) }, { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) }, { 0, } @@ -2593,10 +2572,20 @@ }; #endif /* CONFIG_PCI */ -static struct of_device_id ipmi_match[]; -static int ipmi_probe(struct platform_device *dev) -{ #ifdef CONFIG_OF +static const struct of_device_id of_ipmi_match[] = { + { .type = "ipmi", .compatible = "ipmi-kcs", + .data = (void *)(unsigned long) SI_KCS }, + { .type = "ipmi", .compatible = "ipmi-smic", + .data = (void *)(unsigned long) SI_SMIC }, + { .type = "ipmi", .compatible = "ipmi-bt", + .data = (void *)(unsigned long) SI_BT }, + {}, +}; +MODULE_DEVICE_TABLE(of, of_ipmi_match); + +static int of_ipmi_probe(struct platform_device *dev) +{ const struct of_device_id *match; struct smi_info *info; struct resource resource; @@ -2607,8 +2596,11 @@ dev_info(&dev->dev, "probing via device tree\n"); - match = of_match_device(ipmi_match, &dev->dev); + match = of_match_device(of_ipmi_match, &dev->dev); if (!match) + return -ENODEV; + + if (!of_device_is_available(np)) return -EINVAL; ret = of_address_to_resource(np, 0, &resource); @@ -2670,43 +2662,227 @@ dev_set_drvdata(&dev->dev, info); - if (add_smi(info)) { + ret = add_smi(info); + if (ret) { kfree(info); - return -EBUSY; + return ret; } -#endif return 0; } +#else +#define of_ipmi_match NULL +static int of_ipmi_probe(struct platform_device *dev) +{ + return -ENODEV; +} +#endif -static int ipmi_remove(struct platform_device 
*dev) +#ifdef CONFIG_ACPI +static int acpi_ipmi_probe(struct platform_device *dev) { -#ifdef CONFIG_OF - cleanup_one_si(dev_get_drvdata(&dev->dev)); + struct smi_info *info; + struct resource *res, *res_second; + acpi_handle handle; + acpi_status status; + unsigned long long tmp; + int rv = -EINVAL; + + handle = ACPI_HANDLE(&dev->dev); + if (!handle) + return -ENODEV; + + info = smi_info_alloc(); + if (!info) + return -ENOMEM; + + info->addr_source = SI_ACPI; + dev_info(&dev->dev, PFX "probing via ACPI\n"); + + info->addr_info.acpi_info.acpi_handle = handle; + + /* _IFT tells us the interface type: KCS, BT, etc */ + status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp); + if (ACPI_FAILURE(status)) { + dev_err(&dev->dev, "Could not find ACPI IPMI interface type\n"); + goto err_free; + } + + switch (tmp) { + case 1: + info->si_type = SI_KCS; + break; + case 2: + info->si_type = SI_SMIC; + break; + case 3: + info->si_type = SI_BT; + break; + case 4: /* SSIF, just ignore */ + rv = -ENODEV; + goto err_free; + default: + dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp); + goto err_free; + } + + res = platform_get_resource(dev, IORESOURCE_IO, 0); + if (res) { + info->io_setup = port_setup; + info->io.addr_type = IPMI_IO_ADDR_SPACE; + } else { + res = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (res) { + info->io_setup = mem_setup; + info->io.addr_type = IPMI_MEM_ADDR_SPACE; + } + } + if (!res) { + dev_err(&dev->dev, "no I/O or memory address\n"); + goto err_free; + } + info->io.addr_data = res->start; + + info->io.regspacing = DEFAULT_REGSPACING; + res_second = platform_get_resource(dev, + (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? + IORESOURCE_IO : IORESOURCE_MEM, + 1); + if (res_second) { + if (res_second->start > info->io.addr_data) + info->io.regspacing = + res_second->start - info->io.addr_data; + } + info->io.regsize = DEFAULT_REGSPACING; + info->io.regshift = 0; + + /* If _GPE exists, use it; otherwise use standard interrupts */ + status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp); + if (ACPI_SUCCESS(status)) { + info->irq = tmp; + info->irq_setup = acpi_gpe_irq_setup; + } else { + int irq = platform_get_irq(dev, 0); + + if (irq > 0) { + info->irq = irq; + info->irq_setup = std_irq_setup; + } + } + + info->dev = &dev->dev; + platform_set_drvdata(dev, info); + + dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n", + res, info->io.regsize, info->io.regspacing, + info->irq); + + rv = add_smi(info); + if (rv) + kfree(info); + + return rv; + +err_free: + kfree(info); + return rv; +} + +static const struct acpi_device_id acpi_ipmi_match[] = { + { "IPI0001", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, acpi_ipmi_match); +#else +static int acpi_ipmi_probe(struct platform_device *dev) +{ + return -ENODEV; +} #endif - return 0; + +static int ipmi_probe(struct platform_device *dev) +{ + if (of_ipmi_probe(dev) == 0) + return 0; + + return acpi_ipmi_probe(dev); } -static struct of_device_id ipmi_match[] = +static int ipmi_remove(struct platform_device *dev) { - { .type = "ipmi", .compatible = "ipmi-kcs", - .data = (void *)(unsigned long) SI_KCS }, - { .type = "ipmi", .compatible = "ipmi-smic", - .data = (void *)(unsigned long) SI_SMIC }, - { .type = "ipmi", .compatible = "ipmi-bt", - .data = (void *)(unsigned long) SI_BT }, - {}, -}; + struct smi_info *info = dev_get_drvdata(&dev->dev); + + cleanup_one_si(info); + return 0; +} static struct platform_driver ipmi_driver = { .driver = { .name = DEVICE_NAME, - .owner = THIS_MODULE, - .of_match_table = ipmi_match, + 
.of_match_table = of_ipmi_match, + .acpi_match_table = ACPI_PTR(acpi_ipmi_match), }, .probe = ipmi_probe, .remove = ipmi_remove, }; +#ifdef CONFIG_PARISC +static int ipmi_parisc_probe(struct parisc_device *dev) +{ + struct smi_info *info; + int rv; + + info = smi_info_alloc(); + + if (!info) { + dev_err(&dev->dev, + "could not allocate memory for PARISC probe\n"); + return -ENOMEM; + } + + info->si_type = SI_KCS; + info->addr_source = SI_DEVICETREE; + info->io_setup = mem_setup; + info->io.addr_type = IPMI_MEM_ADDR_SPACE; + info->io.addr_data = dev->hpa.start; + info->io.regsize = 1; + info->io.regspacing = 1; + info->io.regshift = 0; + info->irq = 0; /* no interrupt */ + info->irq_setup = NULL; + info->dev = &dev->dev; + + dev_dbg(&dev->dev, "addr 0x%lx\n", info->io.addr_data); + + dev_set_drvdata(&dev->dev, info); + + rv = add_smi(info); + if (rv) { + kfree(info); + return rv; + } + + return 0; +} + +static int ipmi_parisc_remove(struct parisc_device *dev) +{ + cleanup_one_si(dev_get_drvdata(&dev->dev)); + return 0; +} + +static struct parisc_device_id ipmi_parisc_tbl[] = { + { HPHW_MC, HVERSION_REV_ANY_ID, 0x004, 0xC0 }, + { 0, } +}; + +static struct parisc_driver ipmi_parisc_driver = { + .name = "ipmi", + .id_table = ipmi_parisc_tbl, + .probe = ipmi_parisc_probe, + .remove = ipmi_parisc_remove, +}; +#endif /* CONFIG_PARISC */ + static int wait_for_msg_done(struct smi_info *smi_info) { enum si_sm_result smi_result; @@ -2768,6 +2944,172 @@ return rv; } +static int get_global_enables(struct smi_info *smi_info, u8 *enables) +{ + unsigned char msg[3]; + unsigned char *resp; + unsigned long resp_len; + int rv; + + resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); + if (!resp) + return -ENOMEM; + + msg[0] = IPMI_NETFN_APP_REQUEST << 2; + msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; + smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); + + rv = wait_for_msg_done(smi_info); + if (rv) { + dev_warn(smi_info->dev, + "Error getting response from get global enables command: %d\n", + rv); + goto out; + } + + resp_len = smi_info->handlers->get_result(smi_info->si_sm, + resp, IPMI_MAX_MSG_LENGTH); + + if (resp_len < 4 || + resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || + resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD || + resp[2] != 0) { + dev_warn(smi_info->dev, + "Invalid return from get global enables command: %ld %x %x %x\n", + resp_len, resp[0], resp[1], resp[2]); + rv = -EINVAL; + goto out; + } else { + *enables = resp[3]; + } + +out: + kfree(resp); + return rv; +} + +/* + * Returns 1 if it gets an error from the command. 
+ */ +static int set_global_enables(struct smi_info *smi_info, u8 enables) +{ + unsigned char msg[3]; + unsigned char *resp; + unsigned long resp_len; + int rv; + + resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); + if (!resp) + return -ENOMEM; + + msg[0] = IPMI_NETFN_APP_REQUEST << 2; + msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; + msg[2] = enables; + smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); + + rv = wait_for_msg_done(smi_info); + if (rv) { + dev_warn(smi_info->dev, + "Error getting response from set global enables command: %d\n", + rv); + goto out; + } + + resp_len = smi_info->handlers->get_result(smi_info->si_sm, + resp, IPMI_MAX_MSG_LENGTH); + + if (resp_len < 3 || + resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || + resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) { + dev_warn(smi_info->dev, + "Invalid return from set global enables command: %ld %x %x\n", + resp_len, resp[0], resp[1]); + rv = -EINVAL; + goto out; + } + + if (resp[2] != 0) + rv = 1; + +out: + kfree(resp); + return rv; +} + +/* + * Some BMCs do not support clearing the receive irq bit in the global + * enables (even if they don't support interrupts on the BMC). Check + * for this and handle it properly. + */ +static void check_clr_rcv_irq(struct smi_info *smi_info) +{ + u8 enables = 0; + int rv; + + rv = get_global_enables(smi_info, &enables); + if (!rv) { + if ((enables & IPMI_BMC_RCV_MSG_INTR) == 0) + /* Already clear, should work ok. */ + return; + + enables &= ~IPMI_BMC_RCV_MSG_INTR; + rv = set_global_enables(smi_info, enables); + } + + if (rv < 0) { + dev_err(smi_info->dev, + "Cannot check clearing the rcv irq: %d\n", rv); + return; + } + + if (rv) { + /* + * An error when setting the event buffer bit means + * clearing the bit is not supported. + */ + dev_warn(smi_info->dev, + "The BMC does not support clearing the recv irq bit, compensating, but the BMC needs to be fixed.\n"); + smi_info->cannot_disable_irq = true; + } +} + +/* + * Some BMCs do not support setting the interrupt bits in the global + * enables even if they support interrupts. Clearly bad, but we can + * compensate. + */ +static void check_set_rcv_irq(struct smi_info *smi_info) +{ + u8 enables = 0; + int rv; + + if (!smi_info->irq) + return; + + rv = get_global_enables(smi_info, &enables); + if (!rv) { + enables |= IPMI_BMC_RCV_MSG_INTR; + rv = set_global_enables(smi_info, enables); + } + + if (rv < 0) { + dev_err(smi_info->dev, + "Cannot check setting the rcv irq: %d\n", rv); + return; + } + + if (rv) { + /* + * An error when setting the event buffer bit means + * setting the bit is not supported. + */ + dev_warn(smi_info->dev, + "The BMC does not support setting the recv irq bit, compensating, but the BMC needs to be fixed.\n"); + smi_info->cannot_disable_irq = true; + smi_info->irq_enable_broken = true; + } +} + static int try_enable_event_buffer(struct smi_info *smi_info) { unsigned char msg[3]; @@ -2804,9 +3146,11 @@ goto out; } - if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) + if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) { /* buffer is already enabled, nothing to do. */ + smi_info->supports_event_msg_buff = true; goto out; + } msg[0] = IPMI_NETFN_APP_REQUEST << 2; msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; @@ -2839,6 +3183,9 @@ * that the event buffer is not supported. 
*/ rv = -ENOENT; + else + smi_info->supports_event_msg_buff = true; + out: kfree(resp); return rv; @@ -2848,7 +3195,9 @@ { struct smi_info *smi = m->private; - return seq_printf(m, "%s\n", si_to_str[smi->si_type]); + seq_printf(m, "%s\n", si_to_str[smi->si_type]); + + return 0; } static int smi_type_proc_open(struct inode *inode, struct file *file) @@ -2910,16 +3259,18 @@ { struct smi_info *smi = m->private; - return seq_printf(m, - "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n", - si_to_str[smi->si_type], - addr_space_to_str[smi->io.addr_type], - smi->io.addr_data, - smi->io.regspacing, - smi->io.regsize, - smi->io.regshift, - smi->irq, - smi->slave_addr); + seq_printf(m, + "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n", + si_to_str[smi->si_type], + addr_space_to_str[smi->io.addr_type], + smi->io.addr_data, + smi->io.regspacing, + smi->io.regsize, + smi->io.regshift, + smi->irq, + smi->slave_addr); + + return 0; } static int smi_params_proc_open(struct inode *inode, struct file *file) @@ -3077,20 +3428,21 @@ setup_dell_poweredge_bt_xaction_handler(smi_info); } +static void check_for_broken_irqs(struct smi_info *smi_info) +{ + check_clr_rcv_irq(smi_info); + check_set_rcv_irq(smi_info); +} + static inline void wait_for_timer_and_thread(struct smi_info *smi_info) { - if (smi_info->intf) { - /* - * The timer and thread are only running if the - * interface has been started up and registered. - */ - if (smi_info->thread != NULL) - kthread_stop(smi_info->thread); + if (smi_info->thread != NULL) + kthread_stop(smi_info->thread); + if (smi_info->timer_running) del_timer_sync(&smi_info->si_timer); - } } -static struct ipmi_default_vals +static const struct ipmi_default_vals { int type; int port; @@ -3165,8 +3517,8 @@ int rv = 0; printk(KERN_INFO PFX "Adding %s-specified %s state machine", - ipmi_addr_src_to_str[new_smi->addr_source], - si_to_str[new_smi->si_type]); + ipmi_addr_src_to_str(new_smi->addr_source), + si_to_str[new_smi->si_type]); mutex_lock(&smi_infos_lock); if (!is_new_interface(new_smi)) { printk(KERN_CONT " duplicate interface\n"); @@ -3196,7 +3548,7 @@ printk(KERN_INFO PFX "Trying %s-specified %s state" " machine at %s address 0x%lx, slave address 0x%x," " irq %d\n", - ipmi_addr_src_to_str[new_smi->addr_source], + ipmi_addr_src_to_str(new_smi->addr_source), si_to_str[new_smi->si_type], addr_space_to_str[new_smi->io.addr_type], new_smi->io.addr_data, @@ -3261,32 +3613,38 @@ setup_oem_data_handler(new_smi); setup_xaction_handlers(new_smi); + check_for_broken_irqs(new_smi); - INIT_LIST_HEAD(&(new_smi->xmit_msgs)); - INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs)); + new_smi->waiting_msg = NULL; new_smi->curr_msg = NULL; atomic_set(&new_smi->req_events, 0); - new_smi->run_to_completion = 0; + new_smi->run_to_completion = false; for (i = 0; i < SI_NUM_STATS; i++) atomic_set(&new_smi->stats[i], 0); - new_smi->interrupt_disabled = 1; - atomic_set(&new_smi->stop_operation, 0); + new_smi->interrupt_disabled = true; + atomic_set(&new_smi->need_watch, 0); new_smi->intf_num = smi_num; smi_num++; rv = try_enable_event_buffer(new_smi); if (rv == 0) - new_smi->has_event_buffer = 1; + new_smi->has_event_buffer = true; /* * Start clearing the flags before we enable interrupts or the * timer to avoid racing with the timer. */ - start_clear_flags(new_smi); - /* IRQ is defined to be set when non-zero. */ - if (new_smi->irq) - new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ; + start_clear_flags(new_smi, false); + + /* + * IRQ is defined to be set when non-zero. 
req_events will + * cause a global flags check that will enable interrupts. + */ + if (new_smi->irq) { + new_smi->interrupt_disabled = false; + atomic_set(&new_smi->req_events, 1); + } if (!new_smi->dev) { /* @@ -3311,14 +3669,13 @@ rv); goto out_err; } - new_smi->dev_registered = 1; + new_smi->dev_registered = true; } rv = ipmi_register_smi(&handlers, new_smi, &new_smi->device_id, new_smi->dev, - "bmc", new_smi->slave_addr); if (rv) { dev_err(new_smi->dev, "Unable to register device: error %d\n", @@ -3356,15 +3713,15 @@ return 0; out_err_stop_timer: - atomic_inc(&new_smi->stop_operation); wait_for_timer_and_thread(new_smi); out_err: - new_smi->interrupt_disabled = 1; + new_smi->interrupt_disabled = true; if (new_smi->intf) { - ipmi_unregister_smi(new_smi->intf); + ipmi_smi_t intf = new_smi->intf; new_smi->intf = NULL; + ipmi_unregister_smi(intf); } if (new_smi->irq_cleanup) { @@ -3396,7 +3753,7 @@ if (new_smi->dev_registered) { platform_device_unregister(new_smi->pdev); - new_smi->dev_registered = 0; + new_smi->dev_registered = false; } return rv; @@ -3451,14 +3808,7 @@ printk(KERN_ERR PFX "Unable to register " "PCI driver: %d\n", rv); else - pci_registered = 1; - } -#endif - -#ifdef CONFIG_ACPI - if (si_tryacpi) { - pnp_register_driver(&ipmi_pnp_driver); - pnp_registered = 1; + pci_registered = true; } #endif @@ -3472,6 +3822,13 @@ spmi_find_bmc(); #endif +#ifdef CONFIG_PARISC + register_parisc_driver(&ipmi_parisc_driver); + parisc_registered = true; + /* poking PC IO addresses will crash machine, don't do it */ + si_trydefaults = 0; +#endif + /* We prefer devices with interrupts, but in the case of a machine with multiple BMCs we assume that there will be several instances of a given type so if we succeed in registering a type then also @@ -3536,57 +3893,49 @@ static void cleanup_one_si(struct smi_info *to_clean) { int rv = 0; - unsigned long flags; if (!to_clean) return; - list_del(&to_clean->link); + if (to_clean->intf) { + ipmi_smi_t intf = to_clean->intf; - /* Tell the driver that we are shutting down. */ - atomic_inc(&to_clean->stop_operation); + to_clean->intf = NULL; + rv = ipmi_unregister_smi(intf); + if (rv) { + pr_err(PFX "Unable to unregister device: errno=%d\n", + rv); + } + } + + if (to_clean->dev) + dev_set_drvdata(to_clean->dev, NULL); + + list_del(&to_clean->link); /* - * Make sure the timer and thread are stopped and will not run - * again. + * Make sure that interrupts, the timer and the thread are + * stopped and will not run again. */ + if (to_clean->irq_cleanup) + to_clean->irq_cleanup(to_clean); wait_for_timer_and_thread(to_clean); /* * Timeouts are stopped, now make sure the interrupts are off - * for the device. A little tricky with locks to make sure - * there are no races. + * in the BMC. Note that timers and CPU interrupts are off, + * so no need for locks. */ - spin_lock_irqsave(&to_clean->si_lock, flags); while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { - spin_unlock_irqrestore(&to_clean->si_lock, flags); poll(to_clean); schedule_timeout_uninterruptible(1); - spin_lock_irqsave(&to_clean->si_lock, flags); } - disable_si_irq(to_clean); - spin_unlock_irqrestore(&to_clean->si_lock, flags); + disable_si_irq(to_clean, false); while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { poll(to_clean); schedule_timeout_uninterruptible(1); } - /* Clean up interrupts and make sure that everything is done. 
*/ - if (to_clean->irq_cleanup) - to_clean->irq_cleanup(to_clean); - while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { - poll(to_clean); - schedule_timeout_uninterruptible(1); - } - - if (to_clean->intf) - rv = ipmi_unregister_smi(to_clean->intf); - - if (rv) { - printk(KERN_ERR PFX "Unable to unregister device: errno=%d\n", - rv); - } - if (to_clean->handlers) to_clean->handlers->cleanup(to_clean->si_sm); @@ -3614,9 +3963,9 @@ if (pci_registered) pci_unregister_driver(&ipmi_pci_driver); #endif -#ifdef CONFIG_ACPI - if (pnp_registered) - pnp_unregister_driver(&ipmi_pnp_driver); +#ifdef CONFIG_PARISC + if (parisc_registered) + unregister_parisc_driver(&ipmi_parisc_driver); #endif platform_driver_unregister(&ipmi_driver);
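
The heart of the rework in this patch is that interrupt enabling/disabling is no longer a dedicated state-machine path (the removed SI_ENABLE_INTERRUPTS1/2 and SI_DISABLE_INTERRUPTS1/2 states); instead the driver computes the set of BMC global-enable bits it wants and reconciles them in the new SI_CHECKING_ENABLES / SI_SETTING_ENABLES states. The standalone user-space sketch below mirrors that decision logic from current_global_enables(); it is illustrative only, not part of the patch. struct fake_smi is a made-up stand-in for the relevant struct smi_info fields, and the constant values are assumed to match include/uapi/linux/ipmi_msgdefs.h.

/*
 * Sketch of the "which global enables do we want" decision, mirroring
 * current_global_enables() in the patch.  Not driver code.
 */
#include <stdbool.h>
#include <stdio.h>

/* Bit values assumed from include/uapi/linux/ipmi_msgdefs.h. */
#define IPMI_BMC_RCV_MSG_INTR  0x01
#define IPMI_BMC_EVT_MSG_INTR  0x02
#define IPMI_BMC_EVT_MSG_BUFF  0x08

/* Stand-in for the struct smi_info fields the computation depends on. */
struct fake_smi {
	bool supports_event_msg_buff;	/* BMC has an event message buffer */
	int  irq;			/* 0 means "no interrupt allocated" */
	bool interrupt_disabled;	/* driver turned irqs off (e.g. OOM) */
	bool cannot_disable_irq;	/* BMC refuses to clear RCV_MSG_INTR */
	bool irq_enable_broken;		/* BMC refuses to set irq enable bits */
};

static unsigned char wanted_enables(const struct fake_smi *smi, bool *irq_on)
{
	unsigned char enables = 0;

	if (smi->supports_event_msg_buff)
		enables |= IPMI_BMC_EVT_MSG_BUFF;

	/*
	 * Receive-message interrupt: wanted when irqs are usable, or when
	 * the BMC cannot clear the bit anyway; never when setting it is
	 * known to be broken.
	 */
	if (((smi->irq && !smi->interrupt_disabled) || smi->cannot_disable_irq) &&
	    !smi->irq_enable_broken)
		enables |= IPMI_BMC_RCV_MSG_INTR;

	/* Event-message interrupt needs both the event buffer and a working irq. */
	if (smi->supports_event_msg_buff &&
	    smi->irq && !smi->interrupt_disabled && !smi->irq_enable_broken)
		enables |= IPMI_BMC_EVT_MSG_INTR;

	*irq_on = (enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR)) != 0;
	return enables;
}

int main(void)
{
	struct fake_smi smi = {
		.supports_event_msg_buff = true,
		.irq = 10,			/* pretend an irq was allocated */
	};
	bool irq_on;
	unsigned char enables = wanted_enables(&smi, &irq_on);

	printf("wanted global enables: 0x%02x (irq_on=%d)\n", enables, irq_on);
	return 0;
}

In the patch itself this computed value is compared against the BMC's Get BMC Global Enables response masked with GLOBAL_ENABLES_MASK, and a Set BMC Global Enables command is issued from SI_CHECKING_ENABLES only when the two differ; the BT interface additionally has its own interrupt-enable bit toggled via check_bt_irq().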