--- zzzz-none-000/linux-3.10.107/drivers/char/ipmi/ipmi_msghandler.c 2017-06-27 09:49:32.000000000 +0000 +++ scorpion-7490-727/linux-3.10.107/drivers/char/ipmi/ipmi_msghandler.c 2021-02-04 17:41:59.000000000 +0000 @@ -55,6 +55,9 @@ static int ipmi_init_msghandler(void); static void smi_recv_tasklet(unsigned long); static void handle_new_recv_msgs(ipmi_smi_t intf); +static void need_waiter(ipmi_smi_t intf); +static int handle_one_recv_msg(ipmi_smi_t intf, + struct ipmi_smi_msg *msg); static int initialized; @@ -73,14 +76,28 @@ */ #define MAX_MSG_TIMEOUT 60000 +/* Call every ~1000 ms. */ +#define IPMI_TIMEOUT_TIME 1000 + +/* How many jiffies does it take to get to the timeout time. */ +#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000) + +/* + * Request events from the queue every second (this is the number of + * IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the + * future, IPMI will add a way to know immediately if an event is in + * the queue and this silliness can go away. + */ +#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME)) + /* * The main "user" data structure. */ struct ipmi_user { struct list_head link; - /* Set to "0" when the user is destroyed. */ - int valid; + /* Set to false when the user is destroyed. */ + bool valid; struct kref refcount; @@ -92,7 +109,7 @@ ipmi_smi_t intf; /* Does this interface receive IPMI events? */ - int gets_events; + bool gets_events; }; struct cmd_rcvr { @@ -176,25 +193,14 @@ #endif struct bmc_device { - struct platform_device *dev; + struct platform_device pdev; struct ipmi_device_id id; unsigned char guid[16]; int guid_set; - - struct kref refcount; - - /* bmc device attributes */ - struct device_attribute device_id_attr; - struct device_attribute provides_dev_sdrs_attr; - struct device_attribute revision_attr; - struct device_attribute firmware_rev_attr; - struct device_attribute version_attr; - struct device_attribute add_dev_support_attr; - struct device_attribute manufacturer_id_attr; - struct device_attribute product_id_attr; - struct device_attribute guid_attr; - struct device_attribute aux_firmware_rev_attr; + char name[16]; + struct kref usecount; }; +#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev) /* * Various statistics for IPMI, these index stats[] in the ipmi_smi @@ -308,6 +314,9 @@ struct kref refcount; + /* Set when the interface is being unregistered. */ + bool in_shutdown; + /* Used for a list of interfaces. */ struct list_head link; @@ -326,7 +335,6 @@ struct bmc_device *bmc; char *my_dev_name; - char *sysfs_name; /* * This is the lower-layer's sender routine. Note that you @@ -334,7 +342,7 @@ * an umpreemptible region to use this. You must fetch the * value into a local variable and make sure it is not NULL. */ - struct ipmi_smi_handlers *handlers; + const struct ipmi_smi_handlers *handlers; void *send_info; #ifdef CONFIG_PROC_FS @@ -362,11 +370,16 @@ * periodic timer interrupt. The tasklet is for handling received * messages directly from the handler. */ - spinlock_t waiting_msgs_lock; - struct list_head waiting_msgs; + spinlock_t waiting_rcv_msgs_lock; + struct list_head waiting_rcv_msgs; atomic_t watchdog_pretimeouts_to_deliver; struct tasklet_struct recv_tasklet; + spinlock_t xmit_msgs_lock; + struct list_head xmit_msgs; + struct ipmi_smi_msg *curr_msg; + struct list_head hp_xmit_msgs; + /* * The list of command receivers that are registered for commands * on this interface. @@ -383,6 +396,9 @@ unsigned int waiting_events_count; /* How many events in queue? 
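[Editor's note: the struct bmc_device rework above drops the separate platform_device pointer and embeds the device (pdev) directly, recovering the container with the new to_bmc_device() macro, which is container_of() underneath. A minimal userspace sketch of that idiom, with made-up struct names standing in for the kernel's:]

/* Sketch of the container_of idiom behind to_bmc_device(). Only the
 * pointer arithmetic is the point; the types are illustrative. */
#include <stddef.h>
#include <stdio.h>

struct fake_device {              /* stands in for struct device */
        const char *name;
};

struct bmc_device_sketch {        /* stands in for struct bmc_device */
        int id;
        struct fake_device dev;   /* embedded, like pdev.dev */
};

#define sketch_container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
        struct bmc_device_sketch bmc = { .id = 42, .dev = { "bmc0" } };
        struct fake_device *dev = &bmc.dev;  /* what the driver core hands back */
        struct bmc_device_sketch *back =
                sketch_container_of(dev, struct bmc_device_sketch, dev);

        /* Recovers the enclosing object without any drvdata lookup. */
        printf("id=%d name=%s\n", back->id, back->dev.name);
        return 0;
}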
*/ char delivering_events; char event_msg_printed; + atomic_t event_waiters; + unsigned int ticks_to_req_ev; + int last_needs_timer; /* * The event receiver for my BMC, only really used at panic @@ -395,7 +411,7 @@ /* For handling of maintenance mode. */ int maintenance_mode; - int maintenance_mode_enable; + bool maintenance_mode_enable; int auto_maintenance_timeout; spinlock_t maintenance_mode_lock; /* Used in a timer... */ @@ -451,12 +467,23 @@ static LIST_HEAD(smi_watchers); static DEFINE_MUTEX(smi_watchers_mutex); - #define ipmi_inc_stat(intf, stat) \ atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat]) #define ipmi_get_stat(intf, stat) \ ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat])) +static char *addr_src_to_str[] = { "invalid", "hotmod", "hardcoded", "SPMI", + "ACPI", "SMBIOS", "PCI", + "device-tree", "default" }; + +const char *ipmi_addr_src_to_str(enum ipmi_addr_src src) +{ + if (src > SI_DEFAULT) + src = 0; /* Invalid */ + return addr_src_to_str[src]; +} +EXPORT_SYMBOL(ipmi_addr_src_to_str); + static int is_lan_addr(struct ipmi_addr *addr) { return addr->addr_type == IPMI_LAN_ADDR_TYPE; @@ -500,7 +527,7 @@ tasklet_kill(&intf->recv_tasklet); - free_smi_msg_list(&intf->waiting_msgs); + free_smi_msg_list(&intf->waiting_rcv_msgs); free_recv_msg_list(&intf->waiting_events); /* @@ -717,7 +744,13 @@ ipmi_inc_stat(intf, unhandled_local_responses); } ipmi_free_recv_msg(msg); - } else { + } else if (!oops_in_progress) { + /* + * If we are running in the panic context, calling the + * receive handler doesn't much meaning and has a deadlock + * risk. At this moment, simply skip it in that case. + */ + ipmi_user_t user = msg->user; user->handler->ipmi_recv_hndl(msg, user->handler_data); } @@ -772,6 +805,7 @@ *seq = i; *seqid = intf->seq_table[i].seqid; intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ; + need_waiter(intf); } else { rv = -EAGAIN; } @@ -941,7 +975,7 @@ new_user->handler = handler; new_user->handler_data = handler_data; new_user->intf = intf; - new_user->gets_events = 0; + new_user->gets_events = false; if (!try_module_get(intf->handlers->owner)) { rv = -ENODEV; @@ -962,10 +996,15 @@ */ mutex_unlock(&ipmi_interfaces_mutex); - new_user->valid = 1; + new_user->valid = true; spin_lock_irqsave(&intf->seq_lock, flags); list_add_rcu(&new_user->link, &intf->users); spin_unlock_irqrestore(&intf->seq_lock, flags); + if (handler->ipmi_watchdog_pretimeout) { + /* User wants pretimeouts, so make sure to watch for them. */ + if (atomic_inc_return(&intf->event_waiters) == 1) + need_waiter(intf); + } *user = new_user; return 0; @@ -982,7 +1021,7 @@ { int rv = 0; ipmi_smi_t intf; - struct ipmi_smi_handlers *handlers; + const struct ipmi_smi_handlers *handlers; mutex_lock(&ipmi_interfaces_mutex); list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { @@ -1019,7 +1058,13 @@ struct cmd_rcvr *rcvr; struct cmd_rcvr *rcvrs = NULL; - user->valid = 0; + user->valid = false; + + if (user->handler->ipmi_watchdog_pretimeout) + atomic_dec(&intf->event_waiters); + + if (user->gets_events) + atomic_dec(&intf->event_waiters); /* Remove the user from the interface's sequence table. 
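[Editor's note: the new ipmi_addr_src_to_str() above guards its table lookup by clamping out-of-range values back to index 0 ("invalid"). A userspace sketch of that clamped lookup; the enum values here are invented for the demo (the real enum ipmi_addr_src lives in include/linux/ipmi_smi.h) and, like the kernel's, are assumed non-negative:]

#include <stdio.h>

enum demo_addr_src { SRC_INVALID, SRC_HOTMOD, SRC_HARDCODED, SRC_DEFAULT };

static const char * const demo_src_str[] = {
        "invalid", "hotmod", "hardcoded", "default"
};

static const char *demo_src_to_str(enum demo_addr_src src)
{
        if (src > SRC_DEFAULT)
                src = SRC_INVALID;      /* never index past the table */
        return demo_src_str[src];
}

int main(void)
{
        printf("%s\n", demo_src_to_str(SRC_HOTMOD));              /* hotmod */
        printf("%s\n", demo_src_to_str((enum demo_addr_src)99));  /* invalid */
        return 0;
}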
*/ spin_lock_irqsave(&intf->seq_lock, flags); @@ -1155,25 +1200,23 @@ if (intf->maintenance_mode != mode) { switch (mode) { case IPMI_MAINTENANCE_MODE_AUTO: - intf->maintenance_mode = mode; intf->maintenance_mode_enable = (intf->auto_maintenance_timeout > 0); break; case IPMI_MAINTENANCE_MODE_OFF: - intf->maintenance_mode = mode; - intf->maintenance_mode_enable = 0; + intf->maintenance_mode_enable = false; break; case IPMI_MAINTENANCE_MODE_ON: - intf->maintenance_mode = mode; - intf->maintenance_mode_enable = 1; + intf->maintenance_mode_enable = true; break; default: rv = -EINVAL; goto out_unlock; } + intf->maintenance_mode = mode; maintenance_mode_update(intf); } @@ -1184,7 +1227,7 @@ } EXPORT_SYMBOL(ipmi_set_maintenance_mode); -int ipmi_set_gets_events(ipmi_user_t user, int val) +int ipmi_set_gets_events(ipmi_user_t user, bool val) { unsigned long flags; ipmi_smi_t intf = user->intf; @@ -1194,8 +1237,18 @@ INIT_LIST_HEAD(&msgs); spin_lock_irqsave(&intf->events_lock, flags); + if (user->gets_events == val) + goto out; + user->gets_events = val; + if (val) { + if (atomic_inc_return(&intf->event_waiters) == 1) + need_waiter(intf); + } else { + atomic_dec(&intf->event_waiters); + } + if (intf->delivering_events) /* * Another thread is delivering events for this, so @@ -1289,6 +1342,9 @@ goto out_unlock; } + if (atomic_inc_return(&intf->event_waiters) == 1) + need_waiter(intf); + list_add_rcu(&rcvr->link, &intf->cmd_rcvrs); out_unlock: @@ -1330,6 +1386,7 @@ mutex_unlock(&intf->cmd_rcvrs_mutex); synchronize_rcu(); while (rcvrs) { + atomic_dec(&intf->event_waiters); rcvr = rcvrs; rcvrs = rcvr->next; kfree(rcvr); @@ -1432,6 +1489,43 @@ smi_msg->msgid = msgid; } +static struct ipmi_smi_msg *smi_add_send_msg(ipmi_smi_t intf, + struct ipmi_smi_msg *smi_msg, + int priority) +{ + if (intf->curr_msg) { + if (priority > 0) + list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs); + else + list_add_tail(&smi_msg->link, &intf->xmit_msgs); + smi_msg = NULL; + } else { + intf->curr_msg = smi_msg; + } + + return smi_msg; +} + + +static void smi_send(ipmi_smi_t intf, const struct ipmi_smi_handlers *handlers, + struct ipmi_smi_msg *smi_msg, int priority) +{ + int run_to_completion = intf->run_to_completion; + + if (run_to_completion) { + smi_msg = smi_add_send_msg(intf, smi_msg, priority); + } else { + unsigned long flags; + + spin_lock_irqsave(&intf->xmit_msgs_lock, flags); + smi_msg = smi_add_send_msg(intf, smi_msg, priority); + spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); + } + + if (smi_msg) + handlers->sender(intf->send_info, smi_msg); +} + /* * Separate from ipmi_request so that the user does not have to be * supplied in certain circumstances (mainly at panic time). 
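[Editor's note: the smi_add_send_msg()/smi_send() pair just added implements the new transmit queueing rule: if a message is already in flight, park the new one on the normal or high-priority list; otherwise claim the "current" slot and hand it straight to the lower layer. A rough userspace model, with a fixed array standing in for the kernel's intrusive list_heads:]

#include <stdio.h>

struct msg { int id; };

static struct msg *curr_msg;
static struct msg *xmit_q[8], *hp_xmit_q[8];
static int xmit_n, hp_n;

/* Returns the message to send now, or NULL if it was queued. */
static struct msg *add_send_msg(struct msg *m, int priority)
{
        if (curr_msg) {
                if (priority > 0)
                        hp_xmit_q[hp_n++] = m;
                else
                        xmit_q[xmit_n++] = m;
                return NULL;
        }
        curr_msg = m;
        return m;
}

int main(void)
{
        struct msg a = { 1 }, b = { 2 }, c = { 3 };

        if (add_send_msg(&a, 0))
                printf("send %d immediately\n", a.id);
        add_send_msg(&b, 0);    /* queued: a is still in flight */
        add_send_msg(&c, 1);    /* queued on the high-priority list */
        printf("queued: %d normal, %d high-priority\n", xmit_n, hp_n);
        return 0;
}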
If @@ -1456,7 +1550,6 @@ struct ipmi_smi_msg *smi_msg; struct ipmi_recv_msg *recv_msg; unsigned long flags; - struct ipmi_smi_handlers *handlers; if (supplied_recv) @@ -1479,8 +1572,7 @@ } rcu_read_lock(); - handlers = intf->handlers; - if (!handlers) { + if (intf->in_shutdown) { rv = -ENODEV; goto out_err; } @@ -1535,7 +1627,7 @@ = IPMI_MAINTENANCE_MODE_TIMEOUT; if (!intf->maintenance_mode && !intf->maintenance_mode_enable) { - intf->maintenance_mode_enable = 1; + intf->maintenance_mode_enable = true; maintenance_mode_update(intf); } spin_unlock_irqrestore(&intf->maintenance_mode_lock, @@ -1815,7 +1907,7 @@ } #endif - handlers->sender(intf->send_info, smi_msg, priority); + smi_send(intf, intf->handlers, smi_msg, priority); rcu_read_unlock(); return 0; @@ -1848,7 +1940,7 @@ int retries, unsigned int retry_time_ms) { - unsigned char saddr, lun; + unsigned char saddr = 0, lun = 0; int rv; if (!user) @@ -1912,7 +2004,9 @@ seq_printf(m, "%x", intf->channels[0].address); for (i = 1; i < IPMI_MAX_CHANNELS; i++) seq_printf(m, " %x", intf->channels[i].address); - return seq_putc(m, '\n'); + seq_putc(m, '\n'); + + return 0; } static int smi_ipmb_proc_open(struct inode *inode, struct file *file) @@ -1931,9 +2025,11 @@ { ipmi_smi_t intf = m->private; - return seq_printf(m, "%u.%u\n", - ipmi_version_major(&intf->bmc->id), - ipmi_version_minor(&intf->bmc->id)); + seq_printf(m, "%u.%u\n", + ipmi_version_major(&intf->bmc->id), + ipmi_version_minor(&intf->bmc->id)); + + return 0; } static int smi_version_proc_open(struct inode *inode, struct file *file) @@ -2112,7 +2208,7 @@ static int __find_bmc_guid(struct device *dev, void *data) { unsigned char *id = data; - struct bmc_device *bmc = dev_get_drvdata(dev); + struct bmc_device *bmc = to_bmc_device(dev); return memcmp(bmc->guid, id, 16) == 0; } @@ -2123,7 +2219,7 @@ dev = driver_find_device(drv, NULL, guid, __find_bmc_guid); if (dev) - return dev_get_drvdata(dev); + return to_bmc_device(dev); else return NULL; } @@ -2136,7 +2232,7 @@ static int __find_bmc_prod_dev_id(struct device *dev, void *data) { struct prod_dev_id *id = data; - struct bmc_device *bmc = dev_get_drvdata(dev); + struct bmc_device *bmc = to_bmc_device(dev); return (bmc->id.product_id == id->product_id && bmc->id.device_id == id->device_id); @@ -2154,7 +2250,7 @@ dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id); if (dev) - return dev_get_drvdata(dev); + return to_bmc_device(dev); else return NULL; } @@ -2163,84 +2259,94 @@ struct device_attribute *attr, char *buf) { - struct bmc_device *bmc = dev_get_drvdata(dev); + struct bmc_device *bmc = to_bmc_device(dev); return snprintf(buf, 10, "%u\n", bmc->id.device_id); } +static DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL); -static ssize_t provides_dev_sdrs_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t provides_device_sdrs_show(struct device *dev, + struct device_attribute *attr, + char *buf) { - struct bmc_device *bmc = dev_get_drvdata(dev); + struct bmc_device *bmc = to_bmc_device(dev); return snprintf(buf, 10, "%u\n", (bmc->id.device_revision & 0x80) >> 7); } +static DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show, + NULL); static ssize_t revision_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct bmc_device *bmc = dev_get_drvdata(dev); + struct bmc_device *bmc = to_bmc_device(dev); return snprintf(buf, 20, "%u\n", bmc->id.device_revision & 0x0F); } +static DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL); -static ssize_t 
firmware_rev_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t firmware_revision_show(struct device *dev, + struct device_attribute *attr, + char *buf) { - struct bmc_device *bmc = dev_get_drvdata(dev); + struct bmc_device *bmc = to_bmc_device(dev); return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1, bmc->id.firmware_revision_2); } +static DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL); static ssize_t ipmi_version_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct bmc_device *bmc = dev_get_drvdata(dev); + struct bmc_device *bmc = to_bmc_device(dev); return snprintf(buf, 20, "%u.%u\n", ipmi_version_major(&bmc->id), ipmi_version_minor(&bmc->id)); } +static DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL); static ssize_t add_dev_support_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct bmc_device *bmc = dev_get_drvdata(dev); + struct bmc_device *bmc = to_bmc_device(dev); return snprintf(buf, 10, "0x%02x\n", bmc->id.additional_device_support); } +static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, + NULL); static ssize_t manufacturer_id_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct bmc_device *bmc = dev_get_drvdata(dev); + struct bmc_device *bmc = to_bmc_device(dev); return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id); } +static DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL); static ssize_t product_id_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct bmc_device *bmc = dev_get_drvdata(dev); + struct bmc_device *bmc = to_bmc_device(dev); return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id); } +static DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL); static ssize_t aux_firmware_rev_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct bmc_device *bmc = dev_get_drvdata(dev); + struct bmc_device *bmc = to_bmc_device(dev); return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n", bmc->id.aux_firmware_revision[3], @@ -2248,221 +2354,97 @@ bmc->id.aux_firmware_revision[1], bmc->id.aux_firmware_revision[0]); } +static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL); static ssize_t guid_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct bmc_device *bmc = dev_get_drvdata(dev); + struct bmc_device *bmc = to_bmc_device(dev); return snprintf(buf, 100, "%Lx%Lx\n", (long long) bmc->guid[0], (long long) bmc->guid[8]); } +static DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL); -static void remove_files(struct bmc_device *bmc) +static struct attribute *bmc_dev_attrs[] = { + &dev_attr_device_id.attr, + &dev_attr_provides_device_sdrs.attr, + &dev_attr_revision.attr, + &dev_attr_firmware_revision.attr, + &dev_attr_ipmi_version.attr, + &dev_attr_additional_device_support.attr, + &dev_attr_manufacturer_id.attr, + &dev_attr_product_id.attr, + &dev_attr_aux_firmware_revision.attr, + &dev_attr_guid.attr, + NULL +}; + +static umode_t bmc_dev_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int idx) { - if (!bmc->dev) - return; + struct device *dev = kobj_to_dev(kobj); + struct bmc_device *bmc = to_bmc_device(dev); + umode_t mode = attr->mode; + + if (attr == &dev_attr_aux_firmware_revision.attr) + return bmc->id.aux_firmware_revision_set ? mode : 0; + if (attr == &dev_attr_guid.attr) + return bmc->guid_set ? 
mode : 0; + return mode; +} - device_remove_file(&bmc->dev->dev, - &bmc->device_id_attr); - device_remove_file(&bmc->dev->dev, - &bmc->provides_dev_sdrs_attr); - device_remove_file(&bmc->dev->dev, - &bmc->revision_attr); - device_remove_file(&bmc->dev->dev, - &bmc->firmware_rev_attr); - device_remove_file(&bmc->dev->dev, - &bmc->version_attr); - device_remove_file(&bmc->dev->dev, - &bmc->add_dev_support_attr); - device_remove_file(&bmc->dev->dev, - &bmc->manufacturer_id_attr); - device_remove_file(&bmc->dev->dev, - &bmc->product_id_attr); - - if (bmc->id.aux_firmware_revision_set) - device_remove_file(&bmc->dev->dev, - &bmc->aux_firmware_rev_attr); - if (bmc->guid_set) - device_remove_file(&bmc->dev->dev, - &bmc->guid_attr); +static struct attribute_group bmc_dev_attr_group = { + .attrs = bmc_dev_attrs, + .is_visible = bmc_dev_attr_is_visible, +}; + +static const struct attribute_group *bmc_dev_attr_groups[] = { + &bmc_dev_attr_group, + NULL +}; + +static struct device_type bmc_device_type = { + .groups = bmc_dev_attr_groups, +}; + +static void +release_bmc_device(struct device *dev) +{ + kfree(to_bmc_device(dev)); } static void cleanup_bmc_device(struct kref *ref) { - struct bmc_device *bmc; - - bmc = container_of(ref, struct bmc_device, refcount); + struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount); - remove_files(bmc); - platform_device_unregister(bmc->dev); - kfree(bmc); + platform_device_unregister(&bmc->pdev); } static void ipmi_bmc_unregister(ipmi_smi_t intf) { struct bmc_device *bmc = intf->bmc; - if (intf->sysfs_name) { - sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name); - kfree(intf->sysfs_name); - intf->sysfs_name = NULL; - } + sysfs_remove_link(&intf->si_dev->kobj, "bmc"); if (intf->my_dev_name) { - sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name); + sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name); kfree(intf->my_dev_name); intf->my_dev_name = NULL; } mutex_lock(&ipmidriver_mutex); - kref_put(&bmc->refcount, cleanup_bmc_device); + kref_put(&bmc->usecount, cleanup_bmc_device); intf->bmc = NULL; mutex_unlock(&ipmidriver_mutex); } -static int create_files(struct bmc_device *bmc) -{ - int err; - - bmc->device_id_attr.attr.name = "device_id"; - bmc->device_id_attr.attr.mode = S_IRUGO; - bmc->device_id_attr.show = device_id_show; - sysfs_attr_init(&bmc->device_id_attr.attr); - - bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs"; - bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO; - bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show; - sysfs_attr_init(&bmc->provides_dev_sdrs_attr.attr); - - bmc->revision_attr.attr.name = "revision"; - bmc->revision_attr.attr.mode = S_IRUGO; - bmc->revision_attr.show = revision_show; - sysfs_attr_init(&bmc->revision_attr.attr); - - bmc->firmware_rev_attr.attr.name = "firmware_revision"; - bmc->firmware_rev_attr.attr.mode = S_IRUGO; - bmc->firmware_rev_attr.show = firmware_rev_show; - sysfs_attr_init(&bmc->firmware_rev_attr.attr); - - bmc->version_attr.attr.name = "ipmi_version"; - bmc->version_attr.attr.mode = S_IRUGO; - bmc->version_attr.show = ipmi_version_show; - sysfs_attr_init(&bmc->version_attr.attr); - - bmc->add_dev_support_attr.attr.name = "additional_device_support"; - bmc->add_dev_support_attr.attr.mode = S_IRUGO; - bmc->add_dev_support_attr.show = add_dev_support_show; - sysfs_attr_init(&bmc->add_dev_support_attr.attr); - - bmc->manufacturer_id_attr.attr.name = "manufacturer_id"; - bmc->manufacturer_id_attr.attr.mode = S_IRUGO; - bmc->manufacturer_id_attr.show = 
manufacturer_id_show; - sysfs_attr_init(&bmc->manufacturer_id_attr.attr); - - bmc->product_id_attr.attr.name = "product_id"; - bmc->product_id_attr.attr.mode = S_IRUGO; - bmc->product_id_attr.show = product_id_show; - sysfs_attr_init(&bmc->product_id_attr.attr); - - bmc->guid_attr.attr.name = "guid"; - bmc->guid_attr.attr.mode = S_IRUGO; - bmc->guid_attr.show = guid_show; - sysfs_attr_init(&bmc->guid_attr.attr); - - bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision"; - bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO; - bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show; - sysfs_attr_init(&bmc->aux_firmware_rev_attr.attr); - - err = device_create_file(&bmc->dev->dev, - &bmc->device_id_attr); - if (err) - goto out; - err = device_create_file(&bmc->dev->dev, - &bmc->provides_dev_sdrs_attr); - if (err) - goto out_devid; - err = device_create_file(&bmc->dev->dev, - &bmc->revision_attr); - if (err) - goto out_sdrs; - err = device_create_file(&bmc->dev->dev, - &bmc->firmware_rev_attr); - if (err) - goto out_rev; - err = device_create_file(&bmc->dev->dev, - &bmc->version_attr); - if (err) - goto out_firm; - err = device_create_file(&bmc->dev->dev, - &bmc->add_dev_support_attr); - if (err) - goto out_version; - err = device_create_file(&bmc->dev->dev, - &bmc->manufacturer_id_attr); - if (err) - goto out_add_dev; - err = device_create_file(&bmc->dev->dev, - &bmc->product_id_attr); - if (err) - goto out_manu; - if (bmc->id.aux_firmware_revision_set) { - err = device_create_file(&bmc->dev->dev, - &bmc->aux_firmware_rev_attr); - if (err) - goto out_prod_id; - } - if (bmc->guid_set) { - err = device_create_file(&bmc->dev->dev, - &bmc->guid_attr); - if (err) - goto out_aux_firm; - } - - return 0; - -out_aux_firm: - if (bmc->id.aux_firmware_revision_set) - device_remove_file(&bmc->dev->dev, - &bmc->aux_firmware_rev_attr); -out_prod_id: - device_remove_file(&bmc->dev->dev, - &bmc->product_id_attr); -out_manu: - device_remove_file(&bmc->dev->dev, - &bmc->manufacturer_id_attr); -out_add_dev: - device_remove_file(&bmc->dev->dev, - &bmc->add_dev_support_attr); -out_version: - device_remove_file(&bmc->dev->dev, - &bmc->version_attr); -out_firm: - device_remove_file(&bmc->dev->dev, - &bmc->firmware_rev_attr); -out_rev: - device_remove_file(&bmc->dev->dev, - &bmc->revision_attr); -out_sdrs: - device_remove_file(&bmc->dev->dev, - &bmc->provides_dev_sdrs_attr); -out_devid: - device_remove_file(&bmc->dev->dev, - &bmc->device_id_attr); -out: - return err; -} - -static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum, - const char *sysfs_name) +static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum) { int rv; struct bmc_device *bmc = intf->bmc; struct bmc_device *old_bmc; - int size; - char dummy[1]; mutex_lock(&ipmidriver_mutex); @@ -2486,7 +2468,7 @@ intf->bmc = old_bmc; bmc = old_bmc; - kref_get(&bmc->refcount); + kref_get(&bmc->usecount); mutex_unlock(&ipmidriver_mutex); printk(KERN_INFO @@ -2496,12 +2478,12 @@ bmc->id.product_id, bmc->id.device_id); } else { - char name[14]; unsigned char orig_dev_id = bmc->id.device_id; int warn_printed = 0; - snprintf(name, sizeof(name), + snprintf(bmc->name, sizeof(bmc->name), "ipmi_bmc.%4.4x", bmc->id.product_id); + bmc->pdev.name = bmc->name; while (ipmi_find_bmc_prod_dev_id(&ipmidriver.driver, bmc->id.product_id, @@ -2525,23 +2507,16 @@ } } - bmc->dev = platform_device_alloc(name, bmc->id.device_id); - if (!bmc->dev) { - mutex_unlock(&ipmidriver_mutex); - printk(KERN_ERR - "ipmi_msghandler:" - " Unable to allocate platform device\n"); - return -ENOMEM; - } 
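[Editor's note: the create_files()/remove_files() boilerplate being deleted here, ten device_create_file()/device_remove_file() pairs with hand-rolled error unwinding, is replaced by the static attribute table and is_visible() callback added earlier in this hunk. A plain-C sketch of that pattern, with demo types instead of the kernel's kobject machinery:]

#include <stdio.h>

struct demo_attr { const char *name; };

struct demo_bmc { int guid_set; int aux_fw_set; };

static struct demo_attr attr_devid  = { "device_id" };
static struct demo_attr attr_aux_fw = { "aux_firmware_revision" };
static struct demo_attr attr_guid   = { "guid" };

static struct demo_attr *attrs[] = {
        &attr_devid, &attr_aux_fw, &attr_guid, NULL
};

/* Mirrors bmc_dev_attr_is_visible(): hide attributes whose backing
 * data was never read from the BMC. */
static int is_visible(struct demo_bmc *bmc, struct demo_attr *a)
{
        if (a == &attr_aux_fw)
                return bmc->aux_fw_set;
        if (a == &attr_guid)
                return bmc->guid_set;
        return 1;
}

int main(void)
{
        struct demo_bmc bmc = { .guid_set = 1, .aux_fw_set = 0 };
        int i;

        /* The driver core walks the group once; no unwinding needed. */
        for (i = 0; attrs[i]; i++)
                if (is_visible(&bmc, attrs[i]))
                        printf("create %s\n", attrs[i]->name);
        return 0;
}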
- bmc->dev->dev.driver = &ipmidriver.driver; - dev_set_drvdata(&bmc->dev->dev, bmc); - kref_init(&bmc->refcount); + bmc->pdev.dev.driver = &ipmidriver.driver; + bmc->pdev.id = bmc->id.device_id; + bmc->pdev.dev.release = release_bmc_device; + bmc->pdev.dev.type = &bmc_device_type; + kref_init(&bmc->usecount); - rv = platform_device_add(bmc->dev); + rv = platform_device_register(&bmc->pdev); mutex_unlock(&ipmidriver_mutex); if (rv) { - platform_device_put(bmc->dev); - bmc->dev = NULL; + put_device(&bmc->pdev.dev); printk(KERN_ERR "ipmi_msghandler:" " Unable to register bmc device: %d\n", @@ -2553,15 +2528,6 @@ return rv; } - rv = create_files(bmc); - if (rv) { - mutex_lock(&ipmidriver_mutex); - platform_device_unregister(bmc->dev); - mutex_unlock(&ipmidriver_mutex); - - return rv; - } - dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, " "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", bmc->id.manufacturer_id, @@ -2573,44 +2539,26 @@ * create symlink from system interface device to bmc device * and back. */ - intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL); - if (!intf->sysfs_name) { - rv = -ENOMEM; - printk(KERN_ERR - "ipmi_msghandler: allocate link to BMC: %d\n", - rv); - goto out_err; - } - - rv = sysfs_create_link(&intf->si_dev->kobj, - &bmc->dev->dev.kobj, intf->sysfs_name); + rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc"); if (rv) { - kfree(intf->sysfs_name); - intf->sysfs_name = NULL; printk(KERN_ERR "ipmi_msghandler: Unable to create bmc symlink: %d\n", rv); goto out_err; } - size = snprintf(dummy, 0, "ipmi%d", ifnum); - intf->my_dev_name = kmalloc(size+1, GFP_KERNEL); + intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", ifnum); if (!intf->my_dev_name) { - kfree(intf->sysfs_name); - intf->sysfs_name = NULL; rv = -ENOMEM; printk(KERN_ERR "ipmi_msghandler: allocate link from BMC: %d\n", rv); goto out_err; } - snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum); - rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj, + rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj, intf->my_dev_name); if (rv) { - kfree(intf->sysfs_name); - intf->sysfs_name = NULL; kfree(intf->my_dev_name); intf->my_dev_name = NULL; printk(KERN_ERR @@ -2755,7 +2703,6 @@ = IPMI_CHANNEL_MEDIUM_IPMB; intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB; - rv = -ENOSYS; intf->curr_channel = IPMI_MAX_CHANNELS; wake_up(&intf->waitq); @@ -2780,12 +2727,12 @@ if (rv) { /* Got an error somehow, just give up. 
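[Editor's note: the registration path now ties the BMC's lifetime to the renamed kref (usecount), with the final kref_put() tearing the device down and the device release hook (release_bmc_device) doing the kfree(). A minimal userspace model of that pattern; the kernel's kref is atomic, a plain int keeps the demo short:]

#include <stdio.h>
#include <stdlib.h>

struct demo_kref { int refcount; };

struct demo_bmc {
        struct demo_kref usecount;  /* first member, see cast below */
        const char *name;
};

static void demo_kref_init(struct demo_kref *k) { k->refcount = 1; }
static void demo_kref_get(struct demo_kref *k)  { k->refcount++; }

static void demo_kref_put(struct demo_kref *k,
                          void (*release)(struct demo_kref *))
{
        if (--k->refcount == 0)
                release(k);
}

static void demo_release(struct demo_kref *k)
{
        /* container_of by hand: usecount is the first member. */
        struct demo_bmc *bmc = (struct demo_bmc *)k;

        printf("releasing %s\n", bmc->name);
        free(bmc);
}

int main(void)
{
        struct demo_bmc *bmc = malloc(sizeof(*bmc));

        bmc->name = "ipmi_bmc.0001";
        demo_kref_init(&bmc->usecount);  /* first interface holds a ref */
        demo_kref_get(&bmc->usecount);   /* second interface, same BMC   */
        demo_kref_put(&bmc->usecount, demo_release);  /* no free yet */
        demo_kref_put(&bmc->usecount, demo_release);  /* frees here  */
        return 0;
}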
*/ + printk(KERN_WARNING PFX + "Error sending channel information for channel" + " %d: %d\n", intf->curr_channel, rv); + intf->curr_channel = IPMI_MAX_CHANNELS; wake_up(&intf->waitq); - - printk(KERN_WARNING PFX - "Error sending channel information: %d\n", - rv); } } out: @@ -2806,11 +2753,10 @@ } EXPORT_SYMBOL(ipmi_poll_interface); -int ipmi_register_smi(struct ipmi_smi_handlers *handlers, +int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, void *send_info, struct ipmi_device_id *device_id, struct device *si_dev, - const char *sysfs_name, unsigned char slave_addr) { int i, j; @@ -2869,13 +2815,18 @@ #ifdef CONFIG_PROC_FS mutex_init(&intf->proc_entry_lock); #endif - spin_lock_init(&intf->waiting_msgs_lock); - INIT_LIST_HEAD(&intf->waiting_msgs); + spin_lock_init(&intf->waiting_rcv_msgs_lock); + INIT_LIST_HEAD(&intf->waiting_rcv_msgs); tasklet_init(&intf->recv_tasklet, smi_recv_tasklet, (unsigned long) intf); atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0); + spin_lock_init(&intf->xmit_msgs_lock); + INIT_LIST_HEAD(&intf->xmit_msgs); + INIT_LIST_HEAD(&intf->hp_xmit_msgs); spin_lock_init(&intf->events_lock); + atomic_set(&intf->event_waiters, 0); + intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; INIT_LIST_HEAD(&intf->waiting_events); intf->waiting_events_count = 0; mutex_init(&intf->cmd_rcvrs_mutex); @@ -2921,8 +2872,12 @@ intf->null_user_handler = channel_handler; intf->curr_channel = 0; rv = send_channel_info_cmd(intf, 0); - if (rv) + if (rv) { + printk(KERN_WARNING PFX + "Error sending channel information for channel" + " 0, %d\n", rv); goto out; + } /* Wait for the channel info to be read. */ wait_event(intf->waitq, @@ -2938,7 +2893,7 @@ if (rv == 0) rv = add_proc_entries(intf, i); - rv = ipmi_bmc_register(intf, i, sysfs_name); + rv = ipmi_bmc_register(intf, i); out: if (rv) { @@ -2968,12 +2923,50 @@ } EXPORT_SYMBOL(ipmi_register_smi); +static void deliver_smi_err_response(ipmi_smi_t intf, + struct ipmi_smi_msg *msg, + unsigned char err) +{ + msg->rsp[0] = msg->data[0] | 4; + msg->rsp[1] = msg->data[1]; + msg->rsp[2] = err; + msg->rsp_size = 3; + /* It's an error, so it will never requeue, no need to check return. */ + handle_one_recv_msg(intf, msg); +} + static void cleanup_smi_msgs(ipmi_smi_t intf) { int i; struct seq_table *ent; + struct ipmi_smi_msg *msg; + struct list_head *entry; + struct list_head tmplist; + + /* Clear out our transmit queues and hold the messages. */ + INIT_LIST_HEAD(&tmplist); + list_splice_tail(&intf->hp_xmit_msgs, &tmplist); + list_splice_tail(&intf->xmit_msgs, &tmplist); + + /* Current message first, to preserve order */ + while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) { + /* Wait for the message to clear out. */ + schedule_timeout(1); + } /* No need for locks, the interface is down. */ + + /* + * Return errors for all pending messages in queue and in the + * tables waiting for remote responses. 
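[Editor's note: deliver_smi_err_response() above fabricates a response in place. In the SMI message format byte 0 is (netfn << 2) | LUN, so OR-ing in 4 flips the netfn's low bit, turning a request netfn (even) into the matching response netfn (odd); byte 1 is the command and byte 2 the completion code. A standalone sketch of that encoding:]

#include <stdio.h>

#define IPMI_ERR_UNSPECIFIED 0xff

int main(void)
{
        /* App request netfn (0x06), Get Device ID command (0x01). */
        unsigned char data[2] = { 0x06 << 2, 0x01 };
        unsigned char rsp[3];

        rsp[0] = data[0] | 4;            /* netfn 0x06 -> 0x07 (response) */
        rsp[1] = data[1];                /* same command                  */
        rsp[2] = IPMI_ERR_UNSPECIFIED;   /* completion code               */

        printf("netfn=%#x cmd=%#x cc=%#x\n", rsp[0] >> 2, rsp[1], rsp[2]);
        return 0;
}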
+ */ + while (!list_empty(&tmplist)) { + entry = tmplist.next; + list_del(entry); + msg = list_entry(entry, struct ipmi_smi_msg, link); + deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED); + } + for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { ent = &(intf->seq_table[i]); if (!ent->inuse) @@ -2985,20 +2978,33 @@ int ipmi_unregister_smi(ipmi_smi_t intf) { struct ipmi_smi_watcher *w; - int intf_num = intf->intf_num; + int intf_num = intf->intf_num; + ipmi_user_t user; ipmi_bmc_unregister(intf); mutex_lock(&smi_watchers_mutex); mutex_lock(&ipmi_interfaces_mutex); intf->intf_num = -1; - intf->handlers = NULL; + intf->in_shutdown = true; list_del_rcu(&intf->link); mutex_unlock(&ipmi_interfaces_mutex); synchronize_rcu(); cleanup_smi_msgs(intf); + /* Clean up the effects of users on the lower-level software. */ + mutex_lock(&ipmi_interfaces_mutex); + rcu_read_lock(); + list_for_each_entry_rcu(user, &intf->users, link) { + module_put(intf->handlers->owner); + if (intf->handlers->dec_usecount) + intf->handlers->dec_usecount(intf->send_info); + } + rcu_read_unlock(); + intf->handlers = NULL; + mutex_unlock(&ipmi_interfaces_mutex); + remove_proc_entries(intf); /* @@ -3088,7 +3094,6 @@ ipmi_user_t user = NULL; struct ipmi_ipmb_addr *ipmb_addr; struct ipmi_recv_msg *recv_msg; - struct ipmi_smi_handlers *handlers; if (msg->rsp_size < 10) { /* Message not big enough, just ignore it. */ @@ -3142,9 +3147,8 @@ } #endif rcu_read_lock(); - handlers = intf->handlers; - if (handlers) { - handlers->sender(intf->send_info, msg, 0); + if (!intf->in_shutdown) { + smi_send(intf, intf->handlers, msg, 0); /* * We used the message, so return the value * that causes it to not be freed or @@ -3811,32 +3815,36 @@ /* See if any waiting messages need to be processed. */ if (!run_to_completion) - spin_lock_irqsave(&intf->waiting_msgs_lock, flags); - while (!list_empty(&intf->waiting_msgs)) { - smi_msg = list_entry(intf->waiting_msgs.next, + spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); + while (!list_empty(&intf->waiting_rcv_msgs)) { + smi_msg = list_entry(intf->waiting_rcv_msgs.next, struct ipmi_smi_msg, link); list_del(&smi_msg->link); if (!run_to_completion) - spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); + spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, + flags); rv = handle_one_recv_msg(intf, smi_msg); if (!run_to_completion) - spin_lock_irqsave(&intf->waiting_msgs_lock, flags); - if (rv == 0) { - /* Message handled */ - ipmi_free_smi_msg(smi_msg); - } else if (rv < 0) { - /* Fatal error on the message, del but don't free. */ - } else { + spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); + if (rv > 0) { /* * To preserve message order, quit if we - * can't handle a message. + * can't handle a message. Add the message + * back at the head, this is safe because this + * tasklet is the only thing that pulls the + * messages. */ - list_add(&smi_msg->link, &intf->waiting_msgs); + list_add(&smi_msg->link, &intf->waiting_rcv_msgs); break; + } else { + if (rv == 0) + /* Message handled */ + ipmi_free_smi_msg(smi_msg); + /* If rv < 0, fatal error, del but don't free. */ } } if (!run_to_completion) - spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); + spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags); /* * If the pretimout count is non-zero, decrement one from it and @@ -3857,7 +3865,41 @@ static void smi_recv_tasklet(unsigned long val) { - handle_new_recv_msgs((ipmi_smi_t) val); + unsigned long flags = 0; /* keep us warning-free. 
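[Editor's note: the reworked handle_new_recv_msgs() loop below pops from the head of waiting_rcv_msgs, and on a "can't handle right now" result (rv > 0) pushes the message back at the head and stops, preserving delivery order for the next pass. A userspace model of that drain loop, with an array standing in for the list and the patch's return-code convention (0 = handled, < 0 = fatal, > 0 = requeue):]

#include <stdio.h>

static int queue[8] = { 1, 2, 3 };
static int qlen = 3;

/* Pretend message 2 cannot be delivered yet. */
static int handle_one(int msg) { return msg == 2 ? 1 : 0; }

static void drain(void)
{
        while (qlen > 0) {
                int msg = queue[0], rv, i;

                for (i = 1; i < qlen; i++)      /* pop head */
                        queue[i - 1] = queue[i];
                qlen--;

                rv = handle_one(msg);
                if (rv > 0) {
                        for (i = qlen; i > 0; i--)  /* push back at head */
                                queue[i] = queue[i - 1];
                        queue[0] = msg;
                        qlen++;
                        break;          /* keep ordering, retry later */
                }
                /* rv == 0: handled (freed); rv < 0 would drop it. */
                printf("handled %d\n", msg);
        }
}

int main(void)
{
        drain();
        printf("left in queue: %d (head=%d)\n", qlen, qlen ? queue[0] : -1);
        return 0;
}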
*/ + ipmi_smi_t intf = (ipmi_smi_t) val; + int run_to_completion = intf->run_to_completion; + struct ipmi_smi_msg *newmsg = NULL; + + /* + * Start the next message if available. + * + * Do this here, not in the actual receiver, because we may deadlock + * because the lower layer is allowed to hold locks while calling + * message delivery. + */ + if (!run_to_completion) + spin_lock_irqsave(&intf->xmit_msgs_lock, flags); + if (intf->curr_msg == NULL && !intf->in_shutdown) { + struct list_head *entry = NULL; + + /* Pick the high priority queue first. */ + if (!list_empty(&intf->hp_xmit_msgs)) + entry = intf->hp_xmit_msgs.next; + else if (!list_empty(&intf->xmit_msgs)) + entry = intf->xmit_msgs.next; + + if (entry) { + list_del(entry); + newmsg = list_entry(entry, struct ipmi_smi_msg, link); + intf->curr_msg = newmsg; + } + } + if (!run_to_completion) + spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); + if (newmsg) + intf->handlers->sender(intf->send_info, newmsg); + + handle_new_recv_msgs(intf); } /* Handle a new message from the lower layer. */ @@ -3865,13 +3907,16 @@ struct ipmi_smi_msg *msg) { unsigned long flags = 0; /* keep us warning-free. */ - int run_to_completion; - + int run_to_completion = intf->run_to_completion; if ((msg->data_size >= 2) && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) && (msg->data[1] == IPMI_SEND_MSG_CMD) && (msg->user_data == NULL)) { + + if (intf->in_shutdown) + goto free_msg; + /* * This is the local response to a command send, start * the timer for these. The user_data will not be @@ -3907,29 +3952,44 @@ /* The message was sent, start the timer. */ intf_start_seq_timer(intf, msg->msgid); +free_msg: ipmi_free_smi_msg(msg); - goto out; + } else { + /* + * To preserve message order, we keep a queue and deliver from + * a tasklet. + */ + if (!run_to_completion) + spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); + list_add_tail(&msg->link, &intf->waiting_rcv_msgs); + if (!run_to_completion) + spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, + flags); } + if (!run_to_completion) + spin_lock_irqsave(&intf->xmit_msgs_lock, flags); /* - * To preserve message order, if the list is not empty, we - * tack this message onto the end of the list. + * We can get an asynchronous event or receive message in addition + * to commands we send. 
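[Editor's note: the tasklet body above shows the run_to_completion pattern: the same code path runs either from normal context, where it takes xmit_msgs_lock, or from panic/poll context where locking could deadlock, so the lock is skipped. It also always drains the high-priority queue first. A sketch with a pthread mutex standing in for the spinlock:]

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t xmit_lock = PTHREAD_MUTEX_INITIALIZER;
static int hp_pending = 1, norm_pending = 1;

static void pick_next(int run_to_completion)
{
        const char *pick = NULL;

        if (!run_to_completion)
                pthread_mutex_lock(&xmit_lock);

        /* High-priority queue is always drained first. */
        if (hp_pending) {
                hp_pending = 0;
                pick = "high-priority";
        } else if (norm_pending) {
                norm_pending = 0;
                pick = "normal";
        }

        if (!run_to_completion)
                pthread_mutex_unlock(&xmit_lock);

        if (pick)
                printf("start %s message\n", pick);
}

int main(void)
{
        pick_next(0);   /* normal context: locked      */
        pick_next(1);   /* panic-style context: no lock */
        return 0;
}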
*/ - run_to_completion = intf->run_to_completion; - if (!run_to_completion) - spin_lock_irqsave(&intf->waiting_msgs_lock, flags); - list_add_tail(&msg->link, &intf->waiting_msgs); + if (msg == intf->curr_msg) + intf->curr_msg = NULL; if (!run_to_completion) - spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); + spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); - tasklet_schedule(&intf->recv_tasklet); - out: - return; + if (run_to_completion) + smi_recv_tasklet((unsigned long) intf); + else + tasklet_schedule(&intf->recv_tasklet); } EXPORT_SYMBOL(ipmi_smi_msg_received); void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf) { + if (intf->in_shutdown) + return; + atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1); tasklet_schedule(&intf->recv_tasklet); } @@ -3965,20 +4025,23 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, struct list_head *timeouts, long timeout_period, - int slot, unsigned long *flags) + int slot, unsigned long *flags, + unsigned int *waiting_msgs) { struct ipmi_recv_msg *msg; - struct ipmi_smi_handlers *handlers; + const struct ipmi_smi_handlers *handlers; - if (intf->intf_num == -1) + if (intf->in_shutdown) return; if (!ent->inuse) return; ent->timeout -= timeout_period; - if (ent->timeout > 0) + if (ent->timeout > 0) { + (*waiting_msgs)++; return; + } if (ent->retries_left == 0) { /* The message has used all its retries. */ @@ -3995,6 +4058,8 @@ struct ipmi_smi_msg *smi_msg; /* More retries, send again. */ + (*waiting_msgs)++; + /* * Start with the max timer, set to normal timer after * the message is sent. @@ -4031,8 +4096,7 @@ ipmi_inc_stat(intf, retransmitted_ipmb_commands); - intf->handlers->sender(intf->send_info, - smi_msg, 0); + smi_send(intf, handlers, smi_msg, 0); } else ipmi_free_smi_msg(smi_msg); @@ -4040,122 +4104,119 @@ } } -static void ipmi_timeout_handler(long timeout_period) +static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period) { - ipmi_smi_t intf; struct list_head timeouts; struct ipmi_recv_msg *msg, *msg2; unsigned long flags; int i; + unsigned int waiting_msgs = 0; - rcu_read_lock(); - list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { - tasklet_schedule(&intf->recv_tasklet); - - /* - * Go through the seq table and find any messages that - * have timed out, putting them in the timeouts - * list. - */ - INIT_LIST_HEAD(&timeouts); - spin_lock_irqsave(&intf->seq_lock, flags); - for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) - check_msg_timeout(intf, &(intf->seq_table[i]), - &timeouts, timeout_period, i, - &flags); - spin_unlock_irqrestore(&intf->seq_lock, flags); + /* + * Go through the seq table and find any messages that + * have timed out, putting them in the timeouts + * list. + */ + INIT_LIST_HEAD(&timeouts); + spin_lock_irqsave(&intf->seq_lock, flags); + for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) + check_msg_timeout(intf, &(intf->seq_table[i]), + &timeouts, timeout_period, i, + &flags, &waiting_msgs); + spin_unlock_irqrestore(&intf->seq_lock, flags); - list_for_each_entry_safe(msg, msg2, &timeouts, link) - deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE); + list_for_each_entry_safe(msg, msg2, &timeouts, link) + deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE); - /* - * Maintenance mode handling. Check the timeout - * optimistically before we claim the lock. It may - * mean a timeout gets missed occasionally, but that - * only means the timeout gets extended by one period - * in that case. No big deal, and it avoids the lock - * most of the time. 
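[Editor's note: check_msg_timeout() now reports, via the new waiting_msgs counter, whether any sequence-table entry is still pending, which is what lets the timer stop when idle. Per entry it subtracts the elapsed period, retransmits while retries remain, and delivers a timeout error when they run out. A sketch of that bookkeeping with illustrative values:]

#include <stdio.h>

struct seq_ent { int inuse, timeout, retries_left; };

static void check_one(struct seq_ent *e, int period, unsigned int *waiting)
{
        if (!e->inuse)
                return;

        e->timeout -= period;
        if (e->timeout > 0) {           /* still waiting for a response */
                (*waiting)++;
                return;
        }

        if (e->retries_left == 0) {     /* give up, deliver timeout error */
                e->inuse = 0;
                printf("entry timed out\n");
                return;
        }

        e->retries_left--;              /* resend and keep waiting */
        (*waiting)++;
        e->timeout = 5000;
        printf("retransmit, %d retries left\n", e->retries_left);
}

int main(void)
{
        struct seq_ent e = { 1, 1500, 2 };
        unsigned int waiting;

        do {
                waiting = 0;
                check_one(&e, 1000, &waiting);
        } while (waiting);              /* timer may stop once idle */
        return 0;
}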
- */ + /* + * Maintenance mode handling. Check the timeout + * optimistically before we claim the lock. It may + * mean a timeout gets missed occasionally, but that + * only means the timeout gets extended by one period + * in that case. No big deal, and it avoids the lock + * most of the time. + */ + if (intf->auto_maintenance_timeout > 0) { + spin_lock_irqsave(&intf->maintenance_mode_lock, flags); if (intf->auto_maintenance_timeout > 0) { - spin_lock_irqsave(&intf->maintenance_mode_lock, flags); - if (intf->auto_maintenance_timeout > 0) { - intf->auto_maintenance_timeout - -= timeout_period; - if (!intf->maintenance_mode - && (intf->auto_maintenance_timeout <= 0)) { - intf->maintenance_mode_enable = 0; - maintenance_mode_update(intf); - } + intf->auto_maintenance_timeout + -= timeout_period; + if (!intf->maintenance_mode + && (intf->auto_maintenance_timeout <= 0)) { + intf->maintenance_mode_enable = false; + maintenance_mode_update(intf); } - spin_unlock_irqrestore(&intf->maintenance_mode_lock, - flags); } + spin_unlock_irqrestore(&intf->maintenance_mode_lock, + flags); } - rcu_read_unlock(); + + tasklet_schedule(&intf->recv_tasklet); + + return waiting_msgs; } -static void ipmi_request_event(void) +static void ipmi_request_event(ipmi_smi_t intf) { - ipmi_smi_t intf; - struct ipmi_smi_handlers *handlers; - - rcu_read_lock(); - /* - * Called from the timer, no need to check if handlers is - * valid. - */ - list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { - /* No event requests when in maintenance mode. */ - if (intf->maintenance_mode_enable) - continue; + /* No event requests when in maintenance mode. */ + if (intf->maintenance_mode_enable) + return; - handlers = intf->handlers; - if (handlers) - handlers->request_events(intf->send_info); - } - rcu_read_unlock(); + if (!intf->in_shutdown) + intf->handlers->request_events(intf->send_info); } static struct timer_list ipmi_timer; -/* Call every ~1000 ms. */ -#define IPMI_TIMEOUT_TIME 1000 - -/* How many jiffies does it take to get to the timeout time. */ -#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000) - -/* - * Request events from the queue every second (this is the number of - * IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the - * future, IPMI will add a way to know immediately if an event is in - * the queue and this silliness can go away. 
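[Editor's note: the rewritten ipmi_timeout() below makes the periodic timer on-demand: the handler re-arms itself only while some interface still has event waiters or pending messages (nt != 0), and the new need_waiter() restarts it when work appears; the comment in the patch notes the start is racy but harmless. A small simulation of that arm/disarm logic, with a loop counter standing in for jiffies:]

#include <stdio.h>

static int timer_armed;
static int event_waiters = 2;   /* pretend two users want events */

static void timeout_handler(void)
{
        int nt = 0;

        if (event_waiters) {
                event_waiters--;  /* pretend one waiter was satisfied */
                nt++;
        }

        timer_armed = nt != 0;  /* mod_timer() only when still needed */
}

static void need_waiter(void)
{
        /* Racy in the real code, and harmless: worst case the
         * timer is started twice. */
        if (!timer_armed)
                timer_armed = 1;
}

int main(void)
{
        need_waiter();          /* new user registered */
        while (timer_armed) {
                timeout_handler();
                printf("tick, armed=%d\n", timer_armed);
        }
        printf("timer idle\n");
        return 0;
}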
- */ -#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME)) - static atomic_t stop_operation; -static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME; static void ipmi_timeout(unsigned long data) { + ipmi_smi_t intf; + int nt = 0; + if (atomic_read(&stop_operation)) return; - ticks_to_req_ev--; - if (ticks_to_req_ev == 0) { - ipmi_request_event(); - ticks_to_req_ev = IPMI_REQUEST_EV_TIME; - } + rcu_read_lock(); + list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { + int lnt = 0; + + if (atomic_read(&intf->event_waiters)) { + intf->ticks_to_req_ev--; + if (intf->ticks_to_req_ev == 0) { + ipmi_request_event(intf); + intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; + } + lnt++; + } - ipmi_timeout_handler(IPMI_TIMEOUT_TIME); + lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME); - mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); + lnt = !!lnt; + if (lnt != intf->last_needs_timer && + intf->handlers->set_need_watch) + intf->handlers->set_need_watch(intf->send_info, lnt); + intf->last_needs_timer = lnt; + + nt += lnt; + } + rcu_read_unlock(); + + if (nt) + mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); } +static void need_waiter(ipmi_smi_t intf) +{ + /* Racy, but worst case we start the timer twice. */ + if (!timer_pending(&ipmi_timer)) + mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); +} static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0); static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0); -/* FIXME - convert these to slabs. */ static void free_smi_msg(struct ipmi_smi_msg *msg) { atomic_dec(&smi_msg_inuse_count); @@ -4244,6 +4305,9 @@ 0, 1); /* Don't retry, and don't wait. */ if (rv) atomic_sub(2, &panic_done_count); + else if (intf->handlers->flush_messages) + intf->handlers->flush_messages(intf->send_info); + while (atomic_read(&panic_done_count) != 0) ipmi_poll(intf); } @@ -4317,9 +4381,7 @@ /* Interface is not ready. */ continue; - intf->run_to_completion = 1; /* Send the event announcing the panic. */ - intf->handlers->set_run_to_completion(intf->send_info, 1); ipmi_panic_request_and_wait(intf, &addr, &msg); } @@ -4459,6 +4521,23 @@ /* Interface is not ready. */ continue; + /* + * If we were interrupted while locking xmit_msgs_lock or + * waiting_rcv_msgs_lock, the corresponding list may be + * corrupted. In this case, drop items on the list for + * the safety. + */ + if (!spin_trylock(&intf->xmit_msgs_lock)) { + INIT_LIST_HEAD(&intf->xmit_msgs); + INIT_LIST_HEAD(&intf->hp_xmit_msgs); + } else + spin_unlock(&intf->xmit_msgs_lock); + + if (!spin_trylock(&intf->waiting_rcv_msgs_lock)) + INIT_LIST_HEAD(&intf->waiting_rcv_msgs); + else + spin_unlock(&intf->waiting_rcv_msgs_lock); + intf->run_to_completion = 1; intf->handlers->set_run_to_completion(intf->send_info, 1); } @@ -4496,6 +4575,7 @@ proc_ipmi_root = proc_mkdir("ipmi", NULL); if (!proc_ipmi_root) { printk(KERN_ERR PFX "Unable to create IPMI proc dir"); + driver_unregister(&ipmidriver.driver); return -ENOMEM; }
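[Editor's note: the panic-path guard added in this last part treats a failed trylock as proof the lock holder was interrupted mid-update, so the message list may be half-written; rather than block forever inside the panic handler, it reinitializes the list and drops its contents. A userspace model of that decision, with a pthread mutex standing in for the spinlock (a default Linux mutex returns EBUSY on a same-thread trylock, which conveniently simulates the interrupted holder):]

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t xmit_lock = PTHREAD_MUTEX_INITIALIZER;
static int xmit_count = 3;      /* stand-in for the xmit_msgs list */

static void panic_prepare(void)
{
        if (pthread_mutex_trylock(&xmit_lock) != 0) {
                /* Holder was interrupted mid-update: the list is
                 * suspect, so reset it, dropping pending messages. */
                xmit_count = 0;
                printf("lock busy: dropped pending messages\n");
        } else {
                pthread_mutex_unlock(&xmit_lock);
                printf("lock free: %d messages kept\n", xmit_count);
        }
}

int main(void)
{
        panic_prepare();                     /* uncontended case */

        pthread_mutex_lock(&xmit_lock);      /* simulate interrupted holder */
        panic_prepare();
        pthread_mutex_unlock(&xmit_lock);
        return 0;
}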