--- zzzz-none-000/linux-3.10.107/drivers/s390/cio/css.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/s390/cio/css.c	2021-02-04 17:41:59.000000000 +0000
@@ -44,7 +44,6 @@
 	int ret;
 
 	init_subchannel_id(&schid);
-	ret = -ENODEV;
 	do {
 		do {
 			ret = fn(schid, data);
@@ -69,7 +68,8 @@
 	struct cb_data *cb = data;
 	int rc = 0;
 
-	idset_sch_del(cb->set, sch->schid);
+	if (cb->set)
+		idset_sch_del(cb->set, sch->schid);
 	if (cb->fn_known_sch)
 		rc = cb->fn_known_sch(sch, cb->data);
 	return rc;
@@ -115,6 +115,13 @@
 	cb.fn_known_sch = fn_known;
 	cb.fn_unknown_sch = fn_unknown;
 
+	if (fn_known && !fn_unknown) {
+		/* Skip idset allocation in case of known-only loop. */
+		cb.set = NULL;
+		return bus_for_each_dev(&css_bus_type, NULL, &cb,
+					call_fn_known_sch);
+	}
+
 	cb.set = idset_sch_new();
 	if (!cb.set)
 		/* fall back to brute force scanning in case of oom */
@@ -546,11 +553,16 @@
 		case -ENOMEM:
 		case -EIO:
 			/* These should abort looping */
+			spin_lock_irq(&slow_subchannel_lock);
 			idset_sch_del_subseq(slow_subchannel_set, schid);
+			spin_unlock_irq(&slow_subchannel_lock);
 			break;
 		default:
 			rc = 0;
 		}
+		/* Allow scheduling here since the containing loop might
+		 * take a while. */
+		cond_resched();
 	}
 	return rc;
 }
@@ -570,7 +582,7 @@
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
-static DECLARE_WORK(slow_path_work, css_slow_path_func);
+static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
 struct workqueue_struct *cio_work_q;
 
 void css_schedule_eval(struct subchannel_id schid)
@@ -580,7 +592,7 @@
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_sch_add(slow_subchannel_set, schid);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(cio_work_q, &slow_path_work);
+	queue_delayed_work(cio_work_q, &slow_path_work, 0);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
@@ -591,7 +603,7 @@
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_fill(slow_subchannel_set);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(cio_work_q, &slow_path_work);
+	queue_delayed_work(cio_work_q, &slow_path_work, 0);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
@@ -604,7 +616,7 @@
 	return 0;
 }
 
-static void css_schedule_eval_all_unreg(void)
+void css_schedule_eval_all_unreg(unsigned long delay)
 {
 	unsigned long flags;
 	struct idset *unreg_set;
@@ -622,7 +634,7 @@
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_add_set(slow_subchannel_set, unreg_set);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(cio_work_q, &slow_path_work);
+	queue_delayed_work(cio_work_q, &slow_path_work, delay);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 	idset_free(unreg_set);
 }
@@ -635,7 +647,8 @@
 /* Schedule reprobing of all unregistered subchannels. */
 void css_schedule_reprobe(void)
 {
-	css_schedule_eval_all_unreg();
+	/* Schedule with a delay to allow merging of subsequent calls. */
+	css_schedule_eval_all_unreg(1 * HZ);
 }
 EXPORT_SYMBOL_GPL(css_schedule_reprobe);
 
@@ -689,17 +702,12 @@
 		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
 		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
 	} else {
-#ifdef CONFIG_SMP
 		css->global_pgid.pgid_high.cpu_addr = stap();
-#else
-		css->global_pgid.pgid_high.cpu_addr = 0;
-#endif
 	}
 	get_cpu_id(&cpu_id);
 	css->global_pgid.cpu_id = cpu_id.ident;
 	css->global_pgid.cpu_model = cpu_id.machine;
 	css->global_pgid.tod_high = tod_high;
-
 }
 
 static void
@@ -740,7 +748,7 @@
 	int ret;
 	unsigned long val;
 
-	ret = strict_strtoul(buf, 16, &val);
+	ret = kstrtoul(buf, 16, &val);
 	if (ret)
 		return ret;
 	mutex_lock(&css->mutex);
@@ -1075,6 +1083,7 @@
 		if (chp)
 			chp_update_desc(chp);
 	}
+	cmf_reactivate();
 }
 
 #ifdef CONFIG_PROC_FS
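Note on the recurring workqueue hunks above: the patch converts slow_path_work from DECLARE_WORK to DECLARE_DELAYED_WORK so that css_schedule_reprobe() can defer evaluation by one second while the urgent paths keep a delay of 0. The merging works because queue_delayed_work() returns false and does nothing while the work item is already pending, so a burst of reprobe requests within that second produces a single run of css_slow_path_func() over the accumulated slow_subchannel_set. The sketch below is not part of the tree; it shows the same pattern in isolation using only the standard workqueue API, with all demo_* identifiers made up for illustration.

/* Minimal sketch of the delayed-work merging pattern; demo_* names
 * are hypothetical stand-ins for the css.c machinery. */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned long demo_pending;	/* stand-in for slow_subchannel_set */

static void demo_eval_func(struct work_struct *work)
{
	unsigned long todo;

	/* Grab and clear the accumulated requests under the lock. */
	spin_lock_irq(&demo_lock);
	todo = demo_pending;
	demo_pending = 0;
	spin_unlock_irq(&demo_lock);

	pr_info("evaluating %lu merged request(s)\n", todo);
}

static DECLARE_DELAYED_WORK(demo_work, demo_eval_func);

/* Urgent path (cf. css_schedule_eval): queue with no delay. */
static void demo_schedule_now(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	demo_pending++;
	queue_delayed_work(system_wq, &demo_work, 0);
	spin_unlock_irqrestore(&demo_lock, flags);
}

/* Reprobe path (cf. css_schedule_reprobe): queue with a 1s delay;
 * while the work is pending, further queue_delayed_work() calls are
 * no-ops, so closely spaced requests collapse into one worker run. */
static void demo_schedule_merged(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	demo_pending++;
	queue_delayed_work(system_wq, &demo_work, 1 * HZ);
	spin_unlock_irqrestore(&demo_lock, flags);
}

static int __init demo_init(void)
{
	demo_schedule_now();	/* queued with delay 0 */
	demo_schedule_merged();	/* no-op: work already pending */
	demo_schedule_merged();	/* no-op as well; one run total */
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The same no-op-while-pending behavior is why the diff can pass delay 0 from css_schedule_eval() and css_schedule_eval_all() without changing their semantics: a zero-delay queue_delayed_work() behaves like the queue_work() it replaces.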