if (strcmp("free", parm) == 0) {
                rc = blacklist_parse_parameters(buf, free, 0);
-               /* There could be subchannels without proper devices connected.
-                * evaluate all the entries
+               /*
+                * Evaluate only the subchannels without an online device.
+                * This way, no path verification is triggered on those
+                * subchannels, avoiding unnecessary delays.
                 */
-               css_schedule_eval_all();
+               css_schedule_eval_cond(CSS_EVAL_NOT_ONLINE, 0);
        } else if (strcmp("add", parm) == 0)
                rc = blacklist_parse_parameters(buf, add, 0);
        else if (strcmp("purge", parm) == 0)
 
        return 0;
 }
 
-void css_schedule_eval_all_unreg(unsigned long delay)
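+/*
+ * Callback for bus_for_each_dev(): remove each subchannel that has an
+ * online ccw device from the idset, leaving only the subchannels
+ * without an online device for evaluation.
+ */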
+static int __unset_online(struct device *dev, void *data)
+{
+       struct idset *set = data;
+       struct subchannel *sch = to_subchannel(dev);
+       struct ccw_device *cdev = sch_get_cdev(sch);
+
+       if (cdev && cdev->online)
+               idset_sch_del(set, sch->schid);
+
+       return 0;
+}
+
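+/*
+ * Schedule an evaluation of all subchannels that match the given
+ * condition after @delay jiffies. If no idset can be allocated, fall
+ * back to evaluating all subchannels.
+ */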
+void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay)
 {
        unsigned long flags;
-       struct idset *unreg_set;
+       struct idset *set;
 
-       /* Find unregistered subchannels. */
+       /* Find the subchannels that match the given condition. */
-       unreg_set = idset_sch_new();
-       if (!unreg_set) {
+       set = idset_sch_new();
+       if (!set) {
                /* Fallback. */
                css_schedule_eval_all();
                return;
        }
-       idset_fill(unreg_set);
-       bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
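+       /*
+        * Fill the set with all subchannels; the selected callback then
+        * removes those that do not match the condition.
+        */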
+       idset_fill(set);
+       switch (cond) {
+       case CSS_EVAL_UNREG:
+               bus_for_each_dev(&css_bus_type, NULL, set, __unset_registered);
+               break;
+       case CSS_EVAL_NOT_ONLINE:
+               bus_for_each_dev(&css_bus_type, NULL, set, __unset_online);
+               break;
+       default:
+               break;
+       }
+
        /* Apply to slow_subchannel_set. */
        spin_lock_irqsave(&slow_subchannel_lock, flags);
-       idset_add_set(slow_subchannel_set, unreg_set);
+       idset_add_set(slow_subchannel_set, set);
        atomic_set(&css_eval_scheduled, 1);
        queue_delayed_work(cio_work_q, &slow_path_work, delay);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
-       idset_free(unreg_set);
+       idset_free(set);
 }
 
 void css_schedule_reprobe(void)
 {
        /* Schedule with a delay to allow merging of subsequent calls. */
-       css_schedule_eval_all_unreg(1 * HZ);
+       css_schedule_eval_cond(CSS_EVAL_UNREG, 1 * HZ);
 }
 EXPORT_SYMBOL_GPL(css_schedule_reprobe);
 
 
 #define SNID_STATE3_MULTI_PATH     1
 #define SNID_STATE3_SINGLE_PATH    0
 
+/*
+ * Conditions used to specify which subchannels need evaluation
+ */
+enum css_eval_cond {
+       CSS_EVAL_UNREG,         /* unregistered subchannels */
+       CSS_EVAL_NOT_ONLINE     /* subchannels without an online device */
+};
+
 struct path_state {
        __u8  state1 : 2;       /* path state value 1 */
        __u8  state2 : 2;       /* path state value 2 */
 /* Helper functions to build lists for the slow path. */
 void css_schedule_eval(struct subchannel_id schid);
 void css_schedule_eval_all(void);
-void css_schedule_eval_all_unreg(unsigned long delay);
+void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay);
 int css_complete_work(void);
 
 int sch_is_pseudo_sch(struct subchannel *);