--- zzzz-none-000/linux-3.10.107/drivers/hv/channel_mgmt.c 2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/hv/channel_mgmt.c 2021-02-04 17:41:59.000000000 +0000
@@ -28,15 +28,13 @@
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/completion.h>
+#include <linux/delay.h>
 #include <linux/hyperv.h>
 
 #include "hyperv_vmbus.h"
 
-struct vmbus_channel_message_table_entry {
-	enum vmbus_channel_message_type message_type;
-	void (*message_handler)(struct vmbus_channel_message_header *msg);
-};
-
+static void init_vp_index(struct vmbus_channel *channel,
+			  const uuid_le *type_guid);
 
 /**
  * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
@@ -48,30 +46,39 @@
  * @negop is of type &struct icmsg_negotiate.
  * Set up and fill in default negotiate response message.
  *
- * The max_fw_version specifies the maximum framework version that
- * we can support and max _srv_version specifies the maximum service
- * version we can support. A special value MAX_SRV_VER can be
- * specified to indicate that we can handle the maximum version
- * exposed by the host.
+ * The fw_version specifies the framework version that
+ * we can support and srv_version specifies the service
+ * version we can support.
  *
  * Mainly used by Hyper-V drivers.
  */
-void vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
+bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
 			       struct icmsg_negotiate *negop, u8 *buf,
-			       int max_fw_version, int max_srv_version)
+			       int fw_version, int srv_version)
 {
-	int icframe_vercnt;
-	int icmsg_vercnt;
+	int icframe_major, icframe_minor;
+	int icmsg_major, icmsg_minor;
+	int fw_major, fw_minor;
+	int srv_major, srv_minor;
 	int i;
+	bool found_match = false;
 
 	icmsghdrp->icmsgsize = 0x10;
+	fw_major = (fw_version >> 16);
+	fw_minor = (fw_version & 0xFFFF);
+
+	srv_major = (srv_version >> 16);
+	srv_minor = (srv_version & 0xFFFF);
 
 	negop = (struct icmsg_negotiate *)&buf[
 		sizeof(struct vmbuspipe_hdr) +
 		sizeof(struct icmsg_hdr)];
 
-	icframe_vercnt = negop->icframe_vercnt;
-	icmsg_vercnt = negop->icmsg_vercnt;
+	icframe_major = negop->icframe_vercnt;
+	icframe_minor = 0;
+
+	icmsg_major = negop->icmsg_vercnt;
+	icmsg_minor = 0;
 
 	/*
 	 * Select the framework version number we will
@@ -79,26 +86,48 @@
 	 */
 	for (i = 0; i < negop->icframe_vercnt; i++) {
-		if (negop->icversion_data[i].major <= max_fw_version)
-			icframe_vercnt = negop->icversion_data[i].major;
+		if ((negop->icversion_data[i].major == fw_major) &&
+		    (negop->icversion_data[i].minor == fw_minor)) {
+			icframe_major = negop->icversion_data[i].major;
+			icframe_minor = negop->icversion_data[i].minor;
+			found_match = true;
+		}
 	}
 
+	if (!found_match)
+		goto fw_error;
+
+	found_match = false;
+
 	for (i = negop->icframe_vercnt;
 	     (i < negop->icframe_vercnt + negop->icmsg_vercnt); i++) {
-		if (negop->icversion_data[i].major <= max_srv_version)
-			icmsg_vercnt = negop->icversion_data[i].major;
+		if ((negop->icversion_data[i].major == srv_major) &&
+		    (negop->icversion_data[i].minor == srv_minor)) {
+			icmsg_major = negop->icversion_data[i].major;
+			icmsg_minor = negop->icversion_data[i].minor;
+			found_match = true;
+		}
 	}
 
 	/*
-	 * Respond with the maximum framework and service
+	 * Respond with the framework and service
 	 * version numbers we can support.
 	 */
-	negop->icframe_vercnt = 1;
-	negop->icmsg_vercnt = 1;
-	negop->icversion_data[0].major = icframe_vercnt;
-	negop->icversion_data[0].minor = 0;
-	negop->icversion_data[1].major = icmsg_vercnt;
-	negop->icversion_data[1].minor = 0;
+
+fw_error:
+	if (!found_match) {
+		negop->icframe_vercnt = 0;
+		negop->icmsg_vercnt = 0;
+	} else {
+		negop->icframe_vercnt = 1;
+		negop->icmsg_vercnt = 1;
+	}
+
+	negop->icversion_data[0].major = icframe_major;
+	negop->icversion_data[0].minor = icframe_minor;
+	negop->icversion_data[1].major = icmsg_major;
+	negop->icversion_data[1].minor = icmsg_minor;
+	return found_match;
 }
 EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
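With this rewrite the two version arguments are packed 32-bit values (major in the high 16 bits, minor in the low 16 bits), and the bool return tells the caller whether the host offered an exact match. A caller would look roughly like the sketch below; the EXAMPLE_* macro names are illustrative, not part of this patch:

	/* Pack major.minor as (major << 16 | minor) -- illustrative only. */
	#define EXAMPLE_FW_VERSION	((3 << 16) | 0)	/* framework 3.0 */
	#define EXAMPLE_SRV_VERSION	((3 << 16) | 0)	/* service 3.0 */

	if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
		if (!vmbus_prep_negotiate_resp(icmsghdrp, negop, buf,
					       EXAMPLE_FW_VERSION,
					       EXAMPLE_SRV_VERSION))
			pr_warn("host does not support fw 3.0 / srv 3.0\n");
	}

On a failed match the response is still sent, but with icframe_vercnt and icmsg_vercnt zeroed so the host can tell that no common version exists.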
@@ -108,86 +137,106 @@
  */
 static struct vmbus_channel *alloc_channel(void)
 {
+	static atomic_t chan_num = ATOMIC_INIT(0);
 	struct vmbus_channel *channel;
 
 	channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
 	if (!channel)
 		return NULL;
 
+	channel->id = atomic_inc_return(&chan_num);
 	spin_lock_init(&channel->inbound_lock);
+	spin_lock_init(&channel->lock);
 
-	channel->controlwq = create_workqueue("hv_vmbus_ctl");
-	if (!channel->controlwq) {
-		kfree(channel);
-		return NULL;
-	}
+	INIT_LIST_HEAD(&channel->sc_list);
+	INIT_LIST_HEAD(&channel->percpu_list);
 
 	return channel;
 }
 
 /*
- * release_hannel - Release the vmbus channel object itself
+ * free_channel - Release the resources used by the vmbus channel object
  */
-static void release_channel(struct work_struct *work)
+static void free_channel(struct vmbus_channel *channel)
 {
-	struct vmbus_channel *channel = container_of(work,
-						     struct vmbus_channel,
-						     work);
-
-	destroy_workqueue(channel->controlwq);
-
 	kfree(channel);
 }
 
-/*
- * free_channel - Release the resources used by the vmbus channel object
- */
-static void free_channel(struct vmbus_channel *channel)
+static void percpu_channel_enq(void *arg)
 {
+	struct vmbus_channel *channel = arg;
+	int cpu = smp_processor_id();
 
-	/*
-	 * We have to release the channel's workqueue/thread in the vmbus's
-	 * workqueue/thread context
-	 * ie we can't destroy ourselves.
-	 */
-	INIT_WORK(&channel->work, release_channel);
-	queue_work(vmbus_connection.work_queue, &channel->work);
+	list_add_tail(&channel->percpu_list, &hv_context.percpu_list[cpu]);
 }
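percpu_channel_enq() and its counterpart below run without any lock: each hv_context.percpu_list[cpu] is only ever touched from its owning CPU. Callers guarantee that with the idiom sketched here (the helper name is hypothetical; the patch open-codes this pattern in vmbus_process_offer() and hv_process_channel_removal()):

	/* Hypothetical helper: run @func(@channel) on channel->target_cpu. */
	static void run_on_target_cpu(struct vmbus_channel *channel,
				      void (*func)(void *))
	{
		if (channel->target_cpu != get_cpu()) {
			/* Not on the target CPU: execute there and wait. */
			put_cpu();
			smp_call_function_single(channel->target_cpu, func,
						 channel, true);
		} else {
			/* Already on the target CPU, preemption disabled. */
			func(channel);
			put_cpu();
		}
	}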
+static void percpu_channel_deq(void *arg)
+{
+	struct vmbus_channel *channel = arg;
+	list_del(&channel->percpu_list);
+}
 
-/*
- * vmbus_process_rescind_offer -
- * Rescind the offer by initiating a device removal
- */
-static void vmbus_process_rescind_offer(struct work_struct *work)
+
+void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
 {
-	struct vmbus_channel *channel = container_of(work,
-						     struct vmbus_channel,
-						     work);
-	unsigned long flags;
 	struct vmbus_channel_relid_released msg;
+	unsigned long flags;
+	struct vmbus_channel *primary_channel;
 
-	vmbus_device_unregister(channel->device_obj);
 	memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
-	msg.child_relid = channel->offermsg.child_relid;
+	msg.child_relid = relid;
 	msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
 	vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released));
 
-	spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
-	list_del(&channel->listentry);
-	spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
+	if (channel == NULL)
+		return;
+
+	BUG_ON(!channel->rescind);
+
+	if (channel->target_cpu != get_cpu()) {
+		put_cpu();
+		smp_call_function_single(channel->target_cpu,
+					 percpu_channel_deq, channel, true);
+	} else {
+		percpu_channel_deq(channel);
+		put_cpu();
+	}
+
+	if (channel->primary_channel == NULL) {
+		spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
+		list_del(&channel->listentry);
+		spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
+
+		primary_channel = channel;
+	} else {
+		primary_channel = channel->primary_channel;
+		spin_lock_irqsave(&primary_channel->lock, flags);
+		list_del(&channel->sc_list);
+		primary_channel->num_sc--;
+		spin_unlock_irqrestore(&primary_channel->lock, flags);
+	}
+
+	/*
+	 * We need to free the bit for init_vp_index() to work in the case
+	 * of sub-channel, when we reload drivers like hv_netvsc.
+	 */
+	cpumask_clear_cpu(channel->target_cpu,
+			  &primary_channel->alloced_cpus_in_node);
+
 	free_channel(channel);
 }
 
 void vmbus_free_channels(void)
 {
-	struct vmbus_channel *channel;
+	struct vmbus_channel *channel, *tmp;
+
+	list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
+		listentry) {
+		/* hv_process_channel_removal() needs this */
+		channel->rescind = true;
 
-	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
 		vmbus_device_unregister(channel->device_obj);
-		kfree(channel->device_obj);
-		free_channel(channel);
 	}
 }
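vmbus_free_channels() needs the _safe iterator because vmbus_device_unregister() ends up in hv_process_channel_removal(), which list_del()s the very entry the loop is standing on. The _safe variant caches the next node before the body runs; for reference, this is the shape of the macro from <linux/list.h> (modern spelling, shown here only to illustrate the lookahead):

	#define list_for_each_entry_safe(pos, n, head, member)			\
		for (pos = list_first_entry(head, typeof(*pos), member),	\
		     n = list_next_entry(pos, member);				\
		     &pos->member != (head);					\
		     pos = n, n = list_next_entry(n, member))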
@@ -195,19 +244,12 @@
  * vmbus_process_offer - Process the offer by creating a channel/device
  * associated with this offer
  */
-static void vmbus_process_offer(struct work_struct *work)
+static void vmbus_process_offer(struct vmbus_channel *newchannel)
 {
-	struct vmbus_channel *newchannel = container_of(work,
-							struct vmbus_channel,
-							work);
 	struct vmbus_channel *channel;
 	bool fnew = true;
-	int ret;
 	unsigned long flags;
 
-	/* The next possible work is rescind handling */
-	INIT_WORK(&newchannel->work, vmbus_process_rescind_offer);
-
 	/* Make sure this is a new offer */
 	spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
 
@@ -228,7 +270,44 @@
 	spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
 
 	if (!fnew) {
-		free_channel(newchannel);
+		/*
+		 * Check to see if this is a sub-channel.
+		 */
+		if (newchannel->offermsg.offer.sub_channel_index != 0) {
+			/*
+			 * Process the sub-channel.
+			 */
+			newchannel->primary_channel = channel;
+			spin_lock_irqsave(&channel->lock, flags);
+			list_add_tail(&newchannel->sc_list, &channel->sc_list);
+			channel->num_sc++;
+			spin_unlock_irqrestore(&channel->lock, flags);
+		} else
+			goto err_free_chan;
+	}
+
+	init_vp_index(newchannel, &newchannel->offermsg.offer.if_type);
+
+	if (newchannel->target_cpu != get_cpu()) {
+		put_cpu();
+		smp_call_function_single(newchannel->target_cpu,
+					 percpu_channel_enq,
+					 newchannel, true);
+	} else {
+		percpu_channel_enq(newchannel);
+		put_cpu();
+	}
+
+	/*
+	 * This state is used to indicate a successful open
+	 * so that when we do close the channel normally, we
+	 * can cleanup properly
+	 */
+	newchannel->state = CHANNEL_OPEN_STATE;
+
+	if (!fnew) {
+		if (channel->sc_creation_callback != NULL)
+			channel->sc_creation_callback(newchannel);
 		return;
 	}
 
@@ -241,37 +320,45 @@
 		&newchannel->offermsg.offer.if_type,
 		&newchannel->offermsg.offer.if_instance,
 		newchannel);
+	if (!newchannel->device_obj)
+		goto err_deq_chan;
 
 	/*
 	 * Add the new device to the bus. This will kick off device-driver
 	 * binding which eventually invokes the device driver's AddDevice()
 	 * method.
	 */
-	ret = vmbus_device_register(newchannel->device_obj);
-	if (ret != 0) {
+	if (vmbus_device_register(newchannel->device_obj) != 0) {
 		pr_err("unable to add child device object (relid %d)\n",
-			   newchannel->offermsg.child_relid);
-
-		spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
-		list_del(&newchannel->listentry);
-		spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
+			newchannel->offermsg.child_relid);
 		kfree(newchannel->device_obj);
+		goto err_deq_chan;
+	}
+
+	return;
+
+err_deq_chan:
+	spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
+	list_del(&newchannel->listentry);
+	spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
 
-		free_channel(newchannel);
+	if (newchannel->target_cpu != get_cpu()) {
+		put_cpu();
+		smp_call_function_single(newchannel->target_cpu,
+					 percpu_channel_deq, newchannel, true);
 	} else {
-		/*
-		 * This state is used to indicate a successful open
-		 * so that when we do close the channel normally, we
-		 * can cleanup properly
-		 */
-		newchannel->state = CHANNEL_OPEN_STATE;
+		percpu_channel_deq(newchannel);
+		put_cpu();
 	}
+
+err_free_chan:
+	free_channel(newchannel);
 }
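The rewritten error handling follows the usual kernel goto ladder: err_deq_chan undoes everything done after the channel went onto the global list and per-cpu list, then falls through into err_free_chan, which only frees. In miniature (hypothetical names, same shape):

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return;
	if (validate(obj) != 0)
		goto err_free;	/* nothing else to undo yet */
	add_to_list(obj);
	if (register_obj(obj) != 0)
		goto err_del;	/* undo the list add, then free */
	return;

	err_del:
		del_from_list(obj);
	err_free:
		kfree(obj);

The ordering matters here: a duplicate primary offer (!fnew with sub_channel_index == 0) jumps straight to err_free_chan precisely because it was never list-linked or CPU-bound.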
 
 enum {
 	IDE = 0,
 	SCSI,
 	NIC,
+	ND_NIC,
 	MAX_PERF_CHN,
 };
 
@@ -287,29 +374,36 @@
 	{ HV_SCSI_GUID, },
 	/* Network */
 	{ HV_NIC_GUID, },
+	/* NetworkDirect Guest RDMA */
+	{ HV_ND_GUID, },
 };
 
 /*
  * We use this state to statically distribute the channel interrupt load.
  */
-static u32 next_vp;
+static int next_numa_node_id;
 
 /*
  * Starting with Win8, we can statically distribute the incoming
- * channel interrupt load by binding a channel to VCPU. We
- * implement here a simple round robin scheme for distributing
- * the interrupt load.
- * We will bind channels that are not performance critical to cpu 0 and
- * performance critical channels (IDE, SCSI and Network) will be uniformly
- * distributed across all available CPUs.
+ * channel interrupt load by binding a channel to VCPU.
+ * We do this in a hierarchical fashion:
+ * First distribute the primary channels across available NUMA nodes
+ * and then distribute the subchannels amongst the CPUs in the NUMA
+ * node assigned to the primary channel.
+ *
+ * For pre-win8 hosts or non-performance critical channels we assign the
+ * first CPU in the first NUMA node.
  */
-static u32 get_vp_index(uuid_le *type_guid)
+static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_guid)
 {
 	u32 cur_cpu;
 	int i;
 	bool perf_chn = false;
-	u32 max_cpus = num_online_cpus();
+	struct vmbus_channel *primary = channel->primary_channel;
+	int next_node;
+	struct cpumask available_mask;
+	struct cpumask *alloced_mask;
 
 	for (i = IDE; i < MAX_PERF_CHN; i++) {
 		if (!memcmp(type_guid->b, hp_devs[i].guid,
@@ -326,10 +420,153 @@
 		 * Also if the channel is not a performance critical
 		 * channel, bind it to cpu 0.
 		 */
-		return 0;
+		channel->numa_node = 0;
+		channel->target_cpu = 0;
+		channel->target_vp = hv_context.vp_index[0];
+		return;
+	}
+
+	/*
+	 * We distribute primary channels evenly across all the available
+	 * NUMA nodes and within the assigned NUMA node we will assign the
+	 * first available CPU to the primary channel.
+	 * The sub-channels will be assigned to the CPUs available in the
+	 * NUMA node evenly.
+	 */
+	if (!primary) {
+		while (true) {
+			next_node = next_numa_node_id++;
+			if (next_node == nr_node_ids)
+				next_node = next_numa_node_id = 0;
+			if (cpumask_empty(cpumask_of_node(next_node)))
+				continue;
+			break;
+		}
+		channel->numa_node = next_node;
+		primary = channel;
+	}
+	alloced_mask = &hv_context.hv_numa_map[primary->numa_node];
+
+	if (cpumask_weight(alloced_mask) ==
+	    cpumask_weight(cpumask_of_node(primary->numa_node))) {
+		/*
+		 * We have cycled through all the CPUs in the node;
+		 * reset the alloced map.
+		 */
+		cpumask_clear(alloced_mask);
 	}
-	cur_cpu = (++next_vp % max_cpus);
-	return hv_context.vp_index[cur_cpu];
+
+	cpumask_xor(&available_mask, alloced_mask,
+		    cpumask_of_node(primary->numa_node));
+
+	cur_cpu = -1;
+
+	/*
+	 * Normally Hyper-V host doesn't create more subchannels than there
+	 * are VCPUs on the node but it is possible when not all present VCPUs
+	 * on the node are initialized by guest. Clear the alloced_cpus_in_node
+	 * to start over.
+	 */
+	if (cpumask_equal(&primary->alloced_cpus_in_node,
+			  cpumask_of_node(primary->numa_node)))
+		cpumask_clear(&primary->alloced_cpus_in_node);
+
+	while (true) {
+		cur_cpu = cpumask_next(cur_cpu, &available_mask);
+		if (cur_cpu >= nr_cpu_ids) {
+			cur_cpu = -1;
+			cpumask_copy(&available_mask,
+				     cpumask_of_node(primary->numa_node));
+			continue;
+		}
+
+		/*
+		 * NOTE: in the case of sub-channel, we clear the sub-channel
+		 * related bit(s) in primary->alloced_cpus_in_node in
+		 * hv_process_channel_removal(), so when we reload drivers
+		 * like hv_netvsc in SMP guest, here we're able to re-allocate
+		 * bit from primary->alloced_cpus_in_node.
+		 */
+		if (!cpumask_test_cpu(cur_cpu,
+				      &primary->alloced_cpus_in_node)) {
+			cpumask_set_cpu(cur_cpu,
+					&primary->alloced_cpus_in_node);
+			cpumask_set_cpu(cur_cpu, alloced_mask);
+			break;
+		}
+	}
+
+	channel->target_cpu = cur_cpu;
+	channel->target_vp = hv_context.vp_index[cur_cpu];
+}
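Because alloced_mask only ever contains CPUs belonging to the node, the cpumask_xor() above is effectively a set difference: it yields the node's CPUs that have not been handed out yet. A worked miniature with plain bitmasks (hypothetical 4-CPU node):

	unsigned long node_mask    = 0xf;	/* CPUs 0-3 in the node  */
	unsigned long alloced_mask = 0x5;	/* CPUs 0 and 2 used     */
	unsigned long available    = node_mask ^ alloced_mask;
						/* 0xa: CPUs 1 and 3    */

Once every CPU has been used, alloced_mask equals the node mask, the XOR would come out empty, and the code resets the map to start another round-robin pass.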
+
+static void vmbus_wait_for_unload(void)
+{
+	int cpu = smp_processor_id();
+	void *page_addr = hv_context.synic_message_page[cpu];
+	struct hv_message *msg = (struct hv_message *)page_addr +
+				  VMBUS_MESSAGE_SINT;
+	struct vmbus_channel_message_header *hdr;
+	bool unloaded = false;
+
+	while (1) {
+		if (msg->header.message_type == HVMSG_NONE) {
+			mdelay(10);
+			continue;
+		}
+
+		hdr = (struct vmbus_channel_message_header *)msg->u.payload;
+		if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
+			unloaded = true;
+
+		msg->header.message_type = HVMSG_NONE;
+		/*
+		 * header.message_type needs to be written before we do
+		 * wrmsrl() below.
+		 */
+		mb();
+
+		if (msg->header.message_flags.msg_pending)
+			wrmsrl(HV_X64_MSR_EOM, 0);
+
+		if (unloaded)
+			break;
+	}
+}
+
+/*
+ * vmbus_unload_response - Handler for the unload response.
+ */
+static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
+{
+	/*
+	 * This is a global event; just wakeup the waiting thread.
+	 * Once we successfully unload, we can cleanup the monitor state.
+	 */
+	complete(&vmbus_connection.unload_event);
+}
+
+void vmbus_initiate_unload(void)
+{
+	struct vmbus_channel_message_header hdr;
+
+	/* Pre-Win2012R2 hosts don't support reconnect */
+	if (vmbus_proto_version < VERSION_WIN8_1)
+		return;
+
+	init_completion(&vmbus_connection.unload_event);
+	memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
+	hdr.msgtype = CHANNELMSG_UNLOAD;
+	vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header));
+
+	/*
+	 * vmbus_initiate_unload() is also called on crash and the crash can be
+	 * happening in an interrupt context, where scheduling is impossible.
+	 */
+	if (!in_interrupt())
+		wait_for_completion(&vmbus_connection.unload_event);
+	else
+		vmbus_wait_for_unload();
 }
 
 /*
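vmbus_wait_for_unload() polls the SynIC message page directly because on the crash path interrupts and scheduling are unavailable. The consumer-side slot protocol it follows, restated (this is a reading of the code above, not an addition to it):

	msg->header.message_type = HVMSG_NONE;	/* 1. mark the slot free     */
	mb();					/* 2. order step 1 before 3  */
	if (msg->header.message_flags.msg_pending)
		wrmsrl(HV_X64_MSR_EOM, 0);	/* 3. ask the host to deliver
						 *    the next queued message */

If the EOM write could overtake the HVMSG_NONE store, the hypervisor might still see the slot as occupied when told to redeliver, stalling the pending message.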
@@ -377,15 +614,12 @@
 			offer->connection_id;
 	}
 
-	newchannel->target_vp = get_vp_index(&offer->offer.if_type);
-
 	memcpy(&newchannel->offermsg, offer,
 	       sizeof(struct vmbus_channel_offer_channel));
 	newchannel->monitor_grp = (u8)offer->monitorid / 32;
 	newchannel->monitor_bit = (u8)offer->monitorid % 32;
 
-	INIT_WORK(&newchannel->work, vmbus_process_offer);
-	queue_work(newchannel->controlwq, &newchannel->work);
+	vmbus_process_offer(newchannel);
 }
 
 /*
@@ -397,17 +631,35 @@
 {
 	struct vmbus_channel_rescind_offer *rescind;
 	struct vmbus_channel *channel;
+	unsigned long flags;
+	struct device *dev;
 
 	rescind = (struct vmbus_channel_rescind_offer *)hdr;
 	channel = relid2channel(rescind->child_relid);
 
-	if (channel == NULL)
-		/* Just return here, no channel found */
+	if (channel == NULL) {
+		hv_process_channel_removal(NULL, rescind->child_relid);
 		return;
+	}
+
+	spin_lock_irqsave(&channel->lock, flags);
+	channel->rescind = true;
+	spin_unlock_irqrestore(&channel->lock, flags);
 
-	/* work is initialized for vmbus_process_rescind_offer() from
-	 * vmbus_process_offer() where the channel got created */
-	queue_work(channel->controlwq, &channel->work);
+	if (channel->device_obj) {
+		/*
+		 * We will have to unregister this device from the
+		 * driver core.
+		 */
+		dev = get_device(&channel->device_obj->device);
+		if (dev) {
+			vmbus_device_unregister(channel->device_obj);
+			put_device(dev);
+		}
+	} else {
+		hv_process_channel_removal(channel,
+			channel->offermsg.child_relid);
+	}
 }
 
 /*
@@ -592,25 +844,26 @@
 }
 
 /* Channel message dispatch table */
-static struct vmbus_channel_message_table_entry
+struct vmbus_channel_message_table_entry
 	channel_message_table[CHANNELMSG_COUNT] = {
-	{CHANNELMSG_INVALID,			NULL},
-	{CHANNELMSG_OFFERCHANNEL,		vmbus_onoffer},
-	{CHANNELMSG_RESCIND_CHANNELOFFER,	vmbus_onoffer_rescind},
-	{CHANNELMSG_REQUESTOFFERS,		NULL},
-	{CHANNELMSG_ALLOFFERS_DELIVERED,	vmbus_onoffers_delivered},
-	{CHANNELMSG_OPENCHANNEL,		NULL},
-	{CHANNELMSG_OPENCHANNEL_RESULT,		vmbus_onopen_result},
-	{CHANNELMSG_CLOSECHANNEL,		NULL},
-	{CHANNELMSG_GPADL_HEADER,		NULL},
-	{CHANNELMSG_GPADL_BODY,			NULL},
-	{CHANNELMSG_GPADL_CREATED,		vmbus_ongpadl_created},
-	{CHANNELMSG_GPADL_TEARDOWN,		NULL},
-	{CHANNELMSG_GPADL_TORNDOWN,		vmbus_ongpadl_torndown},
-	{CHANNELMSG_RELID_RELEASED,		NULL},
-	{CHANNELMSG_INITIATE_CONTACT,		NULL},
-	{CHANNELMSG_VERSION_RESPONSE,		vmbus_onversion_response},
-	{CHANNELMSG_UNLOAD,			NULL},
+	{CHANNELMSG_INVALID,			0, NULL},
+	{CHANNELMSG_OFFERCHANNEL,		0, vmbus_onoffer},
+	{CHANNELMSG_RESCIND_CHANNELOFFER,	0, vmbus_onoffer_rescind},
+	{CHANNELMSG_REQUESTOFFERS,		0, NULL},
+	{CHANNELMSG_ALLOFFERS_DELIVERED,	1, vmbus_onoffers_delivered},
+	{CHANNELMSG_OPENCHANNEL,		0, NULL},
+	{CHANNELMSG_OPENCHANNEL_RESULT,		1, vmbus_onopen_result},
+	{CHANNELMSG_CLOSECHANNEL,		0, NULL},
+	{CHANNELMSG_GPADL_HEADER,		0, NULL},
+	{CHANNELMSG_GPADL_BODY,			0, NULL},
+	{CHANNELMSG_GPADL_CREATED,		1, vmbus_ongpadl_created},
+	{CHANNELMSG_GPADL_TEARDOWN,		0, NULL},
+	{CHANNELMSG_GPADL_TORNDOWN,		1, vmbus_ongpadl_torndown},
+	{CHANNELMSG_RELID_RELEASED,		0, NULL},
+	{CHANNELMSG_INITIATE_CONTACT,		0, NULL},
+	{CHANNELMSG_VERSION_RESPONSE,		1, vmbus_onversion_response},
+	{CHANNELMSG_UNLOAD,			0, NULL},
+	{CHANNELMSG_UNLOAD_RESPONSE,		1, vmbus_unload_response},
 };
 
 /*
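The table gains a middle column and the CHANNELMSG_UNLOAD_RESPONSE entry, and loses its static qualifier, presumably so the connection-level dispatcher can consult the new field; judging by which entries carry a 1, the flag appears to mark response/completion messages as opposed to host-initiated ones, though its consumer lives outside this file. The table itself is consumed the way the unchanged vmbus_onmessage() has always done it, roughly:

	if (hdr->msgtype >= CHANNELMSG_COUNT) {
		pr_err("Received invalid channel message type %d\n",
		       hdr->msgtype);
		return;
	}
	if (channel_message_table[hdr->msgtype].message_handler)
		channel_message_table[hdr->msgtype].message_handler(hdr);
	else
		pr_err("Unhandled channel message type %d\n", hdr->msgtype);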
@@ -675,4 +928,87 @@
 	return ret;
 }
 
-/* eof */
+/*
+ * Retrieve the (sub) channel on which to send an outgoing request.
+ * When a primary channel has multiple sub-channels, we try to
+ * distribute the load equally amongst all available channels.
+ */
+struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
+{
+	struct list_head *cur, *tmp;
+	int cur_cpu;
+	struct vmbus_channel *cur_channel;
+	struct vmbus_channel *outgoing_channel = primary;
+	int next_channel;
+	int i = 1;
+
+	if (list_empty(&primary->sc_list))
+		return outgoing_channel;
+
+	next_channel = primary->next_oc++;
+
+	if (next_channel > (primary->num_sc)) {
+		primary->next_oc = 0;
+		return outgoing_channel;
+	}
+
+	cur_cpu = hv_context.vp_index[get_cpu()];
+	put_cpu();
+	list_for_each_safe(cur, tmp, &primary->sc_list) {
+		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
+		if (cur_channel->state != CHANNEL_OPENED_STATE)
+			continue;
+
+		if (cur_channel->target_vp == cur_cpu)
+			return cur_channel;
+
+		if (i == next_channel)
+			return cur_channel;
+
+		i++;
+	}
+
+	return outgoing_channel;
+}
+EXPORT_SYMBOL_GPL(vmbus_get_outgoing_channel);
+
+static void invoke_sc_cb(struct vmbus_channel *primary_channel)
+{
+	struct list_head *cur, *tmp;
+	struct vmbus_channel *cur_channel;
+
+	if (primary_channel->sc_creation_callback == NULL)
+		return;
+
+	list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
+		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
+
+		primary_channel->sc_creation_callback(cur_channel);
+	}
+}
+
+void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
+				void (*sc_cr_cb)(struct vmbus_channel *new_sc))
+{
+	primary_channel->sc_creation_callback = sc_cr_cb;
+}
+EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);
+
+bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
+{
+	bool ret;
+
+	ret = !list_empty(&primary->sc_list);
+
+	if (ret) {
+		/*
+		 * Invoke the callback on sub-channel creation.
+		 * This will present a uniform interface to the
+		 * clients.
+		 */
+		invoke_sc_cb(primary);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);
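Taken together, the three exports give multi-queue drivers a complete sub-channel workflow. A hypothetical client would look like the sketch below (the my_* names are illustrative; hv_netvsc and hv_storvsc are the in-tree users of this API):

	static void my_sc_open(struct vmbus_channel *new_sc)
	{
		/* Called once per arriving sub-channel offer; a real
		 * driver would vmbus_open() the new ring buffers here. */
	}

	static int my_probe(struct hv_device *dev)
	{
		vmbus_set_sc_create_callback(dev->channel, my_sc_open);
		/* ...then ask the host for sub-channels (device-specific). */
		return 0;
	}

	static int my_send(struct hv_device *dev, void *pkt, u32 len)
	{
		struct vmbus_channel *ch =
			vmbus_get_outgoing_channel(dev->channel);

		return vmbus_sendpacket(ch, pkt, len, 0 /* requestid */,
					VM_PKT_DATA_INBAND, 0);
	}

Note the selection order in vmbus_get_outgoing_channel(): a sub-channel bound to the caller's current VCPU wins outright; only failing that does the next_oc round-robin cursor pick a channel, so CPU affinity takes precedence over strict rotation.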