--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -541,7 +541,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
* To resolve the race, we can serialize them by disabling the
* tasklet when the latter is running here.
*/
- hv_event_tasklet_disable(channel);
+ tasklet_disable(&channel->callback_event);
/*
* In case a device driver's probe() fails (e.g.,
@@ -608,8 +608,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
out:
- hv_event_tasklet_enable(channel);
-
+ /* re-enable tasklet for use on re-open */
+ tasklet_enable(&channel->callback_event);
return ret;
}
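tasklet_disable() does two things here: it raises the tasklet's disable count so no new run can start, and it spins until any in-flight run of the callback has finished, which is what serializes close against the per-channel event tasklet. A minimal self-contained sketch of the pairing, using hypothetical my_channel/my_close names rather than the real vmbus types:

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/* Hypothetical stand-in for the vmbus_channel fields used here. */
struct my_channel {
        struct tasklet_struct callback_event;
        void *ring_buffer;
};

static void my_event_tasklet(unsigned long data)
{
        struct my_channel *chan = (struct my_channel *)data;

        /* Touches the ring buffer; must not race with my_close(). */
        pr_debug("event on ring %p\n", chan->ring_buffer);
}

static void my_open(struct my_channel *chan)
{
        tasklet_init(&chan->callback_event, my_event_tasklet,
                     (unsigned long)chan);
}

static void my_close(struct my_channel *chan)
{
        /*
         * Raises the disable count so no new run starts, then waits for
         * a currently-running my_event_tasklet() to finish: after this
         * returns, the teardown below cannot race with the callback.
         */
        tasklet_disable(&chan->callback_event);

        kfree(chan->ring_buffer);       /* callback is quiesced */
        chan->ring_buffer = NULL;

        /* Matching enable, so the tasklet works again on re-open. */
        tasklet_enable(&chan->callback_event);
}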
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -350,7 +350,8 @@ static struct vmbus_channel *alloc_channel(void)
static void free_channel(struct vmbus_channel *channel)
{
tasklet_kill(&channel->callback_event);
- kfree(channel);
+
+ kfree_rcu(channel, rcu);
}
static void percpu_channel_enq(void *arg)
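kfree_rcu() defers the actual kfree() until every CPU has passed through an RCU grace period, so readers that found the channel under rcu_read_lock() can finish with it safely. It requires a struct rcu_head embedded in the object; that is the rcu member added to struct vmbus_channel in the hyperv.h hunk further down. A minimal sketch with a hypothetical struct item:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
        int value;
        struct rcu_head rcu;    /* storage for the deferred-free callback */
};

static void release_item(struct item *it)
{
        /*
         * Queues a grace-period callback that does the kfree(); readers
         * that already hold a pointer to *it under rcu_read_lock() keep
         * a valid object until they unlock.
         */
        kfree_rcu(it, rcu);
}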
@@ -359,14 +360,14 @@ static void percpu_channel_enq(void *arg)
struct hv_per_cpu_context *hv_cpu
= this_cpu_ptr(hv_context.cpu_context);
- list_add_tail(&channel->percpu_list, &hv_cpu->chan_list);
+ list_add_tail_rcu(&channel->percpu_list, &hv_cpu->chan_list);
}
static void percpu_channel_deq(void *arg)
{
struct vmbus_channel *channel = arg;
- list_del(&channel->percpu_list);
+ list_del_rcu(&channel->percpu_list);
}
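The _rcu list variants add the publication barriers that let a lockless reader walk the list concurrently (see the vmbus_chan_sched() hunk below). A sketch of the updater side against a hypothetical list head:

#include <linux/rculist.h>
#include <linux/slab.h>

static LIST_HEAD(chan_list);            /* stands in for hv_cpu->chan_list */

struct item {
        struct list_head node;
        struct rcu_head rcu;
};

static void enq(struct item *it)
{
        /* Publishes the entry with the barrier RCU readers rely on. */
        list_add_tail_rcu(&it->node, &chan_list);
}

static void deq(struct item *it)
{
        /*
         * Unlinks the entry but leaves it->node.next intact, so a reader
         * currently positioned on it can keep iterating; the memory is
         * reclaimed later via kfree_rcu(it, rcu).
         */
        list_del_rcu(&it->node);
}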
@@ -381,19 +382,6 @@ static void vmbus_release_relid(u32 relid)
true);
}
-void hv_event_tasklet_disable(struct vmbus_channel *channel)
-{
- tasklet_disable(&channel->callback_event);
-}
-
-void hv_event_tasklet_enable(struct vmbus_channel *channel)
-{
- tasklet_enable(&channel->callback_event);
-
- /* In case there is any pending event */
- tasklet_schedule(&channel->callback_event);
-}
-
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
{
unsigned long flags;
@@ -402,7 +390,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
BUG_ON(!channel->rescind);
BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
- hv_event_tasklet_disable(channel);
if (channel->target_cpu != get_cpu()) {
put_cpu();
smp_call_function_single(channel->target_cpu,
@@ -411,7 +398,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
percpu_channel_deq(channel);
put_cpu();
}
- hv_event_tasklet_enable(channel);
if (channel->primary_channel == NULL) {
list_del(&channel->listentry);
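Dropping the tasklet fence does not change the CPU-affinity rule: percpu_channel_deq() must still run on the CPU that owns the per-cpu list. get_cpu() disables preemption so the current-CPU test stays valid until put_cpu(); a sketch of the pattern as used above, with a stubbed-out handler:

#include <linux/hyperv.h>
#include <linux/smp.h>

static void percpu_channel_deq(void *arg)
{
        /* Unlink arg from this CPU's channel list, as in channel_mgmt.c. */
}

static void deq_on_owner_cpu(struct vmbus_channel *channel)
{
        if (channel->target_cpu != get_cpu()) {
                /*
                 * Wrong CPU: release the preemption hold, then IPI the
                 * owner and wait (last argument) for the handler to run.
                 */
                put_cpu();
                smp_call_function_single(channel->target_cpu,
                                         percpu_channel_deq, channel, true);
        } else {
                /* Already on the owning CPU, preemption disabled. */
                percpu_channel_deq(channel);
                put_cpu();
        }
}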
@@ -505,7 +491,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
init_vp_index(newchannel, dev_type);
- hv_event_tasklet_disable(newchannel);
if (newchannel->target_cpu != get_cpu()) {
put_cpu();
smp_call_function_single(newchannel->target_cpu,
@@ -515,7 +500,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
percpu_channel_enq(newchannel);
put_cpu();
}
- hv_event_tasklet_enable(newchannel);
/*
* This state is used to indicate a successful open
@@ -565,7 +549,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
list_del(&newchannel->listentry);
mutex_unlock(&vmbus_connection.channel_mutex);
- hv_event_tasklet_disable(newchannel);
if (newchannel->target_cpu != get_cpu()) {
put_cpu();
smp_call_function_single(newchannel->target_cpu,
@@ -574,7 +557,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
percpu_channel_deq(newchannel);
put_cpu();
}
- hv_event_tasklet_enable(newchannel);
vmbus_release_relid(newchannel->offermsg.child_relid);
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -935,8 +935,10 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
if (relid == 0)
continue;
+ rcu_read_lock();
+
/* Find channel based on relid */
- list_for_each_entry(channel, &hv_cpu->chan_list, percpu_list) {
+ list_for_each_entry_rcu(channel, &hv_cpu->chan_list, percpu_list) {
if (channel->offermsg.child_relid != relid)
continue;
@@ -952,6 +954,8 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
tasklet_schedule(&channel->callback_event);
}
}
+
+ rcu_read_unlock();
}
}
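The reader side now takes only rcu_read_lock(), which is cheap and legal in interrupt context because it merely holds off grace periods rather than excluding other CPUs. A sketch of the reader pattern with hypothetical names:

#include <linux/rculist.h>
#include <linux/types.h>

struct item {
        u32 relid;
        struct list_head node;
};

static void process(struct item *it)
{
        /* e.g. tasklet_schedule(); must complete before rcu_read_unlock() */
}

static void handle_relid(struct list_head *head, u32 relid)
{
        struct item *it;

        rcu_read_lock();        /* holds off grace periods, not other CPUs */
        list_for_each_entry_rcu(it, head, node) {
                if (it->relid != relid)
                        continue;

                /* *it cannot be kfree_rcu()-reclaimed before the unlock. */
                process(it);
        }
        rcu_read_unlock();
}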
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -851,6 +851,13 @@ struct vmbus_channel {
* link up channels based on their CPU affinity.
*/
struct list_head percpu_list;
+
+ /*
+ * Defer freeing channel until after all CPUs have
+ * gone through a grace period.
+ */
+ struct rcu_head rcu;
+
/*
* For performance critical channels (storage, networking
* etc,), Hyper-V has a mechanism to enhance the throughput
@@ -1447,9 +1454,6 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
const int *srv_version, int srv_vercnt,
int *nego_fw_version, int *nego_srv_version);
-void hv_event_tasklet_disable(struct vmbus_channel *channel);
-void hv_event_tasklet_enable(struct vmbus_channel *channel);
-
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
void vmbus_setevent(struct vmbus_channel *channel);
@@ -1592,10 +1596,6 @@ hv_pkt_iter_next(struct vmbus_channel *channel,
return nxt;
}
-#define foreach_vmbus_pkt(pkt, channel) \
- for (pkt = hv_pkt_iter_first(channel); pkt; \
- pkt = hv_pkt_iter_next(channel, pkt))
-
struct vmpipe_proto_header {
u32 pkt_type;
u32 data_size;
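Callers of the removed macro can open-code the identical loop from the two iterator helpers that remain in this header; it is exactly what the macro body expanded to. A sketch (the pointer's const-qualification should match hv_pkt_iter_first()'s return type in this version of the header):

#include <linux/hyperv.h>

static void drain_ring(struct vmbus_channel *channel)
{
        struct vmpacket_descriptor *pkt;

        /* The removed foreach_vmbus_pkt(pkt, channel) expanded to this: */
        for (pkt = hv_pkt_iter_first(channel); pkt;
             pkt = hv_pkt_iter_next(channel, pkt))
                ;       /* handle one ring-buffer packet */
}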
@@ -1611,5 +1611,4 @@ struct vmpipe_proto_header {
#define HVSOCK_PKT_LEN(payload_len) (HVSOCK_HEADER_LEN + \
ALIGN((payload_len), 8) + \
PREV_INDICES_LEN)
-
#endif /* _HYPERV_H */
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -255,6 +255,7 @@ struct ucred {
#define PF_VSOCK AF_VSOCK
#define PF_KCM AF_KCM
#define PF_QIPCRTR AF_QIPCRTR
+#define PF_SMC AF_SMC
#define PF_HYPERV AF_HYPERV
#define PF_MAX AF_MAX