@@ -266,6 +266,7 @@ static dma_cap_mask_t dma_cap_mask_all;
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
	struct dma_chan *prev_chan;
+	int node;		/* NUMA node of chan's device */
};
/**
@@ -467,6 +468,46 @@ static void dma_channel_quiesce(void)
#endif
}
+/* Assign a DMA channel to a CPU, taking NUMA affinity into account */
+static void dma_channel_set(int cap, int cpu, struct dma_chan *chan)
+{
+	int node;
+	int src_cpu;
+	struct dma_chan *src_chan;
+	struct dma_chan_tbl_ent *entry;
+	struct dma_chan_tbl_ent *src_entry;
+
+	entry = per_cpu_ptr(channel_table[cap], cpu);
+	node = dev_to_node(chan->device->dev);
+
+	/* Try to optimize if CPU and DMA channel belong to different nodes. */
+	if (node != -1 && node != cpu_to_node(cpu)) {
+		for_each_online_cpu(src_cpu) {
+			src_entry = per_cpu_ptr(channel_table[cap], src_cpu);
+			src_chan = src_entry->chan;
+
+			/*
+			 * CPU online map may change beneath us due to
+			 * CPU hotplug operations.
+			 */
+			if (src_chan == NULL)
+				continue;
+
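+			/*
+			 * Swap assignments: hand the new channel to a CPU
+			 * that is on its node (or whose current channel
+			 * already is), and give that CPU's old channel to
+			 * the CPU being populated here.
+			 */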
+			if (src_entry->node == node ||
+			    cpu_to_node(src_cpu) == node) {
+				entry->node = src_entry->node;
+				src_entry->node = node;
+				entry->chan = src_chan;
+				src_entry->chan = chan;
+				return;
+			}
+		}
+	}
+
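+	/*
+	 * No swap candidate was found, or the channel is already local
+	 * to this CPU (or carries no node information): assign directly.
+	 */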
+	entry->node = node;
+	entry->chan = chan;
+}
+
/**
* dma_channel_rebalance - redistribute the available channels
*
@@ -501,8 +542,8 @@ static void dma_channel_rebalance(bool quiesce)
				chan = nth_chan(cap, n++);
			else
				chan = nth_chan(cap, -1);
-			entry = per_cpu_ptr(channel_table[cap], cpu);
-			entry->chan = chan;
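+			/* Bind the channel, preferring NUMA-local placement */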
+			if (chan)
+				dma_channel_set(cap, cpu, chan);
		}
	if (quiesce)