@@ -10,66 +10,29 @@
 #include <linux/mm.h>
 #include <linux/smp.h>
 #include <linux/cpu.h>
+#include <linux/group_cpus.h>
 
 #include <linux/blk-mq.h>
 #include "blk.h"
 #include "blk-mq.h"
 
-static int queue_index(struct blk_mq_queue_map *qmap,
-		       unsigned int nr_queues, const int q)
-{
-	return qmap->queue_offset + (q % nr_queues);
-}
-
-static int get_first_sibling(unsigned int cpu)
-{
-	unsigned int ret;
-
-	ret = cpumask_first(topology_sibling_cpumask(cpu));
-	if (ret < nr_cpu_ids)
-		return ret;
-
-	return cpu;
-}
-
 void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
 {
-	unsigned int *map = qmap->mq_map;
-	unsigned int nr_queues = qmap->nr_queues;
-	unsigned int cpu, first_sibling, q = 0;
-
-	for_each_possible_cpu(cpu)
-		map[cpu] = -1;
-
-	/*
-	 * Spread queues among present CPUs first for minimizing
-	 * count of dead queues which are mapped by all un-present CPUs
-	 */
-	for_each_present_cpu(cpu) {
-		if (q >= nr_queues)
-			break;
-		map[cpu] = queue_index(qmap, nr_queues, q++);
+	const struct cpumask *masks;
+	unsigned int queue, cpu;
+
+	masks = group_cpus_evenly(qmap->nr_queues);
+	if (!masks) {
+		for_each_possible_cpu(cpu)
+			qmap->mq_map[cpu] = qmap->queue_offset;
+		return;
 	}
 
-	for_each_possible_cpu(cpu) {
-		if (map[cpu] != -1)
-			continue;
-		/*
-		 * First do sequential mapping between CPUs and queues.
-		 * In case we still have CPUs to map, and we have some number of
-		 * threads per cores then map sibling threads to the same queue
-		 * for performance optimizations.
-		 */
-		if (q < nr_queues) {
-			map[cpu] = queue_index(qmap, nr_queues, q++);
-		} else {
-			first_sibling = get_first_sibling(cpu);
-			if (first_sibling == cpu)
-				map[cpu] = queue_index(qmap, nr_queues, q++);
-			else
-				map[cpu] = map[first_sibling];
-		}
+	for (queue = 0; queue < qmap->nr_queues; queue++) {
+		for_each_cpu(cpu, &masks[queue])
+			qmap->mq_map[cpu] = qmap->queue_offset + queue;
 	}
+	kfree(masks);
 }
 EXPORT_SYMBOL_GPL(blk_mq_map_queues);
 
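For illustration only (this sketch is not part of the commit): the new blk_mq_map_queues() asks group_cpus_evenly() for one CPU mask per queue and writes queue_offset + queue into mq_map for every CPU in that mask; if the allocation fails, every possible CPU falls back to queue_offset. The userspace sketch below models that pattern, with a plain round-robin split standing in for group_cpus_evenly() (the real helper is NUMA-aware), and NR_CPUS/NR_QUEUES are made-up values.

/*
 * Illustration only: userspace approximation of the mapping loop above.
 * The real group_cpus_evenly() spreads CPUs across groups with NUMA
 * awareness; here it is modelled as a plain round-robin split.
 */
#include <stdio.h>

#define NR_CPUS   8	/* hypothetical CPU count */
#define NR_QUEUES 3	/* hypothetical number of hardware queues */

int main(void)
{
	unsigned int mq_map[NR_CPUS];	/* stands in for qmap->mq_map */
	unsigned int queue_offset = 0;	/* stands in for qmap->queue_offset */
	unsigned int queue, cpu;

	/* "cpu is in masks[queue]" is approximated by cpu % NR_QUEUES == queue */
	for (queue = 0; queue < NR_QUEUES; queue++) {
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (cpu % NR_QUEUES == queue)
				mq_map[cpu] = queue_offset + queue;
		}
	}

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %u -> hw queue %u\n", cpu, mq_map[cpu]);

	return 0;
}

With these assumed values the sketch prints cpu 0 -> hw queue 0, cpu 1 -> hw queue 1, cpu 2 -> hw queue 2, cpu 3 -> hw queue 0, and so on, i.e. each queue ends up with an even share of the CPUs.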