@@ -3,6 +3,8 @@
  * Scheduler topology setup/handling methods
  */
 
+#include <linux/bsearch.h>
+
 DEFINE_MUTEX(sched_domains_mutex);
 
 /* Protected by sched_domains_mutex: */
@@ -2067,6 +2069,84 @@ int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
 	return found;
 }
 
+struct __cmp_key {
+	const struct cpumask *cpus;
+	struct cpumask ***masks;
+	int node;
+	int cpu;
+	int w;
+};
+
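+/*
+ * bsearch() comparator for the per-hop mask array. Decide whether the
+ * cpu'th matching CPU lies past the hop that @b points at (1), within
+ * it (0), or in an earlier hop (-1). The hop masks are cumulative, so
+ * the weight of @cpus inside a hop never decreases with distance; that
+ * monotonicity is what makes a binary search valid here.
+ */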
+static int hop_cmp(const void *a, const void *b)
+{
+	struct cpumask **prev_hop, **cur_hop = *(struct cpumask ***)b;
+	struct __cmp_key *k = (struct __cmp_key *)a;
+
+	if (cpumask_weight_and(k->cpus, cur_hop[k->node]) <= k->cpu)
+		return 1;
+
+	/*
+	 * The first hop has no predecessor; bail out before loading
+	 * prev_hop, which would read one element before the start of
+	 * the array.
+	 */
+	if (b == k->masks) {
+		k->w = 0;
+		return 0;
+	}
+
+	prev_hop = *((struct cpumask ***)b - 1);
+	k->w = cpumask_weight_and(k->cpus, prev_hop[k->node]);
+	if (k->w <= k->cpu)
+		return 0;
+
+	return -1;
+}
+
+/**
+ * sched_numa_find_nth_cpu() - given the NUMA topology, find the Nth
+ * closest CPU from @cpus to @node.
+ * @cpus: cpumask to find a cpu from
+ * @cpu: ordinal of the CPU to find (0 is the closest one)
+ * @node: the node to start the search from
+ *
+ * Return: cpu, or nr_cpu_ids when nothing is found.
+ */
+int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
+{
+	struct __cmp_key k = { .cpus = cpus, .node = node, .cpu = cpu };
+	struct cpumask ***hop_masks;
+	int hop, ret = nr_cpu_ids;
+
+	rcu_read_lock();
+
+	k.masks = rcu_dereference(sched_domains_numa_masks);
+	if (!k.masks)
+		goto unlock;
+
+	hop_masks = bsearch(&k, k.masks, sched_domains_numa_levels, sizeof(k.masks[0]), hop_cmp);
+	hop = hop_masks - k.masks;
+
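+	/*
+	 * Hop 0 CPUs are counted directly. For a later hop, skip the
+	 * k.w CPUs already covered by the previous hop and take the
+	 * (cpu - k.w)'th CPU among those that are new in this hop.
+	 */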
+	ret = hop ?
+		cpumask_nth_and_andnot(cpu - k.w, cpus, k.masks[hop][node], k.masks[hop-1][node]) :
+		cpumask_nth_and(cpu, cpus, k.masks[0][node]);
+unlock:
+	rcu_read_unlock();
+	return ret;
+}
+EXPORT_SYMBOL_GPL(sched_numa_find_nth_cpu);
 #endif /* CONFIG_NUMA */
 
 static int __sdt_alloc(const struct cpumask *cpu_map)
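For context, a minimal sketch of how a caller might use the new helper, e.g. to place per-queue IRQs on the CPUs nearest a device's home node. The pick_nearby_cpus() wrapper, its parameters, and its comments are illustrative assumptions, not part of this patch; the sketch also assumes sched_numa_find_nth_cpu() is declared in <linux/topology.h>, as elsewhere in this series.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/topology.h>

/*
 * Hypothetical helper, for illustration only: collect the @count CPUs
 * from @candidates that are topologically closest to @node.
 */
static int pick_nearby_cpus(const struct cpumask *candidates, int node,
			    int count, struct cpumask *out)
{
	int i, cpu;

	cpumask_clear(out);
	for (i = 0; i < count; i++) {
		/* i == 0 yields the closest CPU to @node, i == 1 the next, ... */
		cpu = sched_numa_find_nth_cpu(candidates, i, node);
		if (cpu >= nr_cpu_ids)
			return -ENODEV; /* fewer than @count CPUs in @candidates */
		cpumask_set_cpu(cpu, out);
	}

	return 0;
}

The same Nth-closest semantics are what let cpumask_local_spread() hand out CPUs in increasing NUMA distance from a node, which appears to be the motivating user for this helper.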