4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; if not, write to the
18 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 * Boston, MA 02111-1307, USA
24 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
26 * Copyright (c) 2012, 2015 Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * Author: liang@whamcloud.com
35 #define DEBUG_SUBSYSTEM S_LNET
37 #include <linux/cpu.h>
38 #include <linux/sched.h>
39 #include "../../../include/linux/libcfs/libcfs.h"
44 * modparam for setting number of partitions
46 * 0 : estimate best value based on cores or NUMA nodes
47 * 1 : disable multiple partitions
48 * >1 : specify number of partitions
50 static int cpu_npartitions;
51 module_param(cpu_npartitions, int, 0444);
52 MODULE_PARM_DESC(cpu_npartitions, "# of CPU partitions");
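/*
 * Example (a sketch; "libcfs" as the module name is an assumption of this
 * note): request four CPU partitions at module load time with
 *
 *	modprobe libcfs cpu_npartitions=4
 *
 * With the default value of 0, the partition count is estimated from the
 * online CPU/NUMA topology (see cfs_cpt_num_estimate() below).
 */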
55 * modparam for setting the CPU partition pattern:
57 * e.g. "0[0,1,2,3] 1[4,5,6,7]": the number before each bracket is a CPU partition ID,
58 * the numbers inside the brackets are processor IDs (cores or HTs)
60 * e.g. "N 0[0,1] 1[2,3]": a leading 'N' means the numbers inside the brackets
61 * are NUMA node IDs, and the number before each bracket is a CPU partition ID.
63 * NB: if cpu_pattern is specified, cpu_npartitions is ignored
65 static char *cpu_pattern = "";
66 module_param(cpu_pattern, charp, 0444);
67 MODULE_PARM_DESC(cpu_pattern, "CPU partitions pattern");
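/*
 * Examples (a sketch; "libcfs" as the module name is an assumption of this
 * note):
 *
 *	modprobe libcfs cpu_pattern="0[0,1,2,3] 1[4,5,6,7]"
 *	modprobe libcfs cpu_pattern="N 0[0] 1[1]"
 *
 * The first form puts CPUs 0-3 into partition 0 and CPUs 4-7 into
 * partition 1; the second binds NUMA node 0 to partition 0 and node 1 to
 * partition 1.
 */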
70 /* serialize hotplug etc */
72 /* reserved for hotplug */
73 unsigned long cpt_version;
74 /* mutex to protect cpt_cpumask */
75 struct mutex cpt_mutex;
76 /* scratch buffer for set/unset_node */
77 cpumask_t *cpt_cpumask;
80 static struct cfs_cpt_data cpt_data;
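/*
 * cpt_data is module-global state shared by all CPT tables: cpt_lock
 * serializes updates of cpt_version (bumped by the CPU hotplug notifier
 * below), and cpt_mutex protects the cpt_cpumask scratch buffer used by
 * cfs_cpt_set_node()/cfs_cpt_unset_node().
 */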
83 cfs_cpt_table_free(struct cfs_cpt_table *cptab)
87 if (cptab->ctb_cpu2cpt != NULL) {
88 LIBCFS_FREE(cptab->ctb_cpu2cpt,
90 sizeof(cptab->ctb_cpu2cpt[0]));
93 for (i = 0; cptab->ctb_parts != NULL && i < cptab->ctb_nparts; i++) {
94 struct cfs_cpu_partition *part = &cptab->ctb_parts[i];
96 if (part->cpt_nodemask != NULL) {
97 LIBCFS_FREE(part->cpt_nodemask,
98 sizeof(*part->cpt_nodemask));
101 if (part->cpt_cpumask != NULL)
102 LIBCFS_FREE(part->cpt_cpumask, cpumask_size());
105 if (cptab->ctb_parts != NULL) {
106 LIBCFS_FREE(cptab->ctb_parts,
107 cptab->ctb_nparts * sizeof(cptab->ctb_parts[0]));
110 if (cptab->ctb_nodemask != NULL)
111 LIBCFS_FREE(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
112 if (cptab->ctb_cpumask != NULL)
113 LIBCFS_FREE(cptab->ctb_cpumask, cpumask_size());
115 LIBCFS_FREE(cptab, sizeof(*cptab));
117 EXPORT_SYMBOL(cfs_cpt_table_free);
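/*
 * cfs_cpt_table_alloc() builds an empty table with \a ncpt partitions:
 * a global cpumask and nodemask, a per-possible-CPU cpu->partition map
 * (initialized to -1, i.e. unassigned), and a cpumask/nodemask pair for
 * each partition.  On allocation failure the partially built table is
 * released with cfs_cpt_table_free() and NULL is returned.
 */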
119 struct cfs_cpt_table *
120 cfs_cpt_table_alloc(unsigned int ncpt)
122 struct cfs_cpt_table *cptab;
125 LIBCFS_ALLOC(cptab, sizeof(*cptab));
129 cptab->ctb_nparts = ncpt;
131 LIBCFS_ALLOC(cptab->ctb_cpumask, cpumask_size());
132 LIBCFS_ALLOC(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
134 if (cptab->ctb_cpumask == NULL || cptab->ctb_nodemask == NULL)
137 LIBCFS_ALLOC(cptab->ctb_cpu2cpt,
138 num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0]));
139 if (cptab->ctb_cpu2cpt == NULL)
142 memset(cptab->ctb_cpu2cpt, -1,
143 num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0]));
145 LIBCFS_ALLOC(cptab->ctb_parts, ncpt * sizeof(cptab->ctb_parts[0]));
146 if (cptab->ctb_parts == NULL)
149 for (i = 0; i < ncpt; i++) {
150 struct cfs_cpu_partition *part = &cptab->ctb_parts[i];
152 LIBCFS_ALLOC(part->cpt_cpumask, cpumask_size());
153 LIBCFS_ALLOC(part->cpt_nodemask, sizeof(*part->cpt_nodemask));
154 if (part->cpt_cpumask == NULL || part->cpt_nodemask == NULL)
158 spin_lock(&cpt_data.cpt_lock);
159 /* Reserved for hotplug */
160 cptab->ctb_version = cpt_data.cpt_version;
161 spin_unlock(&cpt_data.cpt_lock);
166 cfs_cpt_table_free(cptab);
169 EXPORT_SYMBOL(cfs_cpt_table_alloc);
172 cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
179 for (i = 0; i < cptab->ctb_nparts; i++) {
181 rc = snprintf(tmp, len, "%d\t: ", i);
191 for_each_cpu(j, cptab->ctb_parts[i].cpt_cpumask) {
192 rc = snprintf(tmp, len, "%d ", j);
212 EXPORT_SYMBOL(cfs_cpt_table_print);
215 cfs_cpt_number(struct cfs_cpt_table *cptab)
217 return cptab->ctb_nparts;
219 EXPORT_SYMBOL(cfs_cpt_number);
222 cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt)
224 LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
226 return cpt == CFS_CPT_ANY ?
227 cpumask_weight(cptab->ctb_cpumask) :
228 cpumask_weight(cptab->ctb_parts[cpt].cpt_cpumask);
230 EXPORT_SYMBOL(cfs_cpt_weight);
233 cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt)
235 LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
237 return cpt == CFS_CPT_ANY ?
238 cpumask_any_and(cptab->ctb_cpumask,
239 cpu_online_mask) < nr_cpu_ids :
240 cpumask_any_and(cptab->ctb_parts[cpt].cpt_cpumask,
241 cpu_online_mask) < nr_cpu_ids;
243 EXPORT_SYMBOL(cfs_cpt_online);
246 cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt)
248 LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
250 return cpt == CFS_CPT_ANY ?
251 cptab->ctb_cpumask : cptab->ctb_parts[cpt].cpt_cpumask;
253 EXPORT_SYMBOL(cfs_cpt_cpumask);
256 cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt)
258 LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
260 return cpt == CFS_CPT_ANY ?
261 cptab->ctb_nodemask : cptab->ctb_parts[cpt].cpt_nodemask;
263 EXPORT_SYMBOL(cfs_cpt_nodemask);
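/*
 * A minimal usage sketch of the setters below (assumes CPUs 0-3 are online
 * and that the caller wants two partitions; error handling omitted):
 *
 *	struct cfs_cpt_table *cptab = cfs_cpt_table_alloc(2);
 *
 *	cfs_cpt_set_cpu(cptab, 0, 0);
 *	cfs_cpt_set_cpu(cptab, 0, 1);
 *	cfs_cpt_set_cpu(cptab, 1, 2);
 *	cfs_cpt_set_cpu(cptab, 1, 3);
 *	...
 *	cfs_cpt_table_free(cptab);
 *
 * Each cfs_cpt_set_cpu() call updates the per-partition and global
 * cpumasks plus the matching nodemasks; cfs_cpt_unset_cpu() reverses it.
 */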
266 cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
270 LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts);
272 if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_online(cpu)) {
273 CDEBUG(D_INFO, "CPU %d is invalid or it's offline\n", cpu);
277 if (cptab->ctb_cpu2cpt[cpu] != -1) {
278 CDEBUG(D_INFO, "CPU %d is already in partition %d\n",
279 cpu, cptab->ctb_cpu2cpt[cpu]);
283 cptab->ctb_cpu2cpt[cpu] = cpt;
285 LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_cpumask));
286 LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask));
288 cpumask_set_cpu(cpu, cptab->ctb_cpumask);
289 cpumask_set_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask);
291 node = cpu_to_node(cpu);
293 /* first CPU of @node in this CPT table */
294 if (!node_isset(node, *cptab->ctb_nodemask))
295 node_set(node, *cptab->ctb_nodemask);
297 /* first CPU of @node in this partition */
298 if (!node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask))
299 node_set(node, *cptab->ctb_parts[cpt].cpt_nodemask);
303 EXPORT_SYMBOL(cfs_cpt_set_cpu);
306 cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
311 LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
313 if (cpu < 0 || cpu >= nr_cpu_ids) {
314 CDEBUG(D_INFO, "Invalid CPU id %d\n", cpu);
318 if (cpt == CFS_CPT_ANY) {
319 /* caller doesn't know the partition ID */
320 cpt = cptab->ctb_cpu2cpt[cpu];
321 if (cpt < 0) { /* not set in this CPT-table */
322 CDEBUG(D_INFO, "Trying to unset CPU %d, which is not in CPT table %p\n",
327 } else if (cpt != cptab->ctb_cpu2cpt[cpu]) {
329 "CPU %d is not in cpu-partition %d\n", cpu, cpt);
333 LASSERT(cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask));
334 LASSERT(cpumask_test_cpu(cpu, cptab->ctb_cpumask));
336 cpumask_clear_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask);
337 cpumask_clear_cpu(cpu, cptab->ctb_cpumask);
338 cptab->ctb_cpu2cpt[cpu] = -1;
340 node = cpu_to_node(cpu);
342 LASSERT(node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask));
343 LASSERT(node_isset(node, *cptab->ctb_nodemask));
345 for_each_cpu(i, cptab->ctb_parts[cpt].cpt_cpumask) {
346 /* does this CPT have another CPU belonging to this node? */
347 if (cpu_to_node(i) == node)
352 node_clear(node, *cptab->ctb_parts[cpt].cpt_nodemask);
354 for_each_cpu(i, cptab->ctb_cpumask) {
355 /* does this CPT table have another CPU belonging to this node? */
356 if (cpu_to_node(i) == node)
361 node_clear(node, *cptab->ctb_nodemask);
365 EXPORT_SYMBOL(cfs_cpt_unset_cpu);
368 cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
372 if (cpumask_weight(mask) == 0 ||
373 cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) {
374 CDEBUG(D_INFO, "No online CPU is found in the CPU mask for CPU partition %d\n",
379 for_each_cpu(i, mask) {
380 if (!cfs_cpt_set_cpu(cptab, cpt, i))
386 EXPORT_SYMBOL(cfs_cpt_set_cpumask);
389 cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
393 for_each_cpu(i, mask)
394 cfs_cpt_unset_cpu(cptab, cpt, i);
396 EXPORT_SYMBOL(cfs_cpt_unset_cpumask);
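/*
 * The node variants below translate a NUMA node into its cpumask with
 * cpumask_of_node() and then reuse the cpumask setters; the shared
 * cpt_data.cpt_cpumask scratch buffer is protected by cpt_data.cpt_mutex.
 */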
399 cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node)
404 if (node < 0 || node >= MAX_NUMNODES) {
406 "Invalid NUMA id %d for CPU partition %d\n", node, cpt);
410 mutex_lock(&cpt_data.cpt_mutex);
412 mask = cpt_data.cpt_cpumask;
413 cpumask_copy(mask, cpumask_of_node(node));
415 rc = cfs_cpt_set_cpumask(cptab, cpt, mask);
417 mutex_unlock(&cpt_data.cpt_mutex);
421 EXPORT_SYMBOL(cfs_cpt_set_node);
424 cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node)
428 if (node < 0 || node >= MAX_NUMNODES) {
430 "Invalid NUMA id %d for CPU partition %d\n", node, cpt);
434 mutex_lock(&cpt_data.cpt_mutex);
436 mask = cpt_data.cpt_cpumask;
437 cpumask_copy(mask, cpumask_of_node(node));
439 cfs_cpt_unset_cpumask(cptab, cpt, mask);
441 mutex_unlock(&cpt_data.cpt_mutex);
443 EXPORT_SYMBOL(cfs_cpt_unset_node);
446 cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
450 for_each_node_mask(i, *mask) {
451 if (!cfs_cpt_set_node(cptab, cpt, i))
457 EXPORT_SYMBOL(cfs_cpt_set_nodemask);
460 cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
464 for_each_node_mask(i, *mask)
465 cfs_cpt_unset_node(cptab, cpt, i);
467 EXPORT_SYMBOL(cfs_cpt_unset_nodemask);
470 cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt)
475 if (cpt == CFS_CPT_ANY) {
476 last = cptab->ctb_nparts - 1;
482 for (; cpt <= last; cpt++) {
483 for_each_cpu(i, cptab->ctb_parts[cpt].cpt_cpumask)
484 cfs_cpt_unset_cpu(cptab, cpt, i);
487 EXPORT_SYMBOL(cfs_cpt_clear);
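/*
 * cfs_cpt_spread_node() picks a NUMA node for \a cpt (or for the whole
 * table when \a cpt is out of range) by walking the relevant nodemask with
 * a round-robin rotor, so consecutive calls spread work across all nodes
 * the partition covers.
 */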
490 cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt)
497 /* convert CPU partition ID to HW node id */
499 if (cpt < 0 || cpt >= cptab->ctb_nparts) {
500 mask = cptab->ctb_nodemask;
501 rotor = cptab->ctb_spread_rotor++;
503 mask = cptab->ctb_parts[cpt].cpt_nodemask;
504 rotor = cptab->ctb_parts[cpt].cpt_spread_rotor++;
507 weight = nodes_weight(*mask);
512 for_each_node_mask(node, *mask) {
520 EXPORT_SYMBOL(cfs_cpt_spread_node);
523 cfs_cpt_current(struct cfs_cpt_table *cptab, int remap)
525 int cpu = smp_processor_id();
526 int cpt = cptab->ctb_cpu2cpt[cpu];
532 /* don't return a negative value, for the safety of upper layers;
533 * instead, map the unknown CPU to a valid partition ID */
534 cpt = cpu % cptab->ctb_nparts;
539 EXPORT_SYMBOL(cfs_cpt_current);
542 cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu)
544 LASSERT(cpu >= 0 && cpu < nr_cpu_ids);
546 return cptab->ctb_cpu2cpt[cpu];
548 EXPORT_SYMBOL(cfs_cpt_of_cpu);
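/*
 * cfs_cpt_bind() below restricts the calling task to the CPUs and memory
 * nodes of \a cpt (or of the whole table for CFS_CPT_ANY) via
 * set_cpus_allowed_ptr() and set_mems_allowed(), then calls schedule() so
 * the new affinity takes effect; if the mask already covers every online
 * CPU it returns without changing affinity.
 */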
551 cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
554 nodemask_t *nodemask;
558 LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
560 if (cpt == CFS_CPT_ANY) {
561 cpumask = cptab->ctb_cpumask;
562 nodemask = cptab->ctb_nodemask;
564 cpumask = cptab->ctb_parts[cpt].cpt_cpumask;
565 nodemask = cptab->ctb_parts[cpt].cpt_nodemask;
568 if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) {
569 CERROR("No online CPU found in CPU partition %d, did someone do CPU hotplug on system? You might need to reload Lustre modules to keep system working well.\n",
574 for_each_online_cpu(i) {
575 if (cpumask_test_cpu(i, cpumask))
578 rc = set_cpus_allowed_ptr(current, cpumask);
579 set_mems_allowed(*nodemask);
581 schedule(); /* switch to allowed CPU */
586 /* don't need to set affinity because all online CPUs are covered */
589 EXPORT_SYMBOL(cfs_cpt_bind);
592 * Choose at most \a number CPUs from \a node and set them in \a cpt.
593 * We always prefer to choose CPUs from the same core/socket.
596 cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
597 cpumask_t *node, int number)
599 cpumask_t *socket = NULL;
600 cpumask_t *core = NULL;
606 if (number >= cpumask_weight(node)) {
607 while (!cpumask_empty(node)) {
608 cpu = cpumask_first(node);
610 rc = cfs_cpt_set_cpu(cptab, cpt, cpu);
613 cpumask_clear_cpu(cpu, node);
618 /* allocate scratch buffer */
619 LIBCFS_ALLOC(socket, cpumask_size());
620 LIBCFS_ALLOC(core, cpumask_size());
621 if (socket == NULL || core == NULL) {
626 while (!cpumask_empty(node)) {
627 cpu = cpumask_first(node);
629 /* get cpumask for cores in the same socket */
630 cpumask_copy(socket, topology_core_cpumask(cpu));
631 cpumask_and(socket, socket, node);
633 LASSERT(!cpumask_empty(socket));
635 while (!cpumask_empty(socket)) {
638 /* get cpumask for hts in the same core */
639 cpumask_copy(core, topology_sibling_cpumask(cpu));
640 cpumask_and(core, core, node);
642 LASSERT(!cpumask_empty(core));
644 for_each_cpu(i, core) {
645 cpumask_clear_cpu(i, socket);
646 cpumask_clear_cpu(i, node);
648 rc = cfs_cpt_set_cpu(cptab, cpt, i);
657 cpu = cpumask_first(socket);
663 LIBCFS_FREE(socket, cpumask_size());
665 LIBCFS_FREE(core, cpumask_size());
669 #define CPT_WEIGHT_MIN 4u
672 cfs_cpt_num_estimate(void)
674 unsigned nnode = num_online_nodes();
675 unsigned ncpu = num_online_cpus();
678 if (ncpu <= CPT_WEIGHT_MIN) {
683 /* generate a reasonable number of CPU partitions based on the total
684 * number of CPUs; the preferred N is a power of 2 that satisfies:
685 * 2 * (N - 1)^2 < NCPUS <= 2 * N^2 */
686 for (ncpt = 2; ncpu > 2 * ncpt * ncpt; ncpt <<= 1)
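/*
 * Worked example: with 16 online CPUs the loop above stops at ncpt = 4,
 * the smallest power of two satisfying ncpu <= 2 * ncpt * ncpt; the NUMA
 * and divisibility adjustments below may still change the final value.
 */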
689 if (ncpt <= nnode) { /* fat numa system */
693 } else { /* ncpt > nnode */
694 while ((nnode << 1) <= ncpt)
701 #if (BITS_PER_LONG == 32)
702 /* configuring many CPU partitions on a 32-bit system could consume too much memory */
704 ncpt = min(2U, ncpt);
706 while (ncpu % ncpt != 0)
707 ncpt--; /* worst case is 1 */
712 static struct cfs_cpt_table *
713 cfs_cpt_table_create(int ncpt)
715 struct cfs_cpt_table *cptab = NULL;
716 cpumask_t *mask = NULL;
722 rc = cfs_cpt_num_estimate();
726 if (ncpt > num_online_cpus() || ncpt > 4 * rc) {
727 CWARN("CPU partition number %d is larger than the suggested value (%d); your system may have performance issues or run out of memory while under pressure\n",
731 if (num_online_cpus() % ncpt != 0) {
732 CERROR("CPU number %d is not multiple of cpu_npartition %d, please try different cpu_npartitions value or set pattern string by cpu_pattern=STRING\n",
733 (int)num_online_cpus(), ncpt);
737 cptab = cfs_cpt_table_alloc(ncpt);
739 CERROR("Failed to allocate CPU map(%d)\n", ncpt);
743 num = num_online_cpus() / ncpt;
745 CERROR("CPU changed while setting CPU partition\n");
749 LIBCFS_ALLOC(mask, cpumask_size());
751 CERROR("Failed to allocate scratch cpumask\n");
755 for_each_online_node(i) {
756 cpumask_copy(mask, cpumask_of_node(i));
758 while (!cpumask_empty(mask)) {
759 struct cfs_cpu_partition *part;
765 part = &cptab->ctb_parts[cpt];
767 n = num - cpumask_weight(part->cpt_cpumask);
770 rc = cfs_cpt_choose_ncpus(cptab, cpt, mask, n);
774 LASSERT(num >= cpumask_weight(part->cpt_cpumask));
775 if (num == cpumask_weight(part->cpt_cpumask))
781 num != cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask)) {
782 CERROR("Expect %d(%d) CPU partitions but got %d(%d), CPU hotplug/unplug while setting?\n",
783 cptab->ctb_nparts, num, cpt,
784 cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask));
788 LIBCFS_FREE(mask, cpumask_size());
793 CERROR("Failed to setup CPU-partition-table with %d CPU-partitions, online HW nodes: %d, HW cpus: %d.\n",
794 ncpt, num_online_nodes(), num_online_cpus());
797 LIBCFS_FREE(mask, cpumask_size());
800 cfs_cpt_table_free(cptab);
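/*
 * cfs_cpt_table_create_pattern() builds a CPT table from the cpu_pattern
 * string described near the top of this file: an optional leading 'n'/'N'
 * (bracket contents are NUMA node IDs rather than CPU IDs) followed by
 * "<cpt-id>[<list>]" groups, where <list> is handed to
 * cfs_expr_list_parse() (so besides comma lists such as
 * "0[0,1,2,3] 1[4,5,6,7]", range expressions should also be accepted).
 * Each partition ID must be in [0, ncpt), may appear only once, and must
 * end up with at least one online CPU.
 */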
805 static struct cfs_cpt_table *
806 cfs_cpt_table_create_pattern(char *pattern)
808 struct cfs_cpt_table *cptab;
815 for (ncpt = 0;; ncpt++) { /* quick scan to count brackets */
816 str = strchr(str, '[');
822 str = cfs_trimwhite(pattern);
823 if (*str == 'n' || *str == 'N') {
829 (node && ncpt > num_online_nodes()) ||
830 (!node && ncpt > num_online_cpus())) {
831 CERROR("Invalid pattern %s, or too many partitions %d\n",
836 high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1;
838 cptab = cfs_cpt_table_alloc(ncpt);
840 CERROR("Failed to allocate cpu partition table\n");
844 for (str = cfs_trimwhite(pattern), c = 0;; c++) {
845 struct cfs_range_expr *range;
846 struct cfs_expr_list *el;
847 char *bracket = strchr(str, '[');
853 if (bracket == NULL) {
855 CERROR("Invalid pattern %s\n", str);
857 } else if (c != ncpt) {
858 CERROR("expect %d partitions but found %d\n",
865 if (sscanf(str, "%d%n", &cpt, &n) < 1) {
866 CERROR("Invalid cpu pattern %s\n", str);
870 if (cpt < 0 || cpt >= ncpt) {
871 CERROR("Invalid partition id %d, total partitions %d\n",
876 if (cfs_cpt_weight(cptab, cpt) != 0) {
877 CERROR("Partition %d has already been set.\n", cpt);
881 str = cfs_trimwhite(str + n);
882 if (str != bracket) {
883 CERROR("Invalid pattern %s\n", str);
887 bracket = strchr(str, ']');
888 if (bracket == NULL) {
889 CERROR("missing right bracket for cpt %d, %s\n",
894 if (cfs_expr_list_parse(str, (bracket - str) + 1,
895 0, high, &el) != 0) {
896 CERROR("Can't parse number range: %s\n", str);
900 list_for_each_entry(range, &el->el_exprs, re_link) {
901 for (i = range->re_lo; i <= range->re_hi; i++) {
902 if ((i - range->re_lo) % range->re_stride != 0)
905 rc = node ? cfs_cpt_set_node(cptab, cpt, i) :
906 cfs_cpt_set_cpu(cptab, cpt, i);
908 cfs_expr_list_free(el);
914 cfs_expr_list_free(el);
916 if (!cfs_cpt_online(cptab, cpt)) {
917 CERROR("No online CPU is found on partition %d\n", cpt);
921 str = cfs_trimwhite(bracket + 1);
927 cfs_cpt_table_free(cptab);
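/*
 * CPU hotplug support is limited: the notifier below only bumps
 * cpt_data.cpt_version under cpt_lock, which lets cfs_cpu_init() detect a
 * topology change during setup; on CPU removal it also logs a notice,
 * elevated to a warning when the last sibling of a core goes offline,
 * since existing CPT tables are not rebuilt on hotplug events.
 */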
931 #ifdef CONFIG_HOTPLUG_CPU
933 cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
935 unsigned int cpu = (unsigned long)hcpu;
940 case CPU_DEAD_FROZEN:
942 case CPU_ONLINE_FROZEN:
943 spin_lock(&cpt_data.cpt_lock);
944 cpt_data.cpt_version++;
945 spin_unlock(&cpt_data.cpt_lock);
947 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) {
948 CDEBUG(D_INFO, "CPU changed [cpu %u action %lx]\n",
953 mutex_lock(&cpt_data.cpt_mutex);
954 /* if all HTs in a core are offline, it may break affinity */
955 cpumask_copy(cpt_data.cpt_cpumask,
956 topology_sibling_cpumask(cpu));
957 warn = cpumask_any_and(cpt_data.cpt_cpumask,
958 cpu_online_mask) >= nr_cpu_ids;
959 mutex_unlock(&cpt_data.cpt_mutex);
960 CDEBUG(warn ? D_WARNING : D_INFO,
961 "Lustre: can't support CPU plug-out well now, performance and stability could be impacted [CPU %u action: %lx]\n",
968 static struct notifier_block cfs_cpu_notifier = {
969 .notifier_call = cfs_cpu_notify,
978 if (cfs_cpt_table != NULL)
979 cfs_cpt_table_free(cfs_cpt_table);
981 #ifdef CONFIG_HOTPLUG_CPU
982 unregister_hotcpu_notifier(&cfs_cpu_notifier);
984 if (cpt_data.cpt_cpumask != NULL)
985 LIBCFS_FREE(cpt_data.cpt_cpumask, cpumask_size());
991 LASSERT(cfs_cpt_table == NULL);
993 memset(&cpt_data, 0, sizeof(cpt_data));
995 LIBCFS_ALLOC(cpt_data.cpt_cpumask, cpumask_size());
996 if (cpt_data.cpt_cpumask == NULL) {
997 CERROR("Failed to allocate scratch buffer\n");
1001 spin_lock_init(&cpt_data.cpt_lock);
1002 mutex_init(&cpt_data.cpt_mutex);
1004 #ifdef CONFIG_HOTPLUG_CPU
1005 register_hotcpu_notifier(&cfs_cpu_notifier);
1008 if (*cpu_pattern != 0) {
1009 cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern);
1010 if (cfs_cpt_table == NULL) {
1011 CERROR("Failed to create cptab from pattern %s\n",
1017 cfs_cpt_table = cfs_cpt_table_create(cpu_npartitions);
1018 if (cfs_cpt_table == NULL) {
1019 CERROR("Failed to create ptable with npartitions %d\n",
1025 spin_lock(&cpt_data.cpt_lock);
1026 if (cfs_cpt_table->ctb_version != cpt_data.cpt_version) {
1027 spin_unlock(&cpt_data.cpt_lock);
1028 CERROR("CPU hotplug/unplug during setup\n");
1031 spin_unlock(&cpt_data.cpt_lock);
1033 LCONSOLE(0, "HW CPU cores: %d, npartitions: %d\n",
1034 num_online_cpus(), cfs_cpt_number(cfs_cpt_table));