/* Release 4.12 include/linux/topology.h */
/*
* include/linux/topology.h
*
* Written by: Matthew Dobson, IBM Corporation
*
* Copyright (C) 2002, IBM Corp.
*
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Send feedback to <colpatch@us.ibm.com>
*/
#ifndef _LINUX_TOPOLOGY_H
#define _LINUX_TOPOLOGY_H
#include <linux/cpumask.h>
#include <linux/bitops.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/topology.h>
#ifndef nr_cpus_node
#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
#endif
#define for_each_node_with_cpus(node) \
for_each_online_node(node) \
if (nr_cpus_node(node))
int arch_update_cpu_topology(void);
/* Conform to ACPI 2.0 SLIT distance definitions */
#define LOCAL_DISTANCE 10
#define REMOTE_DISTANCE 20
#ifndef node_distance
#define node_distance(from,to) ((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE)
#endif
#ifndef RECLAIM_DISTANCE
/*
* If the distance between nodes in a system is larger than RECLAIM_DISTANCE
* (in whatever arch specific measurement units returned by node_distance())
* and node_reclaim_mode is enabled then the VM will only call node_reclaim()
* on nodes within this distance.
*/
#define RECLAIM_DISTANCE 30
#endif
#ifndef PENALTY_FOR_NODE_WITH_CPUS
#define PENALTY_FOR_NODE_WITH_CPUS (1)
#endif
#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DECLARE_PER_CPU(int, numa_node);
#ifndef numa_node_id
/* Return the NUMA node id of the current CPU, read from the per-cpu cache. */
static inline int numa_node_id(void)
{
	return raw_cpu_read(numa_node);
}
#endif
#ifndef cpu_to_node
/* Return the NUMA node id recorded for @cpu in its per-cpu numa_node copy. */
static inline int cpu_to_node(int cpu)
{
	return per_cpu(numa_node, cpu);
}
#endif
#ifndef set_numa_node
/* Record @node as the current CPU's NUMA node in the per-cpu cache. */
static inline void set_numa_node(int node)
{
	this_cpu_write(numa_node, node);
}
#endif
#ifndef set_cpu_numa_node
/* Record @node as @cpu's NUMA node in that CPU's per-cpu numa_node copy. */
static inline void set_cpu_numa_node(int cpu, int node)
{
	per_cpu(numa_node, cpu) = node;
}
#endif
#else /* !CONFIG_USE_PERCPU_NUMA_NODE_ID */
/* Returns the number of the current Node. */
#ifndef numa_node_id
static inline int numa_node_id(void)
{
	/* No per-cpu cache in this configuration: derive the node from the
	 * current CPU id instead. */
	return cpu_to_node(raw_smp_processor_id());
}
#endif
#endif /* [!]CONFIG_USE_PERCPU_NUMA_NODE_ID */
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
* N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
* It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
* Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem().
*/
DECLARE_PER_CPU(int, _numa_mem_);
extern int _node_numa_mem_[MAX_NUMNODES];
#ifndef set_numa_mem
/*
 * Record @node as the current CPU's nearest node with memory, and mirror
 * the mapping into the per-node _node_numa_mem_[] lookup table so
 * node_to_mem_node() stays consistent.
 */
static inline void set_numa_mem(int node)
{
	this_cpu_write(_numa_mem_, node);
	_node_numa_mem_[numa_node_id()] = node;
}
#endif
#ifndef node_to_mem_node
/* Return the nearest node with memory recorded for @node (filled in by
 * set_numa_mem()/set_cpu_numa_mem()). */
static inline int node_to_mem_node(int node)
{
	return _node_numa_mem_[node];
}
#endif
#ifndef numa_mem_id
/* Return the nearest node with memory for the current CPU, from the
 * per-cpu _numa_mem_ cache. */
static inline int numa_mem_id(void)
{
	return raw_cpu_read(_numa_mem_);
}
#endif
#ifndef cpu_to_mem
/* Return the nearest node with memory recorded for @cpu in its per-cpu
 * _numa_mem_ copy. */
static inline int cpu_to_mem(int cpu)
{
	return per_cpu(_numa_mem_, cpu);
}
#endif
#ifndef set_cpu_numa_mem
/*
 * Record @node as @cpu's nearest node with memory, and mirror the mapping
 * into the per-node _node_numa_mem_[] lookup table so node_to_mem_node()
 * stays consistent.
 */
static inline void set_cpu_numa_mem(int cpu, int node)
{
	per_cpu(_numa_mem_, cpu) = node;
	_node_numa_mem_[cpu_to_node(cpu)] = node;
}
#endif
#else /* !CONFIG_HAVE_MEMORYLESS_NODES */
#ifndef numa_mem_id
/* Returns the number of the nearest Node with memory: without
 * CONFIG_HAVE_MEMORYLESS_NODES that is simply the current node. */
static inline int numa_mem_id(void)
{
	return numa_node_id();
}
#endif
#ifndef node_to_mem_node
/* Without CONFIG_HAVE_MEMORYLESS_NODES a node is its own nearest memory
 * node, so this is the identity mapping. */
static inline int node_to_mem_node(int node)
{
	return node;
}
#endif
#ifndef cpu_to_mem
/* Without CONFIG_HAVE_MEMORYLESS_NODES @cpu's nearest memory node is
 * simply its NUMA node. */
static inline int cpu_to_mem(int cpu)
{
	return cpu_to_node(cpu);
}
#endif
#endif /* [!]CONFIG_HAVE_MEMORYLESS_NODES */
#ifndef topology_physical_package_id
#define topology_physical_package_id(cpu) ((void)(cpu), -1)
#endif
#ifndef topology_core_id
#define topology_core_id(cpu) ((void)(cpu), 0)
#endif
#ifndef topology_sibling_cpumask
#define topology_sibling_cpumask(cpu) cpumask_of(cpu)
#endif
#ifndef topology_core_cpumask
#define topology_core_cpumask(cpu) cpumask_of(cpu)
#endif
#ifdef CONFIG_SCHED_SMT
/* Return the mask of @cpu's SMT siblings (built only under
 * CONFIG_SCHED_SMT). */
static inline const struct cpumask *cpu_smt_mask(int cpu)
{
	return topology_sibling_cpumask(cpu);
}
#endif
/* Return the mask of all CPUs on the same NUMA node as @cpu. */
static inline const struct cpumask *cpu_cpu_mask(int cpu)
{
	return cpumask_of_node(cpu_to_node(cpu));
}
#endif /* _LINUX_TOPOLOGY_H */
/*
 * Overall contributors (historical authorship statistics, not part of the
 * kernel source itself):
 *
 * Person              | Tokens | Prop   | Commits | CommitProp
 * Lee Schermerhorn    | 278    | 51.96% | 2       | 8.33%
 * JoonSoo Kim         | 64     | 11.96% | 1       | 4.17%
 * Andrew Morton       | 61     | 11.40% | 4       | 16.67%
 * Vincent Guittot     | 45     | 8.41%  | 1       | 4.17%
 * Ben Hutchings       | 26     | 4.86%  | 1       | 4.17%
 * Rusty Russell       | 21     | 3.93%  | 2       | 8.33%
 * Christoph Lameter   | 10     | 1.87%  | 2       | 8.33%
 * Andi Kleen          | 10     | 1.87%  | 1       | 4.17%
 * Heiko Carstens      | 8      | 1.50%  | 3       | 12.50%
 * Ingo Molnar         | 4      | 0.75%  | 2       | 8.33%
 * Bartosz Golaszewski | 3      | 0.56%  | 1       | 4.17%
 * Alex Shi            | 2      | 0.37%  | 1       | 4.17%
 * Mel Gorman          | 1      | 0.19%  | 1       | 4.17%
 * Motohiro Kosaki     | 1      | 0.19%  | 1       | 4.17%
 * Mike Travis         | 1      | 0.19%  | 1       | 4.17%
 * Total               | 535    | 100.00%| 24      | 100.00%
 *
 * Information contained on this website is for historical information
 * purposes only and does not indicate or represent copyright ownership.
 */