author		Mel Gorman <mgorman@suse.de>	2013-10-07 11:29:22 +0100
committer	Ingo Molnar <mingo@kernel.org>	2013-10-09 14:47:49 +0200
commit		e29cf08b05dc0b8151d65704d96d525a9e179a6b (patch)
tree		07eb90e16973acca57ed9bc3866f2bff8ab0750a
parent		8c8a743c5087bac9caac8155b8f3b367e75cdd0b (diff)
sched/numa: Report a NUMA task group ID
It is desirable to model from userspace how the scheduler groups tasks
over time. This patch adds an ID to the numa_group and reports it via
/proc/PID/status.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1381141781-10992-45-git-send-email-mgorman@suse.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
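
A minimal userspace sketch, not part of the patch itself, of how the new
field could be consumed: it parses the "Ngid:" line out of
/proc/PID/status, where 0 means the task is not currently in a
numa_group. The read_numa_group_id() helper and the argument handling
are illustrative assumptions only.

/*
 * Illustrative only -- not part of this patch. Reads the "Ngid:" line
 * that the patch adds to /proc/PID/status; 0 means the task has no
 * numa_group at the moment.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>

static int read_numa_group_id(pid_t pid)
{
	char path[64], line[256];
	int ngid = 0;
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%d/status", (int)pid);
	f = fopen(path, "r");
	if (!f)
		return -1;

	while (fgets(line, sizeof(line), f)) {
		/* The space in the format string also matches the tab. */
		if (sscanf(line, "Ngid: %d", &ngid) == 1)
			break;
	}
	fclose(f);

	return ngid;
}

int main(int argc, char **argv)
{
	pid_t pid = argc > 1 ? (pid_t)atoi(argv[1]) : getpid();

	printf("pid %d numa group id %d\n", (int)pid, read_numa_group_id(pid));
	return 0;
}

Tasks that report the same non-zero Ngid have been grouped together by
the NUMA balancer; as the kernel/sched/fair.c hunk below shows, the ID
is simply the pid of the task that first created the group.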
-rw-r--r--	fs/proc/array.c		2
-rw-r--r--	include/linux/sched.h	5
-rw-r--r--	kernel/sched/fair.c	7
3 files changed, 14 insertions, 0 deletions
diff --git a/fs/proc/array.c b/fs/proc/array.c
index cbd0f1b324b9..1bd2077187fd 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -183,6 +183,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
 	seq_printf(m,
 		"State:\t%s\n"
 		"Tgid:\t%d\n"
+		"Ngid:\t%d\n"
 		"Pid:\t%d\n"
 		"PPid:\t%d\n"
 		"TracerPid:\t%d\n"
@@ -190,6 +191,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
"Gid:\t%d\t%d\t%d\t%d\n",
get_task_state(p),
task_tgid_nr_ns(p, ns),
+ task_numa_group_id(p),
pid_nr_ns(pid, ns),
ppid, tpid,
from_kuid_munged(user_ns, cred->uid),
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f587ded5c148..b0b343b1ba64 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1452,12 +1452,17 @@ struct task_struct {
 
 #ifdef CONFIG_NUMA_BALANCING
 extern void task_numa_fault(int last_node, int node, int pages, bool migrated);
+extern pid_t task_numa_group_id(struct task_struct *p);
 extern void set_numabalancing_state(bool enabled);
 #else
 static inline void task_numa_fault(int last_node, int node, int pages,
 				   bool migrated)
 {
 }
+static inline pid_t task_numa_group_id(struct task_struct *p)
+{
+	return 0;
+}
 static inline void set_numabalancing_state(bool enabled)
 {
 }
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 85565053a6ed..5bd309c035c7 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -893,12 +893,18 @@ struct numa_group {
 
 	spinlock_t lock; /* nr_tasks, tasks */
 	int nr_tasks;
+	pid_t gid;
 	struct list_head task_list;
 
 	struct rcu_head rcu;
 	atomic_long_t faults[0];
 };
 
+pid_t task_numa_group_id(struct task_struct *p)
+{
+	return p->numa_group ? p->numa_group->gid : 0;
+}
+
 static inline int task_faults_idx(int nid, int priv)
 {
 	return 2 * nid + priv;
@@ -1265,6 +1271,7 @@ static void task_numa_group(struct task_struct *p, int cpupid)
 		atomic_set(&grp->refcount, 1);
 		spin_lock_init(&grp->lock);
 		INIT_LIST_HEAD(&grp->task_list);
+		grp->gid = p->pid;
 
 		for (i = 0; i < 2*nr_node_ids; i++)
 			atomic_long_set(&grp->faults[i], p->numa_faults[i]);