-rw-r--r--   include/linux/mm_types.h   | 56
-rw-r--r--   include/linux/rseq_types.h | 42
-rw-r--r--   include/linux/sched.h      | 11
-rw-r--r--   init/init_task.c           |  3
-rw-r--r--   kernel/fork.c              |  6
-rw-r--r--   kernel/sched/core.c        | 16
-rw-r--r--   kernel/sched/sched.h       | 26

7 files changed, 85 insertions, 75 deletions
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 63b8c1209e7b..e4818e932a1d 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -20,6 +20,7 @@
 #include <linux/seqlock.h>
 #include <linux/percpu_counter.h>
 #include <linux/types.h>
+#include <linux/rseq_types.h>
 #include <linux/bitmap.h>
 
 #include <asm/mmu.h>
@@ -922,10 +923,6 @@ struct vm_area_struct {
 #define vma_policy(vma) NULL
 #endif
 
-struct mm_cid {
-	unsigned int cid;
-};
-
 /*
  * Opaque type representing current mm_struct flag state. Must be accessed via
  * mm_flags_xxx() helper functions.
@@ -987,30 +984,9 @@ struct mm_struct {
 		 */
 		atomic_t mm_users;
 
-#ifdef CONFIG_SCHED_MM_CID
-		/**
-		 * @pcpu_cid: Per-cpu current cid.
-		 *
-		 * Keep track of the currently allocated mm_cid for each cpu.
-		 * The per-cpu mm_cid values are serialized by their respective
-		 * runqueue locks.
-		 */
-		struct mm_cid __percpu *pcpu_cid;
-		/**
-		 * @nr_cpus_allowed: Number of CPUs allowed for mm.
-		 *
-		 * Number of CPUs allowed in the union of all mm's
-		 * threads allowed CPUs.
-		 */
-		unsigned int nr_cpus_allowed;
-		/**
-		 * @cpus_allowed_lock: Lock protecting mm cpus_allowed.
-		 *
-		 * Provide mutual exclusion for mm cpus_allowed and
-		 * mm nr_cpus_allowed updates.
-		 */
-		raw_spinlock_t cpus_allowed_lock;
-#endif
+		/* MM CID related storage */
+		struct mm_mm_cid mm_cid;
+
 #ifdef CONFIG_MMU
 		atomic_long_t pgtables_bytes;	/* size of all page tables */
 #endif
@@ -1352,9 +1328,6 @@ static inline void vma_iter_init(struct vma_iterator *vmi,
 }
 
 #ifdef CONFIG_SCHED_MM_CID
-
-#define MM_CID_UNSET (~0U)
-
 /*
  * mm_cpus_allowed: Union of all mm's threads allowed CPUs.
  */
@@ -1383,20 +1356,20 @@ static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
 	int i;
 
 	for_each_possible_cpu(i) {
-		struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, i);
+		struct mm_cid_pcpu *pcpu = per_cpu_ptr(mm->mm_cid.pcpu, i);
 
-		pcpu_cid->cid = MM_CID_UNSET;
+		pcpu->cid = MM_CID_UNSET;
 	}
-	mm->nr_cpus_allowed = p->nr_cpus_allowed;
-	raw_spin_lock_init(&mm->cpus_allowed_lock);
+	mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed;
+	raw_spin_lock_init(&mm->mm_cid.lock);
 	cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
 	cpumask_clear(mm_cidmask(mm));
 }
 
 static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *p)
 {
-	mm->pcpu_cid = alloc_percpu_noprof(struct mm_cid);
-	if (!mm->pcpu_cid)
+	mm->mm_cid.pcpu = alloc_percpu_noprof(struct mm_cid_pcpu);
+	if (!mm->mm_cid.pcpu)
 		return -ENOMEM;
 	mm_init_cid(mm, p);
 	return 0;
@@ -1405,8 +1378,8 @@ static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *
 
 static inline void mm_destroy_cid(struct mm_struct *mm)
 {
-	free_percpu(mm->pcpu_cid);
-	mm->pcpu_cid = NULL;
+	free_percpu(mm->mm_cid.pcpu);
+	mm->mm_cid.pcpu = NULL;
 }
 
 static inline unsigned int mm_cid_size(void)
@@ -1421,10 +1394,9 @@ static inline void mm_set_cpus_allowed(struct mm_struct *mm, const struct cpumas
 	if (!mm)
 		return;
 	/* The mm_cpus_allowed is the union of each thread allowed CPUs masks. */
-	raw_spin_lock(&mm->cpus_allowed_lock);
+	guard(raw_spinlock)(&mm->mm_cid.lock);
 	cpumask_or(mm_allowed, mm_allowed, cpumask);
-	WRITE_ONCE(mm->nr_cpus_allowed, cpumask_weight(mm_allowed));
-	raw_spin_unlock(&mm->cpus_allowed_lock);
+	WRITE_ONCE(mm->mm_cid.nr_cpus_allowed, cpumask_weight(mm_allowed));
 }
 #else /* CONFIG_SCHED_MM_CID */
 static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p) { }
diff --git a/include/linux/rseq_types.h b/include/linux/rseq_types.h
index 9c7a34154de8..e444dd267c7a 100644
--- a/include/linux/rseq_types.h
+++ b/include/linux/rseq_types.h
@@ -90,4 +90,46 @@ struct rseq_data {
 struct rseq_data { };
 #endif /* !CONFIG_RSEQ */
 
+#ifdef CONFIG_SCHED_MM_CID
+
+#define MM_CID_UNSET (~0U)
+
+/**
+ * struct sched_mm_cid - Storage for per task MM CID data
+ * @active:	MM CID is active for the task
+ * @cid:	The CID associated to the task
+ * @last_cid:	The last CID associated to the task
+ */
+struct sched_mm_cid {
+	unsigned int active;
+	unsigned int cid;
+	unsigned int last_cid;
+};
+
+/**
+ * struct mm_cid_pcpu - Storage for per CPU MM_CID data
+ * @cid:	The CID associated to the CPU
+ */
+struct mm_cid_pcpu {
+	unsigned int cid;
+};
+
+/**
+ * struct mm_mm_cid - Storage for per MM CID data
+ * @pcpu:	Per CPU storage for CIDs associated to a CPU
+ * @nr_cpus_allowed: The number of CPUs in the per MM allowed CPUs map. The map
+ *		     is growth only.
+ * @lock:	Spinlock to protect all fields except @pcpu. It also protects
+ *		the MM cid cpumask and the MM cidmask bitmap.
+ */
+struct mm_mm_cid {
+	struct mm_cid_pcpu __percpu *pcpu;
+	unsigned int nr_cpus_allowed;
+	raw_spinlock_t lock;
+};
+#else /* CONFIG_SCHED_MM_CID */
+struct mm_mm_cid { };
+struct sched_mm_cid { };
+#endif /* !CONFIG_SCHED_MM_CID */
+
 #endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e47abc8685d7..64f080d6ed6e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1407,14 +1407,7 @@ struct task_struct {
 #endif /* CONFIG_NUMA_BALANCING */
 
 	struct rseq_data rseq;
-
-#ifdef CONFIG_SCHED_MM_CID
-	int mm_cid;		/* Current cid in mm */
-	int last_mm_cid;	/* Most recent cid in mm */
-	int migrate_from_cpu;
-	int mm_cid_active;	/* Whether cid bitmap is active */
-	struct callback_head cid_work;
-#endif
+	struct sched_mm_cid mm_cid;
 
 	struct tlbflush_unmap_batch tlb_ubc;
 
@@ -2308,7 +2301,7 @@ void sched_mm_cid_fork(struct task_struct *t);
 void sched_mm_cid_exit_signals(struct task_struct *t);
 static inline int task_mm_cid(struct task_struct *t)
 {
-	return t->mm_cid;
+	return t->mm_cid.cid;
 }
 #else
 static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
diff --git a/init/init_task.c b/init/init_task.c
index a55e2189206f..5d122699b664 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -223,6 +223,9 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
 #ifdef CONFIG_SECCOMP_FILTER
 	.seccomp = { .filter_count = ATOMIC_INIT(0) },
 #endif
+#ifdef CONFIG_SCHED_MM_CID
+	.mm_cid = { .cid = MM_CID_UNSET, },
+#endif
 };
 EXPORT_SYMBOL(init_task);
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 9d9afe453ef1..74bc7c9f1bb3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -955,9 +955,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 #endif
 
 #ifdef CONFIG_SCHED_MM_CID
-	tsk->mm_cid = MM_CID_UNSET;
-	tsk->last_mm_cid = MM_CID_UNSET;
-	tsk->mm_cid_active = 0;
+	tsk->mm_cid.cid = MM_CID_UNSET;
+	tsk->mm_cid.last_cid = MM_CID_UNSET;
+	tsk->mm_cid.active = 0;
 #endif
 
 	return tsk;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 11a173596e0d..b1aa7d1055ac 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10376,14 +10376,14 @@ void sched_mm_cid_exit_signals(struct task_struct *t)
 {
 	struct mm_struct *mm = t->mm;
 
-	if (!mm || !t->mm_cid_active)
+	if (!mm || !t->mm_cid.active)
 		return;
 
 	guard(preempt)();
-	t->mm_cid_active = 0;
-	if (t->mm_cid != MM_CID_UNSET) {
-		cpumask_clear_cpu(t->mm_cid, mm_cidmask(mm));
-		t->mm_cid = MM_CID_UNSET;
+	t->mm_cid.active = 0;
+	if (t->mm_cid.cid != MM_CID_UNSET) {
+		cpumask_clear_cpu(t->mm_cid.cid, mm_cidmask(mm));
+		t->mm_cid.cid = MM_CID_UNSET;
 	}
 }
 
@@ -10402,14 +10402,14 @@ void sched_mm_cid_after_execve(struct task_struct *t)
 		return;
 
 	guard(preempt)();
-	t->mm_cid_active = 1;
+	t->mm_cid.active = 1;
 	mm_cid_select(t);
 }
 
 void sched_mm_cid_fork(struct task_struct *t)
 {
-	WARN_ON_ONCE(!t->mm || t->mm_cid != MM_CID_UNSET);
-	t->mm_cid_active = 1;
+	WARN_ON_ONCE(!t->mm || t->mm_cid.cid != MM_CID_UNSET);
+	t->mm_cid.active = 1;
 }
 
 #endif /* CONFIG_SCHED_MM_CID */
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index bf227c27b889..a17f04f075e1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3549,8 +3549,8 @@ static inline void init_sched_mm_cid(struct task_struct *t)
 		return;
 
 	/* Preset last_mm_cid */
-	max_cid = min_t(int, READ_ONCE(mm->nr_cpus_allowed), atomic_read(&mm->mm_users));
-	t->last_mm_cid = max_cid - 1;
+	max_cid = min_t(int, READ_ONCE(mm->mm_cid.nr_cpus_allowed), atomic_read(&mm->mm_users));
+	t->mm_cid.last_cid = max_cid - 1;
 }
 
 static inline bool __mm_cid_get(struct task_struct *t, unsigned int cid, unsigned int max_cids)
@@ -3561,8 +3561,8 @@ static inline bool __mm_cid_get(struct task_struct *t, unsigned int cid, unsigne
 		return false;
 	if (cpumask_test_and_set_cpu(cid, mm_cidmask(mm)))
 		return false;
-	t->mm_cid = t->last_mm_cid = cid;
-	__this_cpu_write(mm->pcpu_cid->cid, cid);
+	t->mm_cid.cid = t->mm_cid.last_cid = cid;
+	__this_cpu_write(mm->mm_cid.pcpu->cid, cid);
 	return true;
 }
 
@@ -3571,14 +3571,14 @@ static inline bool mm_cid_get(struct task_struct *t)
 {
 	struct mm_struct *mm = t->mm;
 	unsigned int max_cids;
-	max_cids = min_t(int, READ_ONCE(mm->nr_cpus_allowed), atomic_read(&mm->mm_users));
+	max_cids = min_t(int, READ_ONCE(mm->mm_cid.nr_cpus_allowed), atomic_read(&mm->mm_users));
 
 	/* Try to reuse the last CID of this task */
-	if (__mm_cid_get(t, t->last_mm_cid, max_cids))
+	if (__mm_cid_get(t, t->mm_cid.last_cid, max_cids))
 		return true;
 
 	/* Try to reuse the last CID of this mm on this CPU */
-	if (__mm_cid_get(t, __this_cpu_read(mm->pcpu_cid->cid), max_cids))
+	if (__mm_cid_get(t, __this_cpu_read(mm->mm_cid.pcpu->cid), max_cids))
 		return true;
 
 	/* Try the first zero bit in the cidmask. */
@@ -3601,15 +3601,15 @@ static inline void mm_cid_select(struct task_struct *t)
 
 static inline void switch_mm_cid(struct task_struct *prev, struct task_struct *next)
 {
-	if (prev->mm_cid_active) {
-		if (prev->mm_cid != MM_CID_UNSET)
-			cpumask_clear_cpu(prev->mm_cid, mm_cidmask(prev->mm));
-		prev->mm_cid = MM_CID_UNSET;
+	if (prev->mm_cid.active) {
+		if (prev->mm_cid.cid != MM_CID_UNSET)
+			cpumask_clear_cpu(prev->mm_cid.cid, mm_cidmask(prev->mm));
+		prev->mm_cid.cid = MM_CID_UNSET;
 	}
 
-	if (next->mm_cid_active) {
+	if (next->mm_cid.active) {
 		mm_cid_select(next);
-		rseq_sched_set_task_mm_cid(next, next->mm_cid);
+		rseq_sched_set_task_mm_cid(next, next->mm_cid.cid);
 	}
 }
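
Note: the selection order implemented by mm_cid_get() above (reuse the task's last CID, then the CID last used by this mm on the current CPU, then the first free bit in the cidmask) can be seen in isolation in the small userspace sketch below. All names in it (task_cid, cid_used, cpu_last_cid, ...) are illustrative stand-ins, not kernel interfaces; a plain bool array replaces the cidmask and per-CPU storage, and none of the locking/preemption rules from the patch apply here.

/*
 * Simplified userspace model of the three-step CID allocation order
 * used by mm_cid_get(): (1) retry the task's last CID, (2) retry the
 * CID cached for this CPU, (3) fall back to the first free CID.
 */
#include <stdbool.h>
#include <stdio.h>

#define CID_UNSET	(~0u)
#define MAX_CIDS	8u

static bool cid_used[MAX_CIDS];			/* stands in for mm_cidmask() */
static unsigned int cpu_last_cid = CID_UNSET;	/* stands in for mm_cid.pcpu->cid */

struct task_cid {
	unsigned int cid;	/* stands in for t->mm_cid.cid */
	unsigned int last_cid;	/* stands in for t->mm_cid.last_cid */
};

static bool try_get(struct task_cid *t, unsigned int cid, unsigned int max_cids)
{
	if (cid >= max_cids || cid_used[cid])
		return false;
	cid_used[cid] = true;
	t->cid = t->last_cid = cid;
	cpu_last_cid = cid;
	return true;
}

static bool cid_get(struct task_cid *t, unsigned int max_cids)
{
	/* 1) Reuse the last CID of this task */
	if (try_get(t, t->last_cid, max_cids))
		return true;
	/* 2) Reuse the CID last handed out on this CPU */
	if (try_get(t, cpu_last_cid, max_cids))
		return true;
	/* 3) Take the first free CID */
	for (unsigned int cid = 0; cid < max_cids; cid++) {
		if (try_get(t, cid, max_cids))
			return true;
	}
	return false;
}

int main(void)
{
	struct task_cid a = { CID_UNSET, CID_UNSET };
	struct task_cid b = { CID_UNSET, CID_UNSET };

	cid_get(&a, MAX_CIDS);
	cid_get(&b, MAX_CIDS);	/* a's CID is taken, b falls through to the scan */
	printf("task a cid=%u, task b cid=%u\n", a.cid, b.cid);
	return 0;
}

The point of preferring the previous CID, or at least the CID last active on this CPU, is locality: user space typically indexes per-CID data structures, so a stable CID keeps a thread touching the same memory.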

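A second detail worth calling out is the switch from an explicit raw_spin_lock()/raw_spin_unlock() pair to guard(raw_spinlock)(&mm->mm_cid.lock) in mm_set_cpus_allowed(): the guard releases the lock automatically when the scope is left, which is why the unlock line disappears. The rough userspace analogue below uses the GCC/Clang cleanup attribute with a pthread mutex; it is only a sketch of the idea, not the kernel mechanism, which lives in include/linux/cleanup.h.

/*
 * Scope-based locking sketch: the mutex is released automatically when
 * the enclosing scope ends, so no explicit unlock call is needed.
 * Build with: cc -pthread guard_sketch.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int nr_cpus_allowed;

static void unlock_cleanup(pthread_mutex_t **l)
{
	pthread_mutex_unlock(*l);
}

/* Hypothetical helper: lock now, unlock when the variable goes out of scope */
#define scoped_guard_mutex(l) \
	pthread_mutex_t *guard_var __attribute__((cleanup(unlock_cleanup), unused)) = \
		(pthread_mutex_lock(l), (l))

static void set_cpus_allowed(unsigned int weight)
{
	scoped_guard_mutex(&lock);	/* unlocked automatically on return */
	nr_cpus_allowed = weight;
}

int main(void)
{
	set_cpus_allowed(4);
	printf("nr_cpus_allowed = %u\n", nr_cpus_allowed);
	return 0;
}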