From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
To: Steven Rostedt <rostedt@goodmis.org>
Cc: linux-kernel@vger.kernel.org, linux-rt-users@vger.kernel.org,
Carsten Emde <C.Emde@osadl.org>, Thomas Gleixner <tglx@linutronix.de>,
Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Subject: [PATCH 15/16] mm: Enable SLUB for RT
Date: Wed, 13 Feb 2013 17:12:10 +0100
Message-Id: <1360771932-27150-16-git-send-email-bigeasy@linutronix.de>
X-Mailer: git-send-email 1.7.10.4
In-Reply-To: <1360771932-27150-1-git-send-email-bigeasy@linutronix.de>
References: <1360771932-27150-1-git-send-email-bigeasy@linutronix.de>
From: Thomas Gleixner <tglx@linutronix.de>
Make SLUB RT aware: turn the kmem_cache_node list_lock into a raw spinlock, and queue slab pages that would otherwise have to be freed with interrupts disabled on a per-CPU list so they can be freed later from a safe context. With that in place, drop the !PREEMPT_RT_FULL restriction on SLUB in Kconfig.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[bigeasy@linutronix: fix a few conflicts]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
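Note (illustration only, not part of the patch): the core trick below is that on RT a
slab page may have to be released from a context where freeing is not allowed, so
free_slab() parks the page on a per-CPU slub_free_list and the list is drained later
via free_delayed() from a safe context. A minimal standalone userspace sketch of that
deferred-free pattern, with made-up names except free_delayed(), looks roughly like this:

#include <stdio.h>
#include <stdlib.h>

struct page {                     /* stand-in for struct page */
	int id;
	struct page *next;        /* stand-in for the page->lru linkage */
};

static struct page *deferred;     /* stand-in for the per-CPU slub_free_list */
static int cannot_free_now;       /* models "interrupts disabled" on RT */

static void free_slab_page(struct page *page)
{
	if (cannot_free_now) {    /* defer instead of freeing here */
		page->next = deferred;
		deferred = page;
		return;
	}
	printf("freeing page %d\n", page->id);
	free(page);
}

static void free_delayed(void)    /* drain the deferred list */
{
	while (deferred) {
		struct page *page = deferred;

		deferred = page->next;
		printf("freeing deferred page %d\n", page->id);
		free(page);
	}
}

int main(void)
{
	struct page *a = malloc(sizeof(*a));
	struct page *b = malloc(sizeof(*b));

	a->id = 1;
	b->id = 2;

	cannot_free_now = 1;      /* e.g. inside local_irq_save() */
	free_slab_page(a);        /* queued, not freed */
	cannot_free_now = 0;      /* back in a preemptible context */
	free_slab_page(b);        /* freed immediately */
	free_delayed();           /* now the queued page is released */
	return 0;
}

The real patch does the same per CPU under a raw spinlock and drains the list at the
points where interrupts are enabled again (__slab_alloc(), put_cpu_partial() and
flush_all()).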
 include/linux/slub_def.h |    2
 init/Kconfig             |    1
 mm/slub.c                |  115 ++++++++++++++++++++++++++++++++++++-----------
 3 files changed, 90 insertions(+), 28 deletions(-)
Index: linux-3.2.39-rt59/include/linux/slub_def.h
===================================================================
--- linux-3.2.39-rt59.orig/include/linux/slub_def.h
+++ linux-3.2.39-rt59/include/linux/slub_def.h
@ linux-3.2.39-rt59/include/linux/slub_def.h:55 @ struct kmem_cache_cpu {
};
struct kmem_cache_node {
- spinlock_t list_lock; /* Protect partial list and nr_partial */
+ raw_spinlock_t list_lock; /* Protect partial list and nr_partial */
unsigned long nr_partial;
struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
Index: linux-3.2.39-rt59/init/Kconfig
===================================================================
--- linux-3.2.39-rt59.orig/init/Kconfig
+++ linux-3.2.39-rt59/init/Kconfig
@ linux-3.2.39-rt59/init/Kconfig:1290 @ config SLAB
config SLUB
bool "SLUB (Unqueued Allocator)"
- depends on !PREEMPT_RT_FULL
help
SLUB is a slab allocator that minimizes cache line usage
instead of managing queues of cached objects (SLAB approach).
Index: linux-3.2.39-rt59/mm/slub.c
===================================================================
--- linux-3.2.39-rt59.orig/mm/slub.c
+++ linux-3.2.39-rt59/mm/slub.c
@ linux-3.2.39-rt59/mm/slub.c:1261 @ static inline void slab_free_hook(struct
#endif /* CONFIG_SLUB_DEBUG */
+struct slub_free_list {
+ raw_spinlock_t lock;
+ struct list_head list;
+};
+static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);
+
/*
* Slab allocation and freeing
*/
@ linux-3.2.39-rt59/mm/slub.c:1291 @ static struct page *allocate_slab(struct
flags &= gfp_allowed_mask;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (system_state == SYSTEM_RUNNING)
+#else
if (flags & __GFP_WAIT)
+#endif
local_irq_enable();
flags |= s->allocflags;
@ linux-3.2.39-rt59/mm/slub.c:1319 @ static struct page *allocate_slab(struct
stat(s, ORDER_FALLBACK);
}
+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (system_state == SYSTEM_RUNNING)
+#else
if (flags & __GFP_WAIT)
+#endif
local_irq_disable();
if (!page)
@ linux-3.2.39-rt59/mm/slub.c:1429 @ static void __free_slab(struct kmem_cach
__free_pages(page, order);
}
+static void free_delayed(struct kmem_cache *s, struct list_head *h)
+{
+ while(!list_empty(h)) {
+ struct page *page = list_first_entry(h, struct page, lru);
+
+ list_del(&page->lru);
+ __free_slab(s, page);
+ }
+}
+
#define need_reserve_slab_rcu \
(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
@ linux-3.2.39-rt59/mm/slub.c:1473 @ static void free_slab(struct kmem_cache
}
call_rcu(head, rcu_free_slab);
+ } else if (irqs_disabled()) {
+ struct slub_free_list *f = &__get_cpu_var(slub_free_list);
+
+ raw_spin_lock(&f->lock);
+ list_add(&page->lru, &f->list);
+ raw_spin_unlock(&f->lock);
} else
__free_slab(s, page);
}
@ linux-3.2.39-rt59/mm/slub.c:1578 @ static void *get_partial_node(struct kme
if (!n || !n->nr_partial)
return NULL;
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
list_for_each_entry_safe(page, page2, &n->partial, lru) {
void *t = acquire_slab(s, n, page, object == NULL);
int available;
@ linux-3.2.39-rt59/mm/slub.c:1599 @ static void *get_partial_node(struct kme
break;
}
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
return object;
}
@ linux-3.2.39-rt59/mm/slub.c:1848 @ redo:
* that acquire_slab() will see a slab page that
* is frozen
*/
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
}
} else {
m = M_FULL;
@ linux-3.2.39-rt59/mm/slub.c:1859 @ redo:
* slabs from diagnostic functions will not see
* any frozen slabs.
*/
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
}
}
@ linux-3.2.39-rt59/mm/slub.c:1894 @ redo:
goto redo;
if (lock)
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
if (m == M_FREE) {
stat(s, DEACTIVATE_EMPTY);
@ linux-3.2.39-rt59/mm/slub.c:1943 @ static void unfreeze_partials(struct kme
m = M_PARTIAL;
if (n != n2) {
if (n)
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
n = n2;
- spin_lock(&n->list_lock);
+ raw_spin_lock(&n->list_lock);
}
}
@ linux-3.2.39-rt59/mm/slub.c:1972 @ static void unfreeze_partials(struct kme
}
if (n)
- spin_unlock(&n->list_lock);
+ raw_spin_unlock(&n->list_lock);
while (discard_page) {
page = discard_page;
@ linux-3.2.39-rt59/mm/slub.c:1993 @ static void unfreeze_partials(struct kme
* If we did not find a slot then simply move all the partials to the
* per node partial list.
*/
-int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
{
struct page *oldpage;
int pages;
@ linux-3.2.39-rt59/mm/slub.c:2008 @ int put_cpu_partial(struct kmem_cache *s
pobjects = oldpage->pobjects;
pages = oldpage->pages;
if (drain && pobjects > s->cpu_partial) {
+ LIST_HEAD(tofree);
+ struct slub_free_list *f;
unsigned long flags;
/*
* partial array is full. Move the existing
@ linux-3.2.39-rt59/mm/slub.c:2017 @ int put_cpu_partial(struct kmem_cache *s
*/
local_irq_save(flags);
unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
+ f = &__get_cpu_var(slub_free_list);
+ raw_spin_lock(&f->lock);
+ list_splice_init(&f->list, &tofree);
+ raw_spin_unlock(&f->lock);
local_irq_restore(flags);
+ free_delayed(s, &tofree);
pobjects = 0;
pages = 0;
}
@ linux-3.2.39-rt59/mm/slub.c:2080 @ static bool has_cpu_slab(int cpu, void *
static void flush_all(struct kmem_cache *s)
{
+ LIST_HEAD(tofree);
+ int cpu;
+
on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
+ for_each_online_cpu(cpu) {
+ struct slub_free_list *f;
+
+ if (!has_cpu_slab(cpu, s))
+ continue;
+
+ f = &per_cpu(slub_free_list, cpu);
+ raw_spin_lock_irq(&f->lock);
+ list_splice_init(&f->list, &tofree);
+ raw_spin_unlock_irq(&f->lock);
+ free_delayed(s, &tofree);
+ }
}
/*
@ linux-3.2.39-rt59/mm/slub.c:2123 @ static unsigned long count_partial(struc
unsigned long x = 0;
struct page *page;
- spin_lock_irqsave(&n->list_lock, flags);
+ raw_spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, lru)
x += get_count(page);
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
return x;
}
@ linux-3.2.39-rt59/mm/slub.c:2222 @ static inline void *new_slab_objects(str
static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
unsigned long addr, struct kmem_cache_cpu *c)
{
+ struct slub_free_list *f;
+ LIST_HEAD(tofree);
void **object;
unsigned long flags;
struct page new;
@ linux-3.2.39-rt59/mm/slub.c:2290 @ redo:
load_freelist:
c->freelist = get_freepointer(s, object);
c->tid = next_tid(c->tid);
+out:
+ f = &__get_cpu_var(slub_free_list);
+ raw_spin_lock(&f->lock);
+ list_splice_init(&f->list, &tofree);
+ raw_spin_unlock(&f->lock);
local_irq_restore(flags);
+ free_delayed(s, &tofree);
return object;
new_slab:
@ linux-3.2.39-rt59/mm/slub.c:2321 @ new_slab:
if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
slab_out_of_memory(s, gfpflags, node);
- local_irq_restore(flags);
- return NULL;
+ goto out;
}
}
@ linux-3.2.39-rt59/mm/slub.c:2335 @ new_slab:
c->freelist = get_freepointer(s, object);
deactivate_slab(s, c);
c->node = NUMA_NO_NODE;
- local_irq_restore(flags);
- return object;
+ goto out;
}
/*
@ linux-3.2.39-rt59/mm/slub.c:2527 @ static void __slab_free(struct kmem_cach
* Otherwise the list_lock will synchronize with
* other processors updating the list of slabs.
*/
- spin_lock_irqsave(&n->list_lock, flags);
+ raw_spin_lock_irqsave(&n->list_lock, flags);
}
}
@ linux-3.2.39-rt59/mm/slub.c:2576 @ static void __slab_free(struct kmem_cach
stat(s, FREE_ADD_PARTIAL);
}
}
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
return;
slab_empty:
@ linux-3.2.39-rt59/mm/slub.c:2590 @ slab_empty:
/* Slab must be on the full list */
remove_full(s, page);
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
stat(s, FREE_SLAB);
discard_slab(s, page);
}
@ linux-3.2.39-rt59/mm/slub.c:2820 @ static void
init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
{
n->nr_partial = 0;
- spin_lock_init(&n->list_lock);
+ raw_spin_lock_init(&n->list_lock);
INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
atomic_long_set(&n->nr_slabs, 0);
@ linux-3.2.39-rt59/mm/slub.c:3560 @ int kmem_cache_shrink(struct kmem_cache
for (i = 0; i < objects; i++)
INIT_LIST_HEAD(slabs_by_inuse + i);
- spin_lock_irqsave(&n->list_lock, flags);
+ raw_spin_lock_irqsave(&n->list_lock, flags);
/*
* Build lists indexed by the items in use in each slab.
@ linux-3.2.39-rt59/mm/slub.c:3581 @ int kmem_cache_shrink(struct kmem_cache
for (i = objects - 1; i > 0; i--)
list_splice(slabs_by_inuse + i, n->partial.prev);
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
/* Release empty slabs */
list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
@ linux-3.2.39-rt59/mm/slub.c:3747 @ void __init kmem_cache_init(void)
int i;
int caches = 0;
struct kmem_cache *temp_kmem_cache;
- int order;
+ int order, cpu;
struct kmem_cache *temp_kmem_cache_node;
unsigned long kmalloc_size;
+ for_each_possible_cpu(cpu) {
+ raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock);
+ INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list);
+ }
+
kmem_size = offsetof(struct kmem_cache, node) +
nr_node_ids * sizeof(struct kmem_cache_node *);
@ linux-3.2.39-rt59/mm/slub.c:4176 @ static int validate_slab_node(struct kme
struct page *page;
unsigned long flags;
- spin_lock_irqsave(&n->list_lock, flags);
+ raw_spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, lru) {
validate_slab_slab(s, page, map);
@ linux-3.2.39-rt59/mm/slub.c:4199 @ static int validate_slab_node(struct kme
atomic_long_read(&n->nr_slabs));
out:
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
return count;
}
@ linux-3.2.39-rt59/mm/slub.c:4389 @ static int list_locations(struct kmem_ca
if (!atomic_long_read(&n->nr_slabs))
continue;
- spin_lock_irqsave(&n->list_lock, flags);
+ raw_spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, lru)
process_slab(&t, s, page, alloc, map);
list_for_each_entry(page, &n->full, lru)
process_slab(&t, s, page, alloc, map);
- spin_unlock_irqrestore(&n->list_lock, flags);
+ raw_spin_unlock_irqrestore(&n->list_lock, flags);
}
for (i = 0; i < t.count; i++) {