Skip to content

Commit

Permalink
Use pages for emergency allocations when possible
Browse files Browse the repository at this point in the history
A concept of an emergency allocation was introduced last year to permit
allocations that took excessive periods of time to switch to physical
memory to make forward progress.

Unfortunately, the Linux kernel's physical memory allocator does SLAB
allocation for all allocations less than or equal to 8192 bytes. It is
possible for Linux's SLAB allocator to invoke direct reclaim when
allocating new slabs. This can cause swap to deadlock. Additionally,
memory fragmentation is able to cause allocations to hang whenever an
allocation is made from a cache that uses slabs that span multiple
pages. Specifically, this happens whenever the allocation is greater
than 256 bytes.

Whenever an emergency object is small enough to fit into a single page,
we allocate a page for the emergency allocation instead of going
through kmalloc(). If the object and its ske header cannot fit together
in a single page, we allocate two non-contiguous pages. Larger object
sizes are still at risk, but there is little we can do for them.

Signed-off-by: Richard Yao <[email protected]>
  • Loading branch information
ryao committed Apr 11, 2014
1 parent b56687c commit 281235d
Showing 1 changed file with 24 additions and 7 deletions.
31 changes: 24 additions & 7 deletions module/spl/spl-kmem.c
Original file line number Diff line number Diff line change
Expand Up @@ -1169,6 +1169,7 @@ static int
spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
{
spl_kmem_emergency_t *ske;
uint32_t obj_size = skc->skc_obj_size;
int empty;
SENTRY;

Expand All @@ -1179,13 +1180,18 @@ spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
if (!empty)
SRETURN(-EEXIST);

ske = kmalloc(sizeof(*ske), flags);
ske = (void *)__get_free_page(flags);
if (ske == NULL)
SRETURN(-ENOMEM);

ske->ske_obj = kmalloc(skc->skc_obj_size, flags);
if (obj_size + P2ROUNDUP(sizeof(*ske), skc->skc_obj_align) <= PAGE_SIZE)
ske->ske_obj = ske + P2ROUNDUP(sizeof(*ske), skc->skc_obj_align);
else if (obj_size <= PAGE_SIZE)
ske->ske_obj = (void *)__get_free_page(flags);
else
ske->ske_obj = kmalloc(skc->skc_obj_size, flags);
if (ske->ske_obj == NULL) {
kfree(ske);
free_page((unsigned long)ske);
SRETURN(-ENOMEM);
}

Expand All @@ -1200,8 +1206,13 @@ spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
spin_unlock(&skc->skc_lock);

if (unlikely(!empty)) {
kfree(ske->ske_obj);
kfree(ske);
if (obj_size + P2ROUNDUP(sizeof(*ske), skc->skc_obj_align) <= PAGE_SIZE);
else if (obj_size <= PAGE_SIZE)
free_page((unsigned long)ske->ske_obj);
else
kfree(ske->ske_obj);

free_page((unsigned long)ske);
SRETURN(-EINVAL);
}

Expand All @@ -1217,6 +1228,7 @@ static int
spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
{
spl_kmem_emergency_t *ske;
uint32_t obj_size = skc->skc_obj_size;
SENTRY;

spin_lock(&skc->skc_lock);
Expand All @@ -1231,8 +1243,13 @@ spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
if (unlikely(ske == NULL))
SRETURN(-ENOENT);

kfree(ske->ske_obj);
kfree(ske);
if (obj_size + P2ROUNDUP(sizeof(*ske), skc->skc_obj_align) <= PAGE_SIZE);
else if (obj_size <= PAGE_SIZE)
free_page((unsigned long)ske->ske_obj);
else
kfree(ske->ske_obj);

free_page((unsigned long)ske);

SRETURN(0);
}
Expand Down

0 comments on commit 281235d

Please sign in to comment.