From a2fc8e93da7b3ad96505756c84114b34a3644d58 Mon Sep 17 00:00:00 2001
From: dormando
Date: Sun, 19 Apr 2015 17:15:17 -0700
Subject: [PATCH] fix off-by-one with slab management

data sticking into the highest slab class was unallocated.

Thanks to pyry for the repro case:
perl -e 'use Cache::Memcached;$memd = new Cache::Memcached { servers=>["127.0.0.1:11212"]};for(20..1000){print "$_\n";$memd->set("fo2$_", "a"x1024)};'
(in a loop)

with:
./memcached -v -m 32 -p 11212 -f 1.012

This serves as a note to turn this into a test.
---
 items.c     | 2 +-
 memcached.c | 2 +-
 memcached.h | 2 +-
 slabs.c     | 5 ++---
 4 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/items.c b/items.c
index dba615c9ed..968fae0629 100644
--- a/items.c
+++ b/items.c
@@ -1437,7 +1437,7 @@ enum crawler_result_type lru_crawler_crawl(char *slabs) {
              p = strtok_r(NULL, ",", &b)) {
             if (!safe_strtoul(p, &sid) || sid < POWER_SMALLEST
-                    || sid >= MAX_NUMBER_OF_SLAB_CLASSES) {
+                    || sid >= MAX_NUMBER_OF_SLAB_CLASSES-1) {
                 pthread_mutex_unlock(&lru_crawler_lock);
                 return CRAWLER_BADCLASS;
             }
diff --git a/memcached.c b/memcached.c
index c9e4b11eac..6810002258 100644
--- a/memcached.c
+++ b/memcached.c
@@ -2835,7 +2835,7 @@ static void process_stat(conn *c, token_t *tokens, const size_t ntokens) {
                 return;
             }
 
-            if (id >= MAX_NUMBER_OF_SLAB_CLASSES) {
+            if (id >= MAX_NUMBER_OF_SLAB_CLASSES-1) {
                 out_string(c, "CLIENT_ERROR Illegal slab id");
                 return;
             }
diff --git a/memcached.h b/memcached.h
index 9d86767cfc..689a6f59c1 100644
--- a/memcached.h
+++ b/memcached.h
@@ -80,7 +80,7 @@
 #define POWER_LARGEST  255
 #define CHUNK_ALIGN_BYTES 8
 /* slab class max is a 6-bit number, -1. */
-#define MAX_NUMBER_OF_SLAB_CLASSES 63
+#define MAX_NUMBER_OF_SLAB_CLASSES (63 + 1)
 
 /** How long an object can reasonably be assumed to be locked before
     harvesting it on a low memory condition. Default: disabled. */
diff --git a/slabs.c b/slabs.c
index c582939f01..c9e29ac89b 100644
--- a/slabs.c
+++ b/slabs.c
@@ -115,7 +115,7 @@ void slabs_init(const size_t limit, const double factor, const bool prealloc) {
 
     memset(slabclass, 0, sizeof(slabclass));
 
-    while (++i < MAX_NUMBER_OF_SLAB_CLASSES && size <= settings.item_size_max / factor) {
+    while (++i < MAX_NUMBER_OF_SLAB_CLASSES-1 && size <= settings.item_size_max / factor) {
         /* Make sure items are always n-byte aligned */
         if (size % CHUNK_ALIGN_BYTES)
             size += CHUNK_ALIGN_BYTES - (size % CHUNK_ALIGN_BYTES);
@@ -161,7 +161,7 @@ static void slabs_preallocate (const unsigned int maxslabs) {
        list.  if you really don't want this, you can rebuild without
       these three lines.  */
 
-    for (i = POWER_SMALLEST; i <= MAX_NUMBER_OF_SLAB_CLASSES; i++) {
+    for (i = POWER_SMALLEST; i < MAX_NUMBER_OF_SLAB_CLASSES; i++) {
         if (++prealloc > maxslabs)
             return;
         if (do_slabs_newslab(i) == 0) {
@@ -234,7 +234,6 @@ static void *do_slabs_alloc(const size_t size, unsigned int id, unsigned int *to
         MEMCACHED_SLABS_ALLOCATE_FAILED(size, 0);
         return NULL;
     }
-
     p = &slabclass[id];
 
     assert(p->sl_curr == 0 || ((item *)p->slots)->slabs_clsid == 0);
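
Following up on the note in the commit message about turning the repro into a test: below is a rough sketch of what such a regression test could look like, assuming memcached's stock Perl harness (Test::More plus the t/lib MemcachedTest helpers new_memcached and ->sock). The key names, value size, and server flags simply mirror the repro command above; this is not the test that eventually landed upstream.

#!/usr/bin/perl
# Sketch only: regression test for the slab-class off-by-one, modeled on
# the repro in the commit message. Assumes memcached's t/lib helpers.
use strict;
use warnings;
use Test::More tests => 981;    # keys 20..1000 inclusive
use FindBin qw($Bin);
use lib "$Bin/lib";
use MemcachedTest;

# Small memory limit and a fine growth factor, as in the repro, so that
# ~1KB values end up in the highest slab class.
my $server = new_memcached("-m 32 -f 1.012");
my $sock = $server->sock;

my $value = "a" x 1024;
my $len   = length($value);

for my $i (20 .. 1000) {
    print $sock "set fo2$i 0 0 $len\r\n$value\r\n";
    is(scalar <$sock>, "STORED\r\n", "fo2$i stored in the highest slab class");
}

Before the fix, sets destined for the highest slab class would fail once memory for that class was never allocated; after the fix every set in the loop should return STORED.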