From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: 
From: Cyrill Gorcunov
Subject: [PATCH 2/2] lib/core/fiber: Relax stack memory usage on recycle
Date: Fri, 15 Mar 2019 23:58:02 +0300
Message-Id: <20190315205802.18847-3-gorcunov@gmail.com>
In-Reply-To: <20190315205802.18847-1-gorcunov@gmail.com>
References: <20190315205802.18847-1-gorcunov@gmail.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
To: Vladimir Davydov
Cc: tml, Cyrill Gorcunov
List-ID: 

We want to detect a situation where the task running in a fiber is too
eager for stack memory and relax rss usage in such a case. For this
sake, upon stack creation we put 8 marks near the 64K boundary (such
parameters let us fill ~1/4 of a page, which seems reasonable, but we
may tune them over time). Once the stack gets recycled we inspect the
marks, and if they have been overwritten we drop the pages behind them
to relax memory usage (if the OS supports the madvise syscall).

Another important point is that we mark the whole stack as not present,
so if a fiber never steps over the 64K limit the marks stay intact,
which means the fiber is a light one and there won't be many page
faults in the future.

Later we plan to implement intelligent fiber scheduling that takes into
account how much memory fibers consume on average.

Part of #3418
---
 src/lib/core/fiber.c | 182 +++++++++++++++++++++++++++++++++++++++++++
 src/lib/core/fiber.h |  12 +++
 2 files changed, 194 insertions(+)

diff --git a/src/lib/core/fiber.c b/src/lib/core/fiber.c
index bf2a22bed..4739e2708 100644
--- a/src/lib/core/fiber.c
+++ b/src/lib/core/fiber.c
@@ -104,6 +104,31 @@ static const struct fiber_attr fiber_attr_default = {
 	.flags = FIBER_DEFAULT_FLAGS
 };
 
+#ifndef TARGET_OS_DARWIN
+/*
+ * Random values generated with uuid.
+ */
+static const uint64_t poison_pool[] = {
+	0x74f31d37285c4c37, 0xb10269a05bf10c29,
+	0x0994d845bd284e0f, 0x9ffd4f7129c184df,
+	0x357151e6711c4415, 0x8c5e5f41aafe6f28,
+	0x6917dd79e78049d5, 0xba61957c65ca2465,
+};
+
+/*
+ * We poison by 8 bytes since that is the natural stack
+ * step on x86-64. Also a 128 byte gap between the
+ * poison values should cover the common cases.
+ */
+#define POISON_SIZE	(sizeof(poison_pool) / sizeof(poison_pool[0]))
+#define POISON_GAP	(128 + sizeof(poison_pool[0]))
+#define POISON_OFF	(POISON_GAP / sizeof(poison_pool[0]))
+
+static void fiber_wmark_recycle(struct fiber *fiber);
+#else
+# define fiber_wmark_recycle(fiber)
+#endif /* !TARGET_OS_DARWIN */
+
 void
 fiber_attr_create(struct fiber_attr *fiber_attr)
 {
@@ -624,6 +649,7 @@ fiber_recycle(struct fiber *fiber)
 	/* no pending wakeup */
 	assert(rlist_empty(&fiber->state));
 	bool has_custom_stack = fiber->flags & FIBER_CUSTOM_STACK;
+	fiber_wmark_recycle(fiber);
 	fiber_reset(fiber);
 	fiber->name[0] = '\0';
 	fiber->f = NULL;
@@ -710,6 +736,160 @@ page_align_up(void *ptr)
 	return page_align_down(ptr + page_size - 1);
 }
 
+#ifndef TARGET_OS_DARWIN
+
+/** Test if an address is page aligned. */
+static inline bool
+is_page_aligned(void *ptr)
+{
+	return ((uintptr_t)ptr & (page_size - 1)) == 0;
+}
+
+/**
+ * Check if stack poison values are present starting
+ * from the address provided.
+ */
+static bool
+stack_has_wmark(void *addr)
+{
+	const uint64_t *src = poison_pool;
+	const uint64_t *dst = addr;
+	size_t i;
+
+	for (i = 0; i < POISON_SIZE; i++) {
+		if (*dst != src[i])
+			return false;
+		dst += POISON_OFF;
+	}
+
+	return true;
+}
+
+/**
+ * Put stack poison values starting
+ * from the address provided.
+ */
+static void
+stack_put_wmark(void *addr)
+{
+	const uint64_t *src = poison_pool;
+	uint64_t *dst = addr;
+	size_t i;
+
+	for (i = 0; i < POISON_SIZE; i++) {
+		*dst = src[i];
+		dst += POISON_OFF;
+	}
+}
+
+/**
+ * Shrink the stack by dropping pages outside of the rss limit.
+ */
+static void
+stack_shrink(struct fiber *fiber)
+{
+	void *start, *end;
+
+	/*
+	 * When dropping pages, make sure the page
+	 * containing the overflow mark is untouched.
+	 * At the same time there is no need to unmap
+	 * the page which carries the "shrink" wmark,
+	 * since we're updating this page anyway.
+	 */
+	if (stack_direction < 0) {
+		start = fiber->stack;
+		end = page_align_down(fiber->stack_shrink_wmark);
+	} else {
+		end = fiber->stack + fiber->stack_size;
+		start = page_align_down(fiber->stack_shrink_wmark);
+	}
+
+	assert(is_page_aligned(start));
+
+	madvise(start, end - start, MADV_DONTNEED);
+	stack_put_wmark(fiber->stack_shrink_wmark);
+}
+
+/**
+ * Investigate stack watermarks on a fiber recycle.
+ */
+static void
+fiber_wmark_recycle(struct fiber *fiber)
+{
+	if (fiber->stack == NULL || fiber->flags & FIBER_CUSTOM_STACK)
+		return;
+
+	/*
+	 * On recycle we try to shrink the stack to
+	 * release memory pressure, but only if the
+	 * fiber has been using too much memory.
+	 */
+	if (!stack_has_wmark(fiber->stack_shrink_wmark))
+		stack_shrink(fiber);
+}
+
+/**
+ * Initialize stack watermarks.
+ */
+static void
+fiber_wmark_init(struct fiber *fiber)
+{
+	/* A stack size which does not cause much memory pressure. */
+	static const unsigned rss_limit = 65536;
+
+	/* The offset base for marks distribution. */
+	static const unsigned offset_base = 128;
+
+	/*
+	 * No tracking on custom stacks for simplicity.
+	 */
+	if (fiber->flags & FIBER_CUSTOM_STACK) {
+		fiber->stack_shrink_wmark = NULL;
+		fiber->wmark_inpage_offset = 0;
+		return;
+	}
+
+	/*
+	 * We don't expect the whole stack to be used under
+	 * regular loads, so let's try to minimize rss pressure.
+	 */
+	assert(is_page_aligned(fiber->stack));
+	madvise(fiber->stack, fiber->stack_size, MADV_DONTNEED);
+
+	/*
+	 * To increase the probability of stack overflow
+	 * detection we put the first mark at a random position
+	 * within the first @offset_base bytes of the range.
+	 * The rest of the marks are put with a constant step
+	 * simply to not carry the offsets in memory.
+	 */
+	fiber->wmark_inpage_offset = rand() % offset_base;
+	fiber->wmark_inpage_offset = (fiber->wmark_inpage_offset + 8) & ~7;
+
+	/*
+	 * Initially we arm the last page of the stack to
+	 * catch the case when we get close to exhausting it.
+	 *
+	 * The shrink watermark is put at the 64K limit, which is
+	 * a value known to not cause much memory pressure even
+	 * with a large number of fibers.
+	 */
+	if (stack_direction < 0) {
+		fiber->stack_shrink_wmark = fiber->stack + fiber->stack_size;
+		fiber->stack_shrink_wmark -= rss_limit;
+		fiber->stack_shrink_wmark += fiber->wmark_inpage_offset;
+	} else {
+		fiber->stack_shrink_wmark = fiber->stack + rss_limit;
+		fiber->stack_shrink_wmark -= page_size;
+		fiber->stack_shrink_wmark += fiber->wmark_inpage_offset;
+	}
+	stack_put_wmark(fiber->stack_shrink_wmark);
+}
+#else
+# define fiber_wmark_init(fiber)
+#endif /* !TARGET_OS_DARWIN */
+
 static int
 fiber_stack_create(struct fiber *fiber, size_t stack_size)
 {
@@ -750,6 +930,7 @@ fiber_stack_create(struct fiber *fiber, size_t stack_size)
 				  (char *)fiber->stack + fiber->stack_size);
 
+	fiber_wmark_init(fiber);
 	mprotect(guard, page_size, PROT_NONE);
 	return 0;
 }
@@ -923,6 +1104,7 @@ cord_create(struct cord *cord, const char *name)
 	cord->sched.stack = NULL;
 	cord->sched.stack_size = 0;
 #endif
+	cord->sched.stack_shrink_wmark = NULL;
 }
 
 void
diff --git a/src/lib/core/fiber.h b/src/lib/core/fiber.h
index f1f5a0555..e1364d413 100644
--- a/src/lib/core/fiber.h
+++ b/src/lib/core/fiber.h
@@ -348,6 +348,18 @@ struct fiber {
 	struct slab *stack_slab;
 	/** Coro stack addr. */
 	void *stack;
+#ifndef TARGET_OS_DARWIN
+	/**
+	 * Stack watermark address used to detect whether
+	 * we need to shrink the stack on reuse.
+	 */
+	void *stack_shrink_wmark;
+	/**
+	 * The offset of the watermark position in the stack
+	 * from the page boundary address.
+	 */
+	unsigned int wmark_inpage_offset;
+#endif
 	/** Coro stack size. */
 	size_t stack_size;
 	/** Valgrind stack id. */
-- 
2.20.1
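
For reference, below is a minimal standalone sketch (not part of the patch) of
the watermark idea described in the commit message. It assumes Linux
mmap/madvise semantics and a downward-growing stack; the names and constants
(STACK_SIZE, RSS_LIMIT, a single poison value) are illustrative only, while the
patch spreads 8 poison values with 128 byte gaps and a randomized in-page
offset.

#define _DEFAULT_SOURCE
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Illustrative constants, not the values used by the patch. */
#define STACK_SIZE	(1024 * 1024)	/* whole fiber stack */
#define RSS_LIMIT	(64 * 1024)	/* "light" fiber budget */

static const uint64_t poison = 0x74f31d37285c4c37ULL;

/* The watermark sits RSS_LIMIT bytes below the stack top (stack grows down). */
static uint64_t *
wmark_addr(void *stack)
{
	return (uint64_t *)((char *)stack + STACK_SIZE - RSS_LIMIT);
}

int
main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	void *stack = mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(stack != MAP_FAILED);

	/* "fiber_wmark_init": arm the watermark. */
	*wmark_addr(stack) = poison;

	/* Simulate a greedy task touching memory beyond the 64K budget. */
	memset((char *)stack + STACK_SIZE - 2 * RSS_LIMIT, 0xab,
	       RSS_LIMIT + sizeof(poison));

	/* "fiber_wmark_recycle": the mark was overwritten, so shrink. */
	if (*wmark_addr(stack) != poison) {
		/* Drop every page strictly below the one holding the mark. */
		char *end = (char *)((uintptr_t)wmark_addr(stack) &
				     ~((uintptr_t)page_size - 1));
		madvise(stack, end - (char *)stack, MADV_DONTNEED);
		*wmark_addr(stack) = poison;	/* re-arm for the next use */
		printf("stack shrunk: dropped %zu bytes\n",
		       (size_t)(end - (char *)stack));
	}

	munmap(stack, STACK_SIZE);
	return 0;
}

The shrink decision mirrors fiber_wmark_recycle() above: if the mark near the
64K boundary has been overwritten, the pages behind it are dropped with
MADV_DONTNEED and the mark is re-armed.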