From 1709e1516c166e1c09461d376535fa6e3a42e279 Mon Sep 17 00:00:00 2001 From: Daan Date: Mon, 20 Apr 2026 13:08:15 -0700 Subject: [PATCH 01/38] fix compatibility mi_theap_calloc macro --- include/mimalloc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/mimalloc.h b/include/mimalloc.h index d46bc9da..0a813295 100644 --- a/include/mimalloc.h +++ b/include/mimalloc.h @@ -379,7 +379,7 @@ typedef mi_heap_t mi_theap_t; #define mi_theap_collect(hp,force) mi_heap_collect(hp,force) #define mi_theap_malloc(hp,sz) mi_heap_malloc(hp,sz) #define mi_theap_zalloc(hp,sz) mi_heap_zalloc(hp,sz) -#define mi_theap_calloc(hp,cnt,sz) mi_heap_malloc(hp,cnt,sz) +#define mi_theap_calloc(hp,cnt,sz) mi_heap_calloc(hp,cnt,sz) #define mi_theap_malloc_small(hp,sz) mi_heap_malloc_small(hp,sz) #define mi_theap_malloc_aligned(hp,sz,a) mi_heap_malloc_aligned(hp,sz,a) #define mi_theap_realloc(hp,p,newsz) mi_heap_realloc(hp,p,newsz) From 4bb24ca1c6379786a671497330aeb8907c18858e Mon Sep 17 00:00:00 2001 From: Daan Date: Mon, 20 Apr 2026 13:20:53 -0700 Subject: [PATCH 02/38] clear committed bits on commit failure --- src/arena.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/arena.c b/src/arena.c index a88fcda6..c8e1a544 100644 --- a/src/arena.c +++ b/src/arena.c @@ -270,6 +270,8 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar const size_t stat_commit_size = commit_size - mi_arena_block_size(already_committed); bool commit_zero = false; if (!_mi_os_commit_ex(p, commit_size, &commit_zero, stat_commit_size)) { + // set all as uncommitted on commit failure + _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index); memid->initially_committed = false; } else { From d716fcfba342df7f393c1b6d7f0a2b38b341876a Mon Sep 17 00:00:00 2001 From: Daan Date: Mon, 20 Apr 2026 13:23:53 -0700 Subject: [PATCH 03/38] check for NULL in guarded pointer setup --- src/alloc.c | 21 ++++++++++----------- 1 file 
changed, 10 insertions(+), 11 deletions(-) diff --git a/src/alloc.c b/src/alloc.c index 5d114025..2ff8ecf4 100644 --- a/src/alloc.c +++ b/src/alloc.c @@ -708,23 +708,22 @@ mi_decl_restrict void* _mi_heap_malloc_guarded(mi_heap_t* heap, size_t size, boo mi_block_t* const block = (mi_block_t*)_mi_malloc_generic(heap, req_size, false /* don't zero */, 0 /* huge_alignment */, NULL); if (block==NULL) return NULL; void* const p = mi_block_ptr_set_guarded(block, obj_size); + if (p == NULL) return NULL; if (zero) { _mi_memzero(p,obj_size); // we have to zero afterwards as padding might have written inside the block (if the `blocksize > reqsize + os_page_size`) } // stats - mi_track_malloc(p, obj_size, zero); - if (p != NULL) { - if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); } - #if MI_STAT>1 - // adjust stats to only count the allocated size of the block (and not the guard page) - mi_heap_stat_adjust_decrease(heap, malloc_requested, req_size); - mi_heap_stat_increase(heap, malloc_requested, size); - #endif - _mi_stat_counter_increase(&heap->tld->stats.malloc_guarded_count, 1); - } + mi_track_malloc(p, obj_size, zero); + if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); } + _mi_stat_counter_increase(&heap->tld->stats.malloc_guarded_count, 1); + #if MI_STAT>1 + // adjust stats to only count the allocated size of the block (and not the guard page) + mi_heap_stat_adjust_decrease(heap, malloc_requested, req_size); + mi_heap_stat_increase(heap, malloc_requested, size); + #endif #if MI_DEBUG>3 - if (p != NULL && zero) { + if (zero) { mi_assert_expensive(mi_mem_is_zero(p, size)); } #endif From 386971ff2c5fbe09765c36ba1ead552987c2973a Mon Sep 17 00:00:00 2001 From: Daan Date: Mon, 20 Apr 2026 13:26:08 -0700 Subject: [PATCH 04/38] check for NULL on guarded aligned allocation --- src/alloc-aligned.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/alloc-aligned.c b/src/alloc-aligned.c index d7e2be78..d36f3587 
100644 --- a/src/alloc-aligned.c +++ b/src/alloc-aligned.c @@ -33,8 +33,9 @@ static mi_decl_noinline mi_decl_restrict void* mi_heap_malloc_guarded_aligned(mi return NULL; } const size_t oversize = size + alignment - 1; - void* base = _mi_heap_malloc_guarded(heap, oversize, zero); - void* p = _mi_align_up_ptr(base, alignment); + void* const base = _mi_heap_malloc_guarded(heap, oversize, zero); + if (base==NULL) return NULL; + void* const p = _mi_align_up_ptr(base, alignment); mi_track_align(base, p, (uint8_t*)p - (uint8_t*)base, size); mi_assert_internal(mi_usable_size(p) >= size); mi_assert_internal(_mi_is_aligned(p, alignment)); From c92b46822aab5a95862483c893c6b6c6214fcbff Mon Sep 17 00:00:00 2001 From: Daan Date: Mon, 20 Apr 2026 13:30:26 -0700 Subject: [PATCH 05/38] fix clearing the weak field of the random context in chacha --- src/random.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/random.c b/src/random.c index 59983e93..e82fa533 100644 --- a/src/random.c +++ b/src/random.c @@ -99,7 +99,8 @@ static void chacha_init(mi_random_ctx_t* ctx, const uint8_t key[32], uint64_t no // since we only use chacha for randomness (and not encryption) we // do not _need_ to read 32-bit values as little endian but we do anyways // just for being compatible :-) - memset(ctx, 0, sizeof(*ctx)); + ctx->output_available = 0; + _mi_memzero(ctx->output,sizeof(ctx->output)); for (size_t i = 0; i < 4; i++) { const uint8_t* sigma = (uint8_t*)"expand 32-byte k"; ctx->input[i] = read32(sigma,i); From e86309889d7211c442a63570426f9323ea1164dd Mon Sep 17 00:00:00 2001 From: Daan Date: Mon, 20 Apr 2026 13:36:52 -0700 Subject: [PATCH 06/38] use atomic variable for the deferred free function --- src/page.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/page.c b/src/page.c index 2a421e63..4653748a 100644 --- a/src/page.c +++ b/src/page.c @@ -895,21 +895,23 @@ static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) { 
a certain number of allocations. ----------------------------------------------------------- */ -static mi_deferred_free_fun* volatile deferred_free = NULL; -static _Atomic(void*) deferred_arg; // = NULL +static _Atomic(void*) deferred_free; // is `mi_deferred_free_fun*` (but some platforms don't support atomic function pointers) +static _Atomic(void*) deferred_arg; void _mi_deferred_free(mi_heap_t* heap, bool force) { heap->tld->heartbeat++; - if (deferred_free != NULL && !heap->tld->recurse) { + mi_deferred_free_fun* const fun = (mi_deferred_free_fun*)mi_atomic_load_ptr_acquire(void,&deferred_free); + if (fun != NULL && !heap->tld->recurse) { heap->tld->recurse = true; - deferred_free(force, heap->tld->heartbeat, mi_atomic_load_ptr_relaxed(void,&deferred_arg)); + void* const arg = mi_atomic_load_ptr_acquire(void,&deferred_arg); + fun(force, heap->tld->heartbeat, arg); heap->tld->recurse = false; } } void mi_register_deferred_free(mi_deferred_free_fun* fn, void* arg) mi_attr_noexcept { - deferred_free = fn; mi_atomic_store_ptr_release(void,&deferred_arg, arg); + mi_atomic_store_ptr_release(void,&deferred_free, (void*)fn); } From c6e8d111c31198e93d35f90c0f69cf548937eb57 Mon Sep 17 00:00:00 2001 From: Daan Date: Mon, 20 Apr 2026 13:38:02 -0700 Subject: [PATCH 07/38] use atomic once initialization of auto_thread_done --- src/init.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/init.c b/src/init.c index f9b7b39d..fd622409 100644 --- a/src/init.c +++ b/src/init.c @@ -459,11 +459,10 @@ static bool _mi_thread_heap_done(mi_heap_t* heap) { // Set up handlers so `mi_thread_done` is called automatically static void mi_process_setup_auto_thread_done(void) { - static bool tls_initialized = false; // fine if it races - if (tls_initialized) return; - tls_initialized = true; - _mi_prim_thread_init_auto_done(); - _mi_heap_set_default_direct(&_mi_heap_main); + mi_atomic_do_once { + _mi_prim_thread_init_auto_done(); + 
_mi_heap_set_default_direct(&_mi_heap_main); + } } From d0cf283502b8ec3f08a7e78633c38da9b2bdc038 Mon Sep 17 00:00:00 2001 From: Daan Date: Mon, 20 Apr 2026 13:39:17 -0700 Subject: [PATCH 08/38] check for NULL subproc in subproc_delete --- src/init.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/init.c b/src/init.c index fd622409..7229bad7 100644 --- a/src/init.c +++ b/src/init.c @@ -254,6 +254,7 @@ mi_subproc_t* _mi_subproc_from_id(mi_subproc_id_t subproc_id) { void mi_subproc_delete(mi_subproc_id_t subproc_id) { if (subproc_id == NULL) return; mi_subproc_t* subproc = _mi_subproc_from_id(subproc_id); + if (subproc==NULL) return; // check if there are no abandoned segments still.. bool safe_to_delete = false; mi_lock(&subproc->abandoned_os_lock) { From 291ad74ef9bcee164d1a3920e07961d100a31c47 Mon Sep 17 00:00:00 2001 From: Daan Date: Mon, 20 Apr 2026 13:46:08 -0700 Subject: [PATCH 09/38] check for overflow _mi_os_alloc_aligned_at_offset --- src/os.c | 3 ++- src/prim/windows/prim.c | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/os.c b/src/os.c index 26d8be4d..2eefe7df 100644 --- a/src/os.c +++ b/src/os.c @@ -409,7 +409,7 @@ void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t offse mi_assert(offset <= size); mi_assert((alignment % _mi_os_page_size()) == 0); *memid = _mi_memid_none(); - if (offset > MI_SEGMENT_SIZE) return NULL; + if (offset > MI_SEGMENT_SIZE || offset > size) return NULL; if (offset == 0) { // regular aligned allocation return _mi_os_alloc_aligned(size, alignment, commit, allow_large, memid); @@ -417,6 +417,7 @@ void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t offse else { // overallocate to align at an offset const size_t extra = _mi_align_up(offset, alignment) - offset; + if (size >= SIZE_MAX - extra) return NULL; // too large const size_t oversize = size + extra; void* const start = _mi_os_alloc_aligned(oversize, alignment, commit, allow_large, memid); if (start == 
NULL) return NULL; diff --git a/src/prim/windows/prim.c b/src/prim/windows/prim.c index cd3bbeca..0d9bb2ac 100644 --- a/src/prim/windows/prim.c +++ b/src/prim/windows/prim.c @@ -613,7 +613,7 @@ void _mi_prim_out_stderr( const char* msg ) // Note: on windows, environment names are not case sensitive. bool _mi_prim_getenv(const char* name, char* result, size_t result_size) { result[0] = 0; - size_t len = GetEnvironmentVariableA(name, result, (DWORD)result_size); + const size_t len = GetEnvironmentVariableA(name, result, (DWORD)result_size); return (len > 0 && len < result_size); } From e14c52295e20e17646edb66dd7630167b5591b7d Mon Sep 17 00:00:00 2001 From: Daan Date: Mon, 20 Apr 2026 13:57:28 -0700 Subject: [PATCH 10/38] propagate weak field on random context split --- src/random.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/random.c b/src/random.c index e82fa533..d5d68e7f 100644 --- a/src/random.c +++ b/src/random.c @@ -115,7 +115,8 @@ static void chacha_init(mi_random_ctx_t* ctx, const uint8_t key[32], uint64_t no } static void chacha_split(mi_random_ctx_t* ctx, uint64_t nonce, mi_random_ctx_t* ctx_new) { - memset(ctx_new, 0, sizeof(*ctx_new)); + _mi_memzero(ctx_new, sizeof(*ctx_new)); + ctx_new->weak = ctx->weak; _mi_memcpy(ctx_new->input, ctx->input, sizeof(ctx_new->input)); ctx_new->input[12] = 0; ctx_new->input[13] = 0; From b64269339ca05bc2b90693cae6341a8e6c33f4d9 Mon Sep 17 00:00:00 2001 From: Kleis Auke Wolthuizen Date: Tue, 21 Apr 2026 13:26:39 +0200 Subject: [PATCH 11/38] Emscripten: add missing include for `getentropy()` --- src/prim/emscripten/prim.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/prim/emscripten/prim.c b/src/prim/emscripten/prim.c index c4cfc35d..64e96d33 100644 --- a/src/prim/emscripten/prim.c +++ b/src/prim/emscripten/prim.c @@ -12,6 +12,8 @@ terms of the MIT license. 
A copy of the license can be found in the file #include "mimalloc/atomic.h" #include "mimalloc/prim.h" +#include // getentropy + // Design // ====== // From 50780e7475a2181a9e29107653932f434ff1d5f2 Mon Sep 17 00:00:00 2001 From: Daan Date: Tue, 21 Apr 2026 11:00:46 -0700 Subject: [PATCH 12/38] add stale labeling workflow --- .github/workflows/stale.yaml | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 .github/workflows/stale.yaml diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml new file mode 100644 index 00000000..02ee3314 --- /dev/null +++ b/.github/workflows/stale.yaml @@ -0,0 +1,26 @@ +name: Close inactive issues +on: + workflow_dispatch: # allow running the workflow manually + schedule: + - cron: "15 21 * * *" # minute, hour, day (1-31), month (1-12), day of the week (0 - 6 or SUN-SAT) + +jobs: + close-issues: + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + steps: + - uses: actions/stale@v10 + with: + days-before-issue-stale: 360 + days-before-issue-close: 14 + stale-issue-label: "stale" + stale-issue-message: "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs in the next 14 days. Thank you for your contributions!" + close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale. Please feel free to reopen if this is still an active issue." + days-before-pr-stale: -1 + days-before-pr-close: -1 + stale-pr-label: "stale" + stale-pr-message: "This PR has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs in the next 14 days. Thank you for your contributions!" + close-pr-message: "This PR was closed because it has been inactive for 14 days since being marked as stale. Please feel free to reopen if you think this PR should still be considered. 
Thank you again for your help." + repo-token: ${{ secrets.GITHUB_TOKEN }} From dc3572c241d906039b537df3874ff417c0f1d80d Mon Sep 17 00:00:00 2001 From: Daan Date: Tue, 21 Apr 2026 11:10:33 -0700 Subject: [PATCH 13/38] rename stale workflow --- .github/workflows/stale.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml index 02ee3314..b0fbdfe6 100644 --- a/.github/workflows/stale.yaml +++ b/.github/workflows/stale.yaml @@ -1,9 +1,9 @@ -name: Close inactive issues on: workflow_dispatch: # allow running the workflow manually schedule: - cron: "15 21 * * *" # minute, hour, day (1-31), month (1-12), day of the week (0 - 6 or SUN-SAT) +name: Stale jobs: close-issues: runs-on: ubuntu-latest From 5021a39641d2618503796180d9d74ad38e0b2a40 Mon Sep 17 00:00:00 2001 From: Daan Date: Tue, 21 Apr 2026 11:12:59 -0700 Subject: [PATCH 14/38] add push trigger to activate stale workflow once --- .github/workflows/stale.yaml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml index b0fbdfe6..690e398f 100644 --- a/.github/workflows/stale.yaml +++ b/.github/workflows/stale.yaml @@ -1,9 +1,12 @@ on: - workflow_dispatch: # allow running the workflow manually + workflow_dispatch: # allow running the workflow manually schedule: - cron: "15 21 * * *" # minute, hour, day (1-31), month (1-12), day of the week (0 - 6 or SUN-SAT) + push: + branches: + - 'dev*' # for now to activate it -name: Stale +name: Close inactive issues jobs: close-issues: runs-on: ubuntu-latest From 57cdd0d7a017b7fa1c935127655200af3cc059eb Mon Sep 17 00:00:00 2001 From: Daan Date: Tue, 21 Apr 2026 11:17:10 -0700 Subject: [PATCH 15/38] remove push trigger from stale workflow --- .github/workflows/stale.yaml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml index 690e398f..dff9cb65 100644 
--- a/.github/workflows/stale.yaml +++ b/.github/workflows/stale.yaml @@ -2,9 +2,6 @@ on: workflow_dispatch: # allow running the workflow manually schedule: - cron: "15 21 * * *" # minute, hour, day (1-31), month (1-12), day of the week (0 - 6 or SUN-SAT) - push: - branches: - - 'dev*' # for now to activate it name: Close inactive issues jobs: @@ -26,4 +23,5 @@ jobs: stale-pr-label: "stale" stale-pr-message: "This PR has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs in the next 14 days. Thank you for your contributions!" close-pr-message: "This PR was closed because it has been inactive for 14 days since being marked as stale. Please feel free to reopen if you think this PR should still be considered. Thank you again for your help." + operations-per-run: 32 repo-token: ${{ secrets.GITHUB_TOKEN }} From e240131e2d8970f8c5d6c0ea9c31446dba357cd6 Mon Sep 17 00:00:00 2001 From: Daan Date: Wed, 22 Apr 2026 11:01:59 -0700 Subject: [PATCH 16/38] clarify use of deferred_free (issue #1271, issue 3.6) --- src/page.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/page.c b/src/page.c index 4653748a..10b85e6d 100644 --- a/src/page.c +++ b/src/page.c @@ -895,6 +895,7 @@ static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) { a certain number of allocations. ----------------------------------------------------------- */ +// The program should only install a single deferred free handler before doing allocation. 
static _Atomic(void*) deferred_free; // is `mi_deferred_free_fun*` (but some platforms don't support atomic function pointers) static _Atomic(void*) deferred_arg; From f1b98b26222e3c32ec25d3b091acb895d0263faf Mon Sep 17 00:00:00 2001 From: Daan Date: Wed, 22 Apr 2026 11:13:20 -0700 Subject: [PATCH 17/38] let unix_madvise always return an error code (issue #1271, issues 3.8) --- src/prim/unix/prim.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/prim/unix/prim.c b/src/prim/unix/prim.c index 7330e915..949c2487 100644 --- a/src/prim/unix/prim.c +++ b/src/prim/unix/prim.c @@ -229,15 +229,17 @@ int _mi_prim_free(void* addr, size_t size ) { // mmap //--------------------------------------------- +// return errno on failure static int unix_madvise(void* addr, size_t size, int advice) { #if defined(__sun) - int res = madvise((caddr_t)addr, size, advice); // Solaris needs cast (issue #520) + const int res = madvise((caddr_t)addr, size, advice); // Solaris needs cast (issue #520) + return (res==0 ? 0 : errno); #elif defined(__QNX__) - int res = posix_madvise(addr, size, advice); + return posix_madvise(addr, size, advice); // posix returns errno #else - int res = madvise(addr, size, advice); - #endif + const int res = madvise(addr, size, advice); // linux returns -1 on failure and sets errno return (res==0 ? 0 : errno); + #endif } static void* unix_mmap_prim(void* addr, size_t size, int protect_flags, int flags, int fd) { @@ -514,8 +516,8 @@ int _mi_prim_reset(void* start, size_t size) { // default `MADV_DONTNEED` is used though. 
static _Atomic(size_t) advice = MI_ATOMIC_VAR_INIT(MADV_FREE); int oadvice = (int)mi_atomic_load_relaxed(&advice); - while ((err = unix_madvise(start, size, oadvice)) != 0 && errno == EAGAIN) { errno = 0; }; - if (err != 0 && errno == EINVAL && oadvice == MADV_FREE) { + while ((err = unix_madvise(start, size, oadvice)) != 0 && err == EAGAIN) { /* try again */ }; + if (err == EINVAL && oadvice == MADV_FREE) { // if MADV_FREE is not supported, fall back to MADV_DONTNEED from now on mi_atomic_store_release(&advice, (size_t)MADV_DONTNEED); err = unix_madvise(start, size, MADV_DONTNEED); From 52d5661fcdf56aaaed3cbdc8659bb75cd99a68cc Mon Sep 17 00:00:00 2001 From: Daan Date: Wed, 22 Apr 2026 11:21:19 -0700 Subject: [PATCH 18/38] fix comparison in mi_os_alloc_aligned_at_offset to allow decommitting exactly one page (issue #1271, 3.9) --- src/os.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/os.c b/src/os.c index 2eefe7df..037dc75e 100644 --- a/src/os.c +++ b/src/os.c @@ -425,7 +425,7 @@ void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t offse void* const p = (uint8_t*)start + extra; mi_assert(_mi_is_aligned((uint8_t*)p + offset, alignment)); // decommit the overallocation at the start - if (commit && extra > _mi_os_page_size()) { + if (commit && extra >= _mi_os_page_size()) { _mi_os_decommit(start, extra); } return p; From 3ef9c7732a8238f9472848a6086476bf95f0f869 Mon Sep 17 00:00:00 2001 From: Daan Date: Wed, 22 Apr 2026 11:33:50 -0700 Subject: [PATCH 19/38] use _mi_is_aligned instead of modulo (issue #1271, issues 3.11) --- include/mimalloc/internal.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/include/mimalloc/internal.h b/include/mimalloc/internal.h index 0368cf17..c32a2b16 100644 --- a/include/mimalloc/internal.h +++ b/include/mimalloc/internal.h @@ -376,8 +376,7 @@ static inline bool _mi_is_power_of_two(uintptr_t x) { // Is a pointer aligned? 
static inline bool _mi_is_aligned(void* p, size_t alignment) { - mi_assert_internal(alignment != 0); - return (((uintptr_t)p % alignment) == 0); + return (alignment==0 || ((uintptr_t)p % alignment) == 0); } // Align upwards From 58b36a3e290cba77a0501fc623501bfbd0547c59 Mon Sep 17 00:00:00 2001 From: Daan Date: Wed, 22 Apr 2026 11:45:17 -0700 Subject: [PATCH 20/38] on emscripten backend, delete the tls key on shutdown (issue #1271, issue 3.26, and issue #809) --- src/prim/emscripten/prim.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/prim/emscripten/prim.c b/src/prim/emscripten/prim.c index 64e96d33..93c76757 100644 --- a/src/prim/emscripten/prim.c +++ b/src/prim/emscripten/prim.c @@ -225,7 +225,9 @@ void _mi_prim_thread_init_auto_done(void) { } void _mi_prim_thread_done_auto_done(void) { - // nothing to do + if (_mi_heap_default_key != (pthread_key_t)(-1)) { // do not leak the key, see issue #809 + pthread_key_delete(_mi_heap_default_key); + } } void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) { From 68b7a8027ade2d69ad545d5abc56d36a2b1fe80c Mon Sep 17 00:00:00 2001 From: Daan Date: Thu, 23 Apr 2026 10:38:32 -0700 Subject: [PATCH 21/38] use _mi_is_aligned instead of % --- src/alloc-posix.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/alloc-posix.c b/src/alloc-posix.c index 175bf15f..60639ff4 100644 --- a/src/alloc-posix.c +++ b/src/alloc-posix.c @@ -61,14 +61,14 @@ int mi_posix_memalign(void** p, size_t alignment, size_t size) mi_attr_noexcept if (alignment==0 || !_mi_is_power_of_two(alignment)) return EINVAL; // not a power of 2 void* q = mi_malloc_aligned(size, alignment); if (q==NULL && size != 0) return ENOMEM; - mi_assert_internal(((uintptr_t)q % alignment) == 0); + mi_assert_internal(_mi_is_aligned(q,alignment)); *p = q; return 0; } mi_decl_nodiscard mi_decl_restrict void* mi_memalign(size_t alignment, size_t size) mi_attr_noexcept { void* p = mi_malloc_aligned(size, alignment); - 
mi_assert_internal(((uintptr_t)p % alignment) == 0); + mi_assert_internal(_mi_is_aligned(p,alignment)); return p; } @@ -95,7 +95,7 @@ mi_decl_nodiscard mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size */ // C11 also requires alignment to be a power-of-two (and > 0) which is checked in mi_malloc_aligned void* p = mi_malloc_aligned(size, alignment); - mi_assert_internal(((uintptr_t)p % alignment) == 0); + mi_assert_internal(_mi_is_aligned(p,alignment)); return p; } From 7865180a70b647f93620f33261ec8d0c90e693d7 Mon Sep 17 00:00:00 2001 From: Daan Date: Thu, 23 Apr 2026 18:10:27 -0700 Subject: [PATCH 22/38] use lock for initial output buffer (issue #1271, issue 3.12) --- src/options.c | 38 ++++++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/src/options.c b/src/options.c index 64ec6b0b..540340ef 100644 --- a/src/options.c +++ b/src/options.c @@ -338,33 +338,39 @@ static void mi_cdecl mi_out_stderr(const char* msg, void* arg) { #endif static char out_buf[MI_MAX_DELAY_OUTPUT+1]; static _Atomic(size_t) out_len; +static mi_lock_t out_buf_lock = MI_LOCK_INITIALIZER; static void mi_cdecl mi_out_buf(const char* msg, void* arg) { MI_UNUSED(arg); if (msg==NULL) return; - if (mi_atomic_load_relaxed(&out_len)>=MI_MAX_DELAY_OUTPUT) return; + if (mi_atomic_load_acquire(&out_len)>=MI_MAX_DELAY_OUTPUT) return; size_t n = _mi_strlen(msg); - if (n==0) return; - // claim space - size_t start = mi_atomic_add_acq_rel(&out_len, n); - if (start >= MI_MAX_DELAY_OUTPUT) return; - // check bound - if (start+n >= MI_MAX_DELAY_OUTPUT) { - n = MI_MAX_DELAY_OUTPUT-start-1; + if (n==0 || n >= MI_MAX_DELAY_OUTPUT) return; + // copy msg into the buffer + mi_lock(&out_buf_lock) { + const size_t start = mi_atomic_add_acq_rel(&out_len, n); + if (start < MI_MAX_DELAY_OUTPUT) { + // check bound + if (start+n >= MI_MAX_DELAY_OUTPUT) { + n = MI_MAX_DELAY_OUTPUT-start-1; + } + _mi_memcpy(&out_buf[start], msg, n); + } } - 
_mi_memcpy(&out_buf[start], msg, n); } static void mi_out_buf_flush(mi_output_fun* out, bool no_more_buf, void* arg) { if (out==NULL) return; // claim (if `no_more_buf == true`, no more output will be added after this point) - size_t count = mi_atomic_add_acq_rel(&out_len, (no_more_buf ? MI_MAX_DELAY_OUTPUT : 1)); - // and output the current contents - if (count>MI_MAX_DELAY_OUTPUT) count = MI_MAX_DELAY_OUTPUT; - out_buf[count] = 0; - out(out_buf,arg); - if (!no_more_buf) { - out_buf[count] = '\n'; // if continue with the buffer, insert a newline + mi_lock(&out_buf_lock) { + size_t count = mi_atomic_add_acq_rel(&out_len, (no_more_buf ? MI_MAX_DELAY_OUTPUT : 1)); + // and output the current contents + if (count>MI_MAX_DELAY_OUTPUT) count = MI_MAX_DELAY_OUTPUT; + out_buf[count] = 0; + out(out_buf,arg); + if (!no_more_buf) { + out_buf[count] = '\n'; // if continue with the buffer, insert a newline + } } } From f437fb8fd914a7d1b27c0615073b99f3534f16f5 Mon Sep 17 00:00:00 2001 From: Daan Date: Thu, 23 Apr 2026 18:16:21 -0700 Subject: [PATCH 23/38] change out_default to be atomic --- src/options.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/src/options.c b/src/options.c index 540340ef..cac6b38b 100644 --- a/src/options.c +++ b/src/options.c @@ -388,28 +388,29 @@ static void mi_cdecl mi_out_buf_stderr(const char* msg, void* arg) { // Default output handler // -------------------------------------------------------- -// Should be atomic but gives errors on many platforms as generally we cannot cast a function pointer to a uintptr_t. -// For now, don't register output from multiple threads. -static mi_output_fun* volatile mi_out_default; // = NULL +// The program should only install a single output handler from a single thread +// since otherwise the argument and output function may not match. 
+static _Atomic(void*) mi_out_default; // = // is `mi_output_fun*` (but some platforms don't support atomic function pointers) static _Atomic(void*) mi_out_arg; // = NULL static mi_output_fun* mi_out_get_default(void** parg) { + mi_output_fun* const out = (mi_output_fun*)mi_atomic_load_ptr_acquire(void,&mi_out_default); if (parg != NULL) { *parg = mi_atomic_load_ptr_acquire(void,&mi_out_arg); } - mi_output_fun* out = mi_out_default; return (out == NULL ? &mi_out_buf : out); } void mi_register_output(mi_output_fun* out, void* arg) mi_attr_noexcept { - mi_out_default = (out == NULL ? &mi_out_stderr : out); // stop using the delayed output buffer + mi_atomic_store_ptr_release(void,&mi_out_default, (void*)(out == NULL ? &mi_out_stderr : out)); // stop using the delayed output buffer mi_atomic_store_ptr_release(void,&mi_out_arg, arg); - if (out!=NULL) mi_out_buf_flush(out,true,arg); // output all the delayed output now + if (out!=NULL) { mi_out_buf_flush(out,true,arg); } // output all the delayed output now } // add stderr to the delayed output after the module is loaded static void mi_add_stderr_output(void) { mi_assert_internal(mi_out_default == NULL); mi_out_buf_flush(&mi_out_stderr, false, NULL); // flush current contents to stderr - mi_out_default = &mi_out_buf_stderr; // and add stderr to the delayed output + mi_atomic_store_ptr_release(void,&mi_out_default,(void*)&mi_out_buf_stderr); // and add stderr to the delayed output + mi_atomic_store_ptr_release(void,&mi_out_arg,NULL); } // -------------------------------------------------------- From e49fb94ef3a9f907f1e5ff77dc30c3364ea9ff42 Mon Sep 17 00:00:00 2001 From: Daan Date: Thu, 23 Apr 2026 18:29:39 -0700 Subject: [PATCH 24/38] fix page used count in heap visitor to match all used blocks in a page (issue #1271, issue 3.16) --- src/heap.c | 7 +++++-- src/segment.c | 1 + 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/heap.c b/src/heap.c index f9115a2f..84a26a55 100644 --- a/src/heap.c +++ 
b/src/heap.c @@ -576,7 +576,8 @@ bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t* page, mi_ mi_assert(page != NULL); if (page == NULL) return true; - _mi_page_free_collect(page,true); // collect both thread_delayed and local_free + // collect early so the page used count is reported correctly + // _mi_page_free_collect(page,true); // collect both thread_delayed and local_free mi_assert_internal(page->local_free == NULL); if (page->used == 0) return true; @@ -687,9 +688,11 @@ typedef bool (mi_heap_area_visit_fun)(const mi_heap_t* heap, const mi_heap_area_ static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t* page, void* vfun, void* arg) { MI_UNUSED(heap); MI_UNUSED(pq); + if (page==NULL) return true; mi_heap_area_visit_fun* fun = (mi_heap_area_visit_fun*)vfun; mi_heap_area_ex_t xarea; - xarea.page = page; + xarea.page = page; + _mi_page_free_collect(page,true); // collect early so the page->used is accurate _mi_heap_area_init(&xarea.area, page); return fun(heap, &xarea, arg); } diff --git a/src/segment.c b/src/segment.c index f62b70c0..41f3dc5f 100644 --- a/src/segment.c +++ b/src/segment.c @@ -1371,6 +1371,7 @@ static bool mi_segment_visit_page(mi_page_t* page, bool visit_blocks, mi_block_v _mi_heap_area_init(&area, page); if (!visitor(NULL, &area, NULL, area.block_size, arg)) return false; if (visit_blocks) { + _mi_page_free_collect(page,true); // collect so the used count is accurate return _mi_heap_area_visit_blocks(&area, page, visitor, arg); } else { From ed6fe722518c70a117b2edea9fd776293758a631 Mon Sep 17 00:00:00 2001 From: Daan Date: Thu, 23 Apr 2026 18:36:40 -0700 Subject: [PATCH 25/38] add assertions that the bottom 2 bits of the threadid are zero'd --- include/mimalloc/prim.h | 4 +++- src/init.c | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/include/mimalloc/prim.h b/include/mimalloc/prim.h index 82c4a9ab..b963ad5d 100644 --- a/include/mimalloc/prim.h +++ 
b/include/mimalloc/prim.h @@ -282,7 +282,9 @@ static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept; #if defined(MI_PRIM_THREAD_ID) static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept { - return MI_PRIM_THREAD_ID(); // used for example by CPython for a free threaded build (see python/cpython#115488) + const mi_threadid_t tid = MI_PRIM_THREAD_ID(); // used for example by CPython for a free threaded build (see python/cpython#115488) + mi_assert_internal( (tid & 0x03) == 0 ); // mimalloc reserves the bottom 2 bits + return tid; } #elif defined(_WIN32) diff --git a/src/init.c b/src/init.c index 7229bad7..538e45a4 100644 --- a/src/init.c +++ b/src/init.c @@ -120,7 +120,9 @@ mi_decl_cache_align const mi_heap_t _mi_heap_empty = { mi_threadid_t _mi_thread_id(void) mi_attr_noexcept { - return _mi_prim_thread_id(); + mi_threadid_t tid = _mi_prim_thread_id(); + mi_assert_internal( (tid & 0x03) == 0 ); // mimalloc reserves the bottom 2 bits + return tid; } // the thread-local default heap for allocation From e146ce02c91be4f03cd62dfd55a2a0603bd689b6 Mon Sep 17 00:00:00 2001 From: Daan Date: Thu, 23 Apr 2026 18:51:56 -0700 Subject: [PATCH 26/38] more accurate memory accounting for aligned os memory (issue #1271, issue 3.22) --- src/os.c | 48 ++++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/src/os.c b/src/os.c index 037dc75e..a1ee51ff 100644 --- a/src/os.c +++ b/src/os.c @@ -259,23 +259,26 @@ static void* mi_os_prim_alloc(size_t size, size_t try_alignment, bool commit, bo // Primitive aligned allocation from the OS. // This function guarantees the allocated memory is aligned. 
-static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** base) { +static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid) { + mi_assert_internal(memid!=NULL); mi_assert_internal(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0)); mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0); - mi_assert_internal(is_large != NULL); - mi_assert_internal(is_zero != NULL); - mi_assert_internal(base != NULL); + _mi_memzero(memid,sizeof(*memid)); if (!commit) allow_large = false; if (!(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0))) return NULL; size = _mi_align_up(size, _mi_os_page_size()); // try first with a requested alignment hint (this will usually be aligned directly on Win 10+ or BSD) - void* p = mi_os_prim_alloc(size, alignment, commit, allow_large, is_large, is_zero); + bool os_is_large = false; + bool os_is_zero = false; + void* os_base = NULL; + size_t os_size = size; + void* p = mi_os_prim_alloc(size, alignment, commit, allow_large, &os_is_large, &os_is_zero); if (p == NULL) return NULL; // aligned already? if (((uintptr_t)p % alignment) == 0) { - *base = p; + os_base = p; } else { // if not aligned, free it, overallocate, and unmap around it @@ -288,43 +291,47 @@ static void* mi_os_prim_alloc_aligned(size_t size, size_t alignment, bool commit if (!mi_os_mem_config.has_partial_free) { // win32 virtualAlloc cannot free parts of an allocated block // over-allocate uncommitted (virtual) memory - p = mi_os_prim_alloc(over_size, 1 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, is_zero); + p = mi_os_prim_alloc(over_size, 1 /*alignment*/, false /* commit? 
*/, false /* allow_large */, &os_is_large, &os_is_zero); if (p == NULL) return NULL; // set p to the aligned part in the full region // note: this is dangerous on Windows as VirtualFree needs the actual base pointer // this is handled though by having the `base` field in the memid's - *base = p; // remember the base + os_base = p; // remember the base + os_size = over_size; p = _mi_align_up_ptr(p, alignment); // explicitly commit only the aligned part if (commit) { if (!_mi_os_commit(p, size, NULL)) { - mi_os_prim_free(*base, over_size, 0); + mi_os_prim_free(os_base, over_size, 0); return NULL; } } } else { // mmap can free inside an allocation // overallocate... - p = mi_os_prim_alloc(over_size, 1, commit, false, is_large, is_zero); + p = mi_os_prim_alloc(over_size, 1, commit, false, &os_is_large, &os_is_zero); if (p == NULL) return NULL; // and selectively unmap parts around the over-allocated area. - void* aligned_p = _mi_align_up_ptr(p, alignment); - size_t pre_size = (uint8_t*)aligned_p - (uint8_t*)p; - size_t mid_size = _mi_align_up(size, _mi_os_page_size()); - size_t post_size = over_size - pre_size - mid_size; + void* const aligned_p = _mi_align_up_ptr(p, alignment); + const size_t pre_size = (uint8_t*)aligned_p - (uint8_t*)p; + const size_t mid_size = _mi_align_up(size, _mi_os_page_size()); + const size_t post_size = over_size - pre_size - mid_size; mi_assert_internal(pre_size < over_size&& post_size < over_size&& mid_size >= size); if (pre_size > 0) { mi_os_prim_free(p, pre_size, (commit ? pre_size : 0)); } if (post_size > 0) { mi_os_prim_free((uint8_t*)aligned_p + mid_size, post_size, (commit ? post_size : 0)); } // we can return the aligned pointer on `mmap` systems p = aligned_p; - *base = aligned_p; // since we freed the pre part, `*base == p`. + os_base = aligned_p; // since we freed the pre part, `os_base == p`. 
+ os_size = mid_size; } } - mi_assert_internal(p == NULL || (p != NULL && *base != NULL && ((uintptr_t)p % alignment) == 0)); + mi_assert_internal(p != NULL && os_base != NULL && _mi_is_aligned(p,alignment)); + mi_assert_internal(os_base <= p && size <= os_size); + *memid = _mi_memid_create_os(os_base,os_size,commit,os_is_zero,os_is_large); return p; } @@ -356,16 +363,9 @@ void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allo size = _mi_os_good_alloc_size(size); alignment = _mi_align_up(alignment, _mi_os_page_size()); - bool os_is_large = false; - bool os_is_zero = false; - void* os_base = NULL; - void* p = mi_os_prim_alloc_aligned(size, alignment, commit, allow_large, &os_is_large, &os_is_zero, &os_base ); + void* p = mi_os_prim_alloc_aligned(size, alignment, commit, allow_large, memid ); if (p == NULL) return NULL; - *memid = _mi_memid_create_os(p, size, commit, os_is_zero, os_is_large); - memid->mem.os.base = os_base; - memid->mem.os.size += ((uint8_t*)p - (uint8_t*)os_base); // todo: return from prim_alloc_aligned? - mi_assert_internal(memid->mem.os.size >= size); mi_assert_internal(_mi_is_aligned(p,alignment)); if (commit) { mi_assert_internal(memid->initially_committed); } From 60a1f3bcfd3b44bd5010cb44a0d44cc5cd1a9375 Mon Sep 17 00:00:00 2001 From: daanx Date: Fri, 24 Apr 2026 12:22:20 -0700 Subject: [PATCH 27/38] update MSVC C atomics wrapper to implement loads as readonly and use more optimal arm instructions (issue #1277) --- include/mimalloc/atomic.h | 157 +++++++++++++++++++++++++------------- 1 file changed, 105 insertions(+), 52 deletions(-) diff --git a/include/mimalloc/atomic.h b/include/mimalloc/atomic.h index 917b7f67..5ac8da6f 100644 --- a/include/mimalloc/atomic.h +++ b/include/mimalloc/atomic.h @@ -155,15 +155,17 @@ static inline void mi_atomic_maxi64_relaxed(volatile int64_t* p, int64_t x) { #elif defined(_MSC_VER) // Deprecated: MSVC plain C compilation wrapper that uses Interlocked operations to model C11 atomics. 
-// It is recommended to always compile as C++ when using MSVC +// It is recommended to always compile as C++ when using MSVC. #include #ifdef _WIN64 -typedef LONG64 msc_intptr_t; -#define MI_64(f) f##64 +typedef LONG64 msc_intptr_t; +#define MI_MSC_64(f) f##64 +#define MI_MSC_XX(f) f##64 #else -typedef LONG msc_intptr_t; -#define MI_64(f) f +typedef LONG msc_intptr_t; +#define MI_MSC_64(f) f +#define MI_MSC_XX(f) f##32 #endif typedef enum mi_memory_order_e { @@ -177,23 +179,23 @@ typedef enum mi_memory_order_e { static inline uintptr_t mi_atomic_fetch_add_explicit(_Atomic(uintptr_t)*p, uintptr_t add, mi_memory_order mo) { (void)(mo); - return (uintptr_t)MI_64(_InterlockedExchangeAdd)((volatile msc_intptr_t*)p, (msc_intptr_t)add); + return (uintptr_t)MI_MSC_64(_InterlockedExchangeAdd)((volatile msc_intptr_t*)p, (msc_intptr_t)add); } static inline uintptr_t mi_atomic_fetch_sub_explicit(_Atomic(uintptr_t)*p, uintptr_t sub, mi_memory_order mo) { (void)(mo); - return (uintptr_t)MI_64(_InterlockedExchangeAdd)((volatile msc_intptr_t*)p, -((msc_intptr_t)sub)); + return (uintptr_t)MI_MSC_64(_InterlockedExchangeAdd)((volatile msc_intptr_t*)p, -((msc_intptr_t)sub)); } static inline uintptr_t mi_atomic_fetch_and_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) { (void)(mo); - return (uintptr_t)MI_64(_InterlockedAnd)((volatile msc_intptr_t*)p, (msc_intptr_t)x); + return (uintptr_t)MI_MSC_64(_InterlockedAnd)((volatile msc_intptr_t*)p, (msc_intptr_t)x); } static inline uintptr_t mi_atomic_fetch_or_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) { (void)(mo); - return (uintptr_t)MI_64(_InterlockedOr)((volatile msc_intptr_t*)p, (msc_intptr_t)x); + return (uintptr_t)MI_MSC_64(_InterlockedOr)((volatile msc_intptr_t*)p, (msc_intptr_t)x); } static inline bool mi_atomic_compare_exchange_strong_explicit(_Atomic(uintptr_t)*p, uintptr_t* expected, uintptr_t desired, mi_memory_order mo1, mi_memory_order mo2) { (void)(mo1); (void)(mo2); - uintptr_t read = 
(uintptr_t)MI_64(_InterlockedCompareExchange)((volatile msc_intptr_t*)p, (msc_intptr_t)desired, (msc_intptr_t)(*expected)); + const uintptr_t read = (uintptr_t)MI_MSC_64(_InterlockedCompareExchange)((volatile msc_intptr_t*)p, (msc_intptr_t)desired, (msc_intptr_t)(*expected)); if (read == *expected) { return true; } @@ -207,68 +209,119 @@ static inline bool mi_atomic_compare_exchange_weak_explicit(_Atomic(uintptr_t)*p } static inline uintptr_t mi_atomic_exchange_explicit(_Atomic(uintptr_t)*p, uintptr_t exchange, mi_memory_order mo) { (void)(mo); - return (uintptr_t)MI_64(_InterlockedExchange)((volatile msc_intptr_t*)p, (msc_intptr_t)exchange); + return (uintptr_t)MI_MSC_64(_InterlockedExchange)((volatile msc_intptr_t*)p, (msc_intptr_t)exchange); } static inline void mi_atomic_thread_fence(mi_memory_order mo) { (void)(mo); _Atomic(uintptr_t) x = 0; mi_atomic_exchange_explicit(&x, 1, mo); } + static inline uintptr_t mi_atomic_load_explicit(_Atomic(uintptr_t) const* p, mi_memory_order mo) { (void)(mo); -#if defined(_M_IX86) || defined(_M_X64) - return *p; -#else - uintptr_t x = *p; - if (mo > mi_memory_order_relaxed) { - while (!mi_atomic_compare_exchange_weak_explicit((_Atomic(uintptr_t)*)p, &x, x, mo, mi_memory_order_relaxed)) { /* nothing */ }; - } - return x; -#endif + // assert(mo<=mi_memory_order_acquire); // others are not used by mimalloc + #if defined(_M_IX86) || defined(_M_X64) + return (uintptr_t)MI_MSC_XX(__iso_volatile_load)((volatile const intptr_t*)p); + #elif defined(_M_ARM) || defined(_M_ARM64) + if (mo == mi_memory_order_relaxed) { + return (uintptr_t)MI_MSC_XX(__iso_volatile_load)((volatile const intptr_t*)p); + } + else if (mo <= mi_memory_order_acquire) { + return MI_MSC_XX(__ldar)((volatile const uintptr_t*)p); + } + else { + const uintptr_t u = (uintptr_t)MI_MSC_XX(__iso_volatile_load)((volatile const intptr_t*)p); + __dmb(15); // _ARM(64)_BARRIER_SY + return u; + } + #else + #warning "define mi_atomic_load_explicit for MSVC C compilation on this 
platform (which should be readonly, see issue #1277)" + return MI_MSC_XX(__iso_volatile_load)((volatile const intptr_t*)p); + #endif } static inline void mi_atomic_store_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) { (void)(mo); -#if defined(_M_IX86) || defined(_M_X64) - *p = x; -#else - mi_atomic_exchange_explicit(p, x, mo); -#endif + // assert(mo<=mi_memory_order_release); // others are not used by mimalloc + #if defined(_M_IX86) || defined(_M_X64) + MI_MSC_XX(__iso_volatile_store)((volatile intptr_t*)p, x); + #elif defined(_M_ARM) || defined(_M_ARM64) + if (mo == mi_memory_order_relaxed) { + MI_MSC_XX(__iso_volatile_store)((volatile intptr_t*)p, x); + } + else if (mo <= mi_memory_order_release) { + MI_MSC_XX(__stlr)((volatile uintptr_t*)p,x); + } + else { + mi_atomic_exchange_explicit(p, x, mo); + } + #else + mi_atomic_exchange_explicit(p, x, mo); + #endif } + static inline int64_t mi_atomic_loadi64_explicit(_Atomic(int64_t)*p, mi_memory_order mo) { (void)(mo); -#if defined(_M_X64) - return *p; -#else - int64_t old = *p; - int64_t x = old; - while ((old = InterlockedCompareExchange64(p, x, old)) != x) { - x = old; - } - return x; -#endif + // assert(mo<=mi_memory_order_acquire); // others are not used by mimalloc + #if defined(_M_IX86) || defined(_M_X64) + return __iso_volatile_load64((volatile const int64_t*)p); + #elif defined(_M_ARM) || defined(_M_ARM64) + if (mo == mi_memory_order_relaxed) { + return __iso_volatile_load64((volatile const int64_t*)p); + } + #if defined(_M_ARM64) + else if (mo <= mi_memory_order_acquire) { + return __ldar64((volatile const uintptr_t*)p); + } + #endif + else { + const int64_t i = __iso_volatile_load64((volatile const int64_t*)p); + __dmb(15); // _ARM(64)_BARRIER_SY + return i; + } + #else + #warning "define mi_atomic_loadi64_explicit for MSVC C compilation on this platform (which should be readonly, see issue #1277)" + return __iso_volatile_load64((volatile const int64_t*)p); + #endif } + static inline void 
mi_atomic_storei64_explicit(_Atomic(int64_t)*p, int64_t x, mi_memory_order mo) { (void)(mo); -#if defined(_M_X64) - *p = x; -#else - InterlockedExchange64(p, x); -#endif + // assert(mo<=mi_memory_order_release); // others are not used by mimalloc + #if defined(_M_IX86) || defined(_M_X64) + __iso_volatile_store64((volatile int64_t*)p,x); + #elif defined(_M_ARM) || defined(_M_ARM64) + if (mo == mi_memory_order_relaxed) { + __iso_volatile_store64((volatile int64_t*)p,x); + } + #if defined(_M_ARM64) + else if (mo == mi_memory_order_release) { + __stlr64((volatile uint64_t*)p, (uint64_t)x); + } + #endif + else { + InterlockedExchange64(p, x); + } + #else + InterlockedExchange64(p, x); + #endif } // These are used by the statistics static inline int64_t mi_atomic_addi64_relaxed(volatile _Atomic(int64_t)*p, int64_t add) { -#ifdef _WIN64 - return (int64_t)mi_atomic_addi((int64_t*)p, add); -#else - int64_t current; - int64_t sum; - do { - current = *p; - sum = current + add; - } while (_InterlockedCompareExchange64(p, sum, current) != current); - return current; -#endif + #ifdef _WIN64 + return (int64_t)mi_atomic_addi((int64_t*)p, add); + #elif defined(_M_ARM) + return _InterlockedExchangeAdd64(p, add); + #else + // x86 + int64_t current; + int64_t sum; + do { + current = __iso_volatile_load64((volatile const int64_t*)p); + sum = current + add; + } while (_InterlockedCompareExchange64(p, sum, current) != current); + return current; + #endif } static inline void mi_atomic_void_addi64_relaxed(volatile int64_t* p, const volatile int64_t* padd) { const int64_t add = *padd; @@ -289,7 +342,7 @@ static inline void mi_atomic_addi64_acq_rel(volatile _Atomic(int64_t*)p, int64_t } static inline bool mi_atomic_casi64_strong_acq_rel(volatile _Atomic(int64_t*)p, int64_t* exp, int64_t des) { - int64_t read = _InterlockedCompareExchange64(p, des, *exp); + const int64_t read = _InterlockedCompareExchange64(p, des, *exp); if (read == *exp) { return true; } From 
8c8eb3c753ba53ce2b2d285f526fc0984f13a2ec Mon Sep 17 00:00:00 2001 From: Daan Date: Sun, 26 Apr 2026 22:45:17 -0700 Subject: [PATCH 28/38] only count decommit if needs_recommit is true (issue #1281) --- src/os.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/os.c b/src/os.c index a1ee51ff..2e9f4e7f 100644 --- a/src/os.c +++ b/src/os.c @@ -495,8 +495,7 @@ bool _mi_os_commit(void* addr, size_t size, bool* is_zero) { static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit, size_t stat_size) { mi_assert_internal(needs_recommit!=NULL); - mi_os_stat_decrease(committed, stat_size); - + // page align size_t csize; void* start = mi_os_page_align_area_conservative(addr, size, &csize); @@ -508,6 +507,9 @@ static bool mi_os_decommit_ex(void* addr, size_t size, bool* needs_recommit, siz if (err != 0) { _mi_warning_message("cannot decommit OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize); } + else if (*needs_recommit) { + mi_os_stat_decrease(committed, stat_size); + } mi_assert_internal(err == 0); return (err == 0); } From cf6ba6b73d7f19c9085451c368471dfe5a2906e4 Mon Sep 17 00:00:00 2001 From: Daan Date: Sun, 26 Apr 2026 22:47:23 -0700 Subject: [PATCH 29/38] fix unused variable warning (issue #1279) --- include/mimalloc/internal.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/mimalloc/internal.h b/include/mimalloc/internal.h index c32a2b16..133a235b 100644 --- a/include/mimalloc/internal.h +++ b/include/mimalloc/internal.h @@ -350,7 +350,7 @@ mi_decl_noreturn mi_decl_cold void _mi_assert_fail(const char* assertion, const Inlined definitions ----------------------------------------------------------- */ #define MI_UNUSED(x) (void)(x) -#if (MI_DEBUG>0) +#if (MI_DEBUG>1) #define MI_UNUSED_RELEASE(x) #else #define MI_UNUSED_RELEASE(x) MI_UNUSED(x) From 50a711f54c850abb73a8ff42e99d629ca07d8e4c Mon Sep 17 00:00:00 2001 From: daanx Date: Mon, 27 Apr 2026 12:12:27 -0700 
Subject: [PATCH 30/38] align on large OS page boundary for larger allocations --- src/os.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/os.c b/src/os.c index a1ee51ff..feb55df6 100644 --- a/src/os.c +++ b/src/os.c @@ -228,6 +228,13 @@ static void* mi_os_prim_alloc_at(void* hint_addr, size_t size, size_t try_alignm if (size == 0) return NULL; if (!commit) { allow_large = false; } if (try_alignment == 0) { try_alignment = 1; } // avoid 0 to ensure there will be no divide by zero when aligning + + // try to align along large OS page size for larger allocations + const size_t large_page_size = mi_os_mem_config.large_page_size; + if (large_page_size > 0 && hint_addr == NULL && size >= 8*large_page_size && _mi_is_power_of_two(try_alignment) && try_alignment < large_page_size) { + try_alignment = large_page_size; + } + *is_zero = false; void* p = NULL; int err = _mi_prim_alloc(hint_addr, size, try_alignment, commit, allow_large, is_large, is_zero, &p); From 3b7c8fb291dfbb501f2611604e358423d55a54d3 Mon Sep 17 00:00:00 2001 From: daanx Date: Mon, 27 Apr 2026 12:12:56 -0700 Subject: [PATCH 31/38] also use eager arena commit if large OS pages are allowed --- src/arena.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/arena.c b/src/arena.c index c8e1a544..17c03afb 100644 --- a/src/arena.c +++ b/src/arena.c @@ -390,7 +390,7 @@ static bool mi_arena_reserve(size_t req_size, bool allow_large, mi_arena_id_t *a // commit eagerly? bool arena_commit = false; - if (mi_option_get(mi_option_arena_eager_commit) == 2) { arena_commit = _mi_os_has_overcommit(); } + if (mi_option_get(mi_option_arena_eager_commit) == 2) { arena_commit = _mi_os_has_overcommit() || mi_option_is_enabled(mi_option_allow_large_os_pages); } else if (mi_option_get(mi_option_arena_eager_commit) == 1) { arena_commit = true; } return (mi_reserve_os_memory_ex(arena_reserve, arena_commit, allow_large, false /* exclusive? 
*/, arena_id) == 0); From 688dbafbc22b623669751885a7bdc6fceb3ad464 Mon Sep 17 00:00:00 2001 From: daanx Date: Mon, 27 Apr 2026 12:39:36 -0700 Subject: [PATCH 32/38] remove unneeded try_alignment adjustment as that is done in os.c now --- src/prim/unix/prim.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/prim/unix/prim.c b/src/prim/unix/prim.c index 949c2487..a7a6b929 100644 --- a/src/prim/unix/prim.c +++ b/src/prim/unix/prim.c @@ -229,7 +229,7 @@ int _mi_prim_free(void* addr, size_t size ) { // mmap //--------------------------------------------- -// return errno on failure +// return errno on failure static int unix_madvise(void* addr, size_t size, int advice) { #if defined(__sun) const int res = madvise((caddr_t)addr, size, advice); // Solaris needs cast (issue #520) @@ -419,10 +419,6 @@ int _mi_prim_alloc(void* hint_addr, size_t size, size_t try_alignment, bool comm mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0); mi_assert_internal(commit || !allow_large); mi_assert_internal(try_alignment > 0); - if (hint_addr == NULL && size >= 8*MI_UNIX_LARGE_PAGE_SIZE && try_alignment > 1 && _mi_is_power_of_two(try_alignment) && try_alignment < MI_UNIX_LARGE_PAGE_SIZE) { - try_alignment = MI_UNIX_LARGE_PAGE_SIZE; // try to align along large page size for larger allocations - } - *is_zero = true; int protect_flags = (commit ? 
(PROT_WRITE | PROT_READ) : PROT_NONE); *addr = unix_mmap(hint_addr, size, try_alignment, protect_flags, false, allow_large, is_large); From 5dfa1741f204a346918fef140d0d729881152037 Mon Sep 17 00:00:00 2001 From: Daan Date: Tue, 28 Apr 2026 17:03:32 -0700 Subject: [PATCH 33/38] use mi_segment_is_abandoned instead of direct check --- src/segment.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/segment.c b/src/segment.c index b06848f5..e82534e3 100644 --- a/src/segment.c +++ b/src/segment.c @@ -693,7 +693,7 @@ static mi_slice_t* mi_segment_span_free_coalesce(mi_slice_t* slice, mi_segments_ } // otherwise coalesce the span and add to the free span queues - const bool is_abandoned = (segment->thread_id == 0); // mi_segment_is_abandoned(segment); + const bool is_abandoned = mi_segment_is_abandoned(segment); size_t slice_count = slice->slice_count; mi_slice_t* next = slice + slice->slice_count; mi_assert_internal(next <= mi_segment_slices_end(segment)); From 2aee53b1f9615f06a96d84c343bb5d81feb4dbd4 Mon Sep 17 00:00:00 2001 From: Daan Date: Tue, 28 Apr 2026 17:07:49 -0700 Subject: [PATCH 34/38] add guard in _mi_page_ptr_unalign to prevent division by zero --- src/free.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/free.c b/src/free.c index cd8396c3..72ff035b 100644 --- a/src/free.c +++ b/src/free.c @@ -61,12 +61,18 @@ mi_block_t* _mi_page_ptr_unalign(const mi_page_t* page, const void* p) { mi_assert_internal(page!=NULL && p!=NULL); size_t diff = (uint8_t*)p - page->page_start; - size_t adjust; + size_t adjust = 0; if mi_likely(page->block_size_shift != 0) { adjust = diff & (((size_t)1 << page->block_size_shift) - 1); } else { - adjust = diff % mi_page_block_size(page); + const size_t block_size = page->block_size; + if mi_likely(block_size != 0) { + adjust = diff % block_size; + } + else { + _mi_error_message(EFAULT, "reading from invalid page, possibly corrupted meta-data (address=%p, page=%p)\n", p, page); + } } 
return (mi_block_t*)((uintptr_t)p - adjust); From c9b9c8c501487123d7ddd9c3e44cb3350f5c3ebf Mon Sep 17 00:00:00 2001 From: Daan Date: Tue, 28 Apr 2026 17:11:51 -0700 Subject: [PATCH 35/38] always perform a cookie check when using _mi_segment_of --- src/segment-map.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/segment-map.c b/src/segment-map.c index bbcea28a..e3cbfc16 100644 --- a/src/segment-map.c +++ b/src/segment-map.c @@ -113,11 +113,16 @@ static mi_segment_t* _mi_segment_of(const void* p) { size_t bitidx; mi_segmap_part_t* part = mi_segment_map_index_of(segment, false /* dont alloc if not present */, &index, &bitidx); if (part == NULL) return NULL; - const uintptr_t mask = mi_atomic_load_relaxed(&part->map[index]); + const uintptr_t mask = mi_atomic_load_relaxed(&part->map[index]); if mi_likely((mask & ((uintptr_t)1 << bitidx)) != 0) { - bool cookie_ok = (_mi_ptr_cookie(segment) == segment->cookie); - mi_assert_internal(cookie_ok); MI_UNUSED(cookie_ok); - return segment; // yes, allocated by us + // yes, allocated by us + const bool cookie_ok = (_mi_ptr_cookie(segment) == segment->cookie); + if mi_likely(cookie_ok) { + return segment; // yes, allocated by us and valid + } + else { + _mi_error_message(EFAULT, "segment map found an invalid segment, possibly corrupted meta-data (address=%p, segment=%p)\n", p, segment); + } } return NULL; } From e610a0987d84fc7725cae275846c77e72513549b Mon Sep 17 00:00:00 2001 From: Daan Date: Wed, 1 Apr 2026 09:45:09 -0700 Subject: [PATCH 36/38] add SpecBot invariant checks --- src/page-queue.c | 31 +++++++++++++++++++++++++++++++ src/page.c | 23 +++++++++++++++++++++++ src/segment.c | 12 ++++++++++++ 3 files changed, 66 insertions(+) diff --git a/src/page-queue.c b/src/page-queue.c index 068d11b2..bfe9f4fd 100644 --- a/src/page-queue.c +++ b/src/page-queue.c @@ -246,6 +246,10 @@ static void mi_page_queue_remove(mi_page_queue_t* queue, mi_page_t* page) { } +#if MI_DEBUG >= 3 +static bool 
mi_page_queue_is_consistent(const mi_page_queue_t* queue); +#endif + static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) { mi_assert_internal(mi_page_heap(page) == heap); mi_assert_internal(!mi_page_queue_contains(queue, page)); @@ -272,8 +276,35 @@ static void mi_page_queue_push(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_ // update direct mi_heap_queue_first_update(heap, queue); heap->page_count++; + + // [specbot Q-NEW-1] first/last must be both null or both non-null (L1, O(1)) + mi_assert_internal((queue->first == NULL) == (queue->last == NULL)); + // [specbot Q-NEW-2] last element must have no next (L1, O(1)) + mi_assert_internal(queue->last == NULL || queue->last->next == NULL); + mi_assert_expensive(mi_page_queue_is_consistent(queue)); } +#if MI_DEBUG >= 3 +// [specbot Q-NEW-3] Verify doubly-linked list forward/backward consistency (L2, O(n)) +static bool mi_page_queue_is_consistent(const mi_page_queue_t* queue) { + if (queue->first == NULL) return (queue->last == NULL); + if (queue->last == NULL) return false; + // forward: first -> ... 
-> last + const mi_page_t* p = queue->first; + const mi_page_t* prev = NULL; + size_t count = 0; + while (p != NULL) { + mi_assert_internal(p->prev == prev); + prev = p; + p = p->next; + count++; + if (count > 100000) return false; // cycle guard + } + mi_assert_internal(prev == queue->last); + return true; +} +#endif + static void mi_page_queue_move_to_front(mi_heap_t* heap, mi_page_queue_t* queue, mi_page_t* page) { mi_assert_internal(mi_page_heap(page) == heap); mi_assert_internal(mi_page_queue_contains(queue, page)); diff --git a/src/page.c b/src/page.c index 4068a163..9a8aac0e 100644 --- a/src/page.c +++ b/src/page.c @@ -79,9 +79,17 @@ static bool mi_page_list_is_valid(mi_page_t* page, mi_block_t* p) { static bool mi_page_is_valid_init(mi_page_t* page) { mi_assert_internal(mi_page_block_size(page) > 0); + mi_assert_internal(page->used <= page->capacity); mi_assert_internal(page->capacity <= page->reserved); + // [specbot P-NEW-1] block_size_shift must be consistent with block_size (L1, O(1)) + mi_assert_internal(page->block_size_shift == 0 || (mi_page_block_size(page) == ((size_t)1 << page->block_size_shift))); + // [specbot P-NEW-2] capacity must be nonzero when blocks are in use (L1, O(1)) + mi_assert_internal(page->used == 0 || page->capacity > 0); + // [specbot P-NEW-4] page_start must be non-null when capacity > 0 (L1, O(1)) + mi_assert_internal(page->capacity == 0 || mi_page_start(page) != NULL); + uint8_t* start = mi_page_start(page); mi_assert_internal(start == _mi_segment_page_start(_mi_page_segment(page), page, NULL)); mi_assert_internal(page->is_huge == (_mi_page_segment(page)->kind == MI_SEGMENT_HUGE)); @@ -90,6 +98,18 @@ static bool mi_page_is_valid_init(mi_page_t* page) { mi_assert_internal(mi_page_list_is_valid(page,page->free)); mi_assert_internal(mi_page_list_is_valid(page,page->local_free)); + // [specbot P-NEW-7] All free list blocks are aligned to block_size (L2, O(n)) + { + size_t bsize = mi_page_block_size(page); + uint8_t* pstart = 
mi_page_start(page); + for (mi_block_t* b = page->free; b != NULL; b = mi_block_next(page, b)) { + mi_assert_internal(((uint8_t*)b - pstart) % bsize == 0); + } + for (mi_block_t* b = page->local_free; b != NULL; b = mi_block_next(page, b)) { + mi_assert_internal(((uint8_t*)b - pstart) % bsize == 0); + } + } + #if MI_DEBUG>3 // generally too expensive to check this if (page->free_is_zero) { const size_t ubsize = mi_page_usable_block_size(page); @@ -122,6 +142,9 @@ bool _mi_page_is_valid(mi_page_t* page) { if (mi_page_heap(page)!=NULL) { mi_segment_t* segment = _mi_page_segment(page); + // [specbot P-NEW-6] heap_tag must match owning heap's tag (L1, O(1)) + mi_assert_internal(page->heap_tag == mi_page_heap(page)->tag); + mi_assert_internal(!_mi_process_is_initialized || segment->thread_id==0 || segment->thread_id == mi_page_heap(page)->thread_id); #if MI_HUGE_PAGE_ABANDON if (segment->kind != MI_SEGMENT_HUGE) diff --git a/src/segment.c b/src/segment.c index e82534e3..7ae84616 100644 --- a/src/segment.c +++ b/src/segment.c @@ -275,13 +275,23 @@ static bool mi_segment_is_valid(mi_segment_t* segment, mi_segments_tld_t* tld) { mi_assert_internal(segment->abandoned <= segment->used); mi_assert_internal(segment->thread_id == 0 || segment->thread_id == _mi_thread_id()); mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->purge_mask)); // can only decommit committed blocks + + // [specbot S-NEW-1] segment must have at least one slice (L1, O(1)) + mi_assert_internal(segment->segment_slices > 0); + // [specbot S-NEW-2] info slices must leave room for at least one data slice (L1, O(1)) + mi_assert_internal(segment->segment_info_slices < segment->segment_slices); + // [specbot S-NEW-3] slice_entries must not exceed array bounds (L1, O(1)) + mi_assert_internal(segment->slice_entries <= MI_SLICES_PER_SEGMENT); + //mi_assert_internal(segment->segment_info_size % MI_SEGMENT_SLICE_SIZE == 0); mi_slice_t* slice = &segment->slices[0]; const mi_slice_t* end = 
mi_segment_slices_end(segment); size_t used_count = 0; + size_t total_slice_count = 0; mi_span_queue_t* sq; while(slice < end) { mi_assert_internal(slice->slice_count > 0); + total_slice_count += slice->slice_count; mi_assert_internal(slice->slice_offset == 0); size_t index = mi_slice_index(slice); size_t maxindex = (index + slice->slice_count >= segment->slice_entries ? segment->slice_entries : index + slice->slice_count) - 1; @@ -316,6 +326,8 @@ static bool mi_segment_is_valid(mi_segment_t* segment, mi_segments_tld_t* tld) { slice = &segment->slices[maxindex+1]; } mi_assert_internal(slice == end); + // [specbot S-NEW-5] Total slice counts must sum to segment_slices (L2, O(n)) + mi_assert_internal(total_slice_count == segment->segment_slices); mi_assert_internal(used_count == segment->used + 1); return true; } From 8046d4844d14fe0da6bae0b2fcccc5fa29daf00f Mon Sep 17 00:00:00 2001 From: Daan Date: Wed, 29 Apr 2026 09:39:46 -0700 Subject: [PATCH 37/38] check segment cookie already at security level 3 (versus 4) as documented --- src/free.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/free.c b/src/free.c index 72ff035b..8762ae1e 100644 --- a/src/free.c +++ b/src/free.c @@ -150,7 +150,7 @@ static inline mi_segment_t* mi_checked_ptr_segment(const void* p, const char* ms } } #endif - #if (MI_DEBUG>0 || MI_SECURE>=4) + #if (MI_DEBUG>0 || MI_SECURE>=3) if mi_unlikely(_mi_ptr_cookie(segment) != segment->cookie) { _mi_error_message(EINVAL, "%s: pointer does not point to a valid heap space: %p\n", msg, p); return NULL; From 24ef7bdcd897b8c175d43ce609287187c53c42e7 Mon Sep 17 00:00:00 2001 From: Daan Date: Wed, 29 Apr 2026 15:21:13 -0700 Subject: [PATCH 38/38] bump version to v1.9.10 --- .github/workflows/release.yaml | 4 ++-- cmake/mimalloc-config-version.cmake | 2 +- contrib/vcpkg/portfile.cmake | 2 +- contrib/vcpkg/vcpkg.json | 2 +- doc/release-notes.md | 2 +- include/mimalloc.h | 2 +- readme.md | 11 ++++++++--- 7 files changed, 15 insertions(+), 
10 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index c251ab69..10edaea3 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -9,7 +9,7 @@ permissions: contents: write env: - RELEASE: Release v3.3.1 + RELEASE: Release v3.3.2 FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true name: Release @@ -19,7 +19,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - branch: [v1.9.9,v2.3.1,v3.3.1] # [dev,dev2,dev3] + branch: [v1.9.10,v2.3.2,v3.3.2] # [dev,dev2,dev3] # we build on the oldest ubuntu version for better binary compatibility. os: [windows-latest, macOS-latest, macos-15-intel, ubuntu-22.04, ubuntu-22.04-arm] diff --git a/cmake/mimalloc-config-version.cmake b/cmake/mimalloc-config-version.cmake index d0d46889..96c9f2c9 100644 --- a/cmake/mimalloc-config-version.cmake +++ b/cmake/mimalloc-config-version.cmake @@ -1,6 +1,6 @@ set(mi_version_major 1) set(mi_version_minor 9) -set(mi_version_patch 9) +set(mi_version_patch 10) set(mi_version ${mi_version_major}.${mi_version_minor}) set(PACKAGE_VERSION ${mi_version}) diff --git a/contrib/vcpkg/portfile.cmake b/contrib/vcpkg/portfile.cmake index 7e07a335..91249f0b 100644 --- a/contrib/vcpkg/portfile.cmake +++ b/contrib/vcpkg/portfile.cmake @@ -3,7 +3,7 @@ vcpkg_from_github( REPO microsoft/mimalloc HEAD_REF master - # The "REF" can be a commit hash, branch name (dev3), or a version (v3.3.1). + # The "REF" can be a commit hash, branch name (dev3), or a version (v3.3.2). 
REF "v${VERSION}" # REF 866ce5b89db1dbc3e66bbf89041291fd16329518 diff --git a/contrib/vcpkg/vcpkg.json b/contrib/vcpkg/vcpkg.json index 5e5e026e..84ae68f3 100644 --- a/contrib/vcpkg/vcpkg.json +++ b/contrib/vcpkg/vcpkg.json @@ -1,6 +1,6 @@ { "name": "mimalloc", - "version": "3.3.0", + "version": "3.3.2", "port-version": 0, "description": "Compact general purpose allocator with excellent performance", "homepage": "https://github.com/microsoft/mimalloc", diff --git a/doc/release-notes.md b/doc/release-notes.md index 10052d57..b467c146 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -10,6 +10,6 @@ Notes: - Generally it is recommended to download sources (or use `vcpkg` etc.) and build mimalloc as part of your project. - Source releases can also be downloaded directly from github by the tag. - For example . + For example . - Binary releases include a release-, debug-, and secure build. - Linux binaries are built on Ubuntu 22. diff --git a/include/mimalloc.h b/include/mimalloc.h index 0a813295..57433639 100644 --- a/include/mimalloc.h +++ b/include/mimalloc.h @@ -8,7 +8,7 @@ terms of the MIT license. A copy of the license can be found in the file #ifndef MIMALLOC_H #define MIMALLOC_H -#define MI_MALLOC_VERSION 10909 // major + 2 digits minor + 2 digits patch +#define MI_MALLOC_VERSION 10910 // major + 2 digits minor + 2 digits patch // ------------------------------------------------------ // Compiler specific attributes diff --git a/readme.md b/readme.md index 1d8e0e85..553041e9 100644 --- a/readme.md +++ b/readme.md @@ -15,9 +15,9 @@ is a general purpose allocator with excellent [performance](#performance) charac Initially developed by Daan Leijen for the runtime systems of the [Koka](https://koka-lang.github.io) and [Lean](https://github.com/leanprover/lean) languages. -Latest release : `v3.3.1` (2026-04-20) recommended. -Latest v2 release: `v2.3.1` (2026-04-20) stable. -Latest v1 release: `v1.9.9` (2026-04-20) legacy. 
+Latest release : `v3.3.2` (2026-04-29) recommended. +Latest v2 release: `v2.3.2` (2026-04-29) stable. +Latest v1 release: `v1.9.10` (2026-04-29) legacy. mimalloc is a drop-in replacement for `malloc` and can be used in other programs without code changes, for example, on dynamically linked ELF-based systems (Linux, BSD, etc.) you can use it as: @@ -88,6 +88,11 @@ New development is mostly on v3, while v1 and v2 are maintained with security an - __v1__: legacy version: initial design of mimalloc (release tags: `v1.9.x`, development branch `dev`). Send PR's against this version if possible. ### Releases +* 2026-04-29, `v1.9.10`, `v2.3.2`, `v3.3.2`: various bug and security fixes through LLM audit (by @Zoxc). + Only increase minimal purge size automatically if allow_thp is set to 2. Enable large OS alignment + on all platforms (fixing OS large pages on Windows). Fix accounting of committed memory on Linux/macOS. + Update MSVC atomics implementation when using C mode. Upstream Emscripten fixes. Proper atomic do-once + implementation. * 2026-04-20, `v1.9.9`, `v2.3.1`, `v3.3.1`: various bug and security fixes. Special thanks to @jinpzhanAMD, @res2k, and @GoldJohnKing for their help in improving Windows finalization, and @Zoxc for his help in finding various issues.