-
Notifications
You must be signed in to change notification settings - Fork 44
Expand file tree
/
Copy pathbase_alloc.c
More file actions
337 lines (276 loc) · 10.4 KB
/
base_alloc.c
File metadata and controls
337 lines (276 loc) · 10.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
/*
* Copyright (C) 2024-2025 Intel Corporation
*
* Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include <assert.h>
#include "base_alloc.h"
#include "base_alloc_internal.h"
#include "utils_common.h"
#include "utils_concurrency.h"
#include "utils_log.h"
#include "utils_sanitizers.h"
// minimum size of a single pool of the base allocator
#define MINIMUM_POOL_SIZE (ba_os_get_page_size())
// minimum number of chunks used to calculate the size of pools
#define MINIMUM_CHUNK_COUNT (128)
// alignment of the base allocator
#define MEMORY_ALIGNMENT (sizeof(uintptr_t))
// forward declarations so the structs below can reference their own type
typedef struct umf_ba_chunk_t umf_ba_chunk_t;
typedef struct umf_ba_next_pool_t umf_ba_next_pool_t;

// memory chunk of size 'chunk_size'
// 'next' is meaningful only while the chunk sits on the free list;
// once allocated the whole chunk belongs to the caller
struct umf_ba_chunk_t {
    umf_ba_chunk_t *next; // next chunk on the free list
    char user_data[];     // flexible array member - caller's data area
};

// metadata is set and used only in the main (the first) pool
struct umf_ba_main_pool_meta_t {
    size_t pool_size; // size of each pool (argument of each ba_os_alloc() call)
    size_t chunk_size; // size of all memory chunks in this pool
    utils_mutex_t free_lock; // lock of free_list
    umf_ba_chunk_t *free_list; // list of free chunks
    size_t n_allocs; // number of allocated chunks
#ifndef NDEBUG
    size_t n_pools;  // debug only: number of pools (main + "next" pools)
    size_t n_chunks; // debug only: total number of chunks ever created
#endif /* NDEBUG */
};

// the main pool of the base allocator (there is only one such pool)
struct umf_ba_pool_t {
    // address of the beginning of the next pool (a list of allocated pools to be freed in umf_ba_destroy())
    umf_ba_next_pool_t *next_pool;

    // metadata is set and used only in the main (the first) pool
    struct umf_ba_main_pool_meta_t metadata;

    // data area of the main pool (the first one) starts here
    char data[];
};

// the "next" pools of the base allocator (pools allocated later, when we run out of the memory of the main pool)
struct umf_ba_next_pool_t {
    // address of the beginning of the next pool (a list of allocated pools to be freed in umf_ba_destroy())
    umf_ba_next_pool_t *next_pool;

    // data area of all pools except of the main (the first one) starts here
    char data[];
};
#ifndef NDEBUG
#ifdef UMF_DEVELOPER_MODE
// ba_debug_checks - consistency check of the pool's debug counters.
// Walks the list of pools and the free list and asserts that they agree
// with the n_pools / n_chunks / n_allocs metadata. Must be called while
// holding metadata.free_lock. O(pools + free chunks), hence compiled in
// only for developer-mode debug builds.
static void ba_debug_checks(umf_ba_pool_t *pool) {
    // count pools
    size_t n_pools = 1;
    umf_ba_next_pool_t *next_pool = pool->next_pool;
    while (next_pool) {
        n_pools++;
        next_pool = next_pool->next_pool;
    }
    assert(n_pools == pool->metadata.n_pools);

    // count chunks
    size_t n_free_chunks = 0;
    umf_ba_chunk_t *next_chunk = pool->metadata.free_list;
    while (next_chunk) {
        n_free_chunks++;
        // free chunks are poisoned for the sanitizers - temporarily mark the
        // header readable to follow the 'next' pointer, then poison it back
        utils_annotate_memory_defined(next_chunk, sizeof(umf_ba_chunk_t));
        umf_ba_chunk_t *tmp = next_chunk;
        next_chunk = next_chunk->next;
        utils_annotate_memory_inaccessible(tmp, sizeof(umf_ba_chunk_t));
    }
    // every chunk ever created is either allocated or on the free list
    assert(n_free_chunks == pool->metadata.n_chunks - pool->metadata.n_allocs);
}
#else  /* !UMF_DEVELOPER_MODE */
static inline void ba_debug_checks(umf_ba_pool_t *pool) {
    // no debug checks in release mode
    (void)pool; // suppress unused parameter warning
}
#endif /* !UMF_DEVELOPER_MODE */
#endif /* NDEBUG */
// ba_divide_memory_into_chunks - divide given memory into chunks of chunk_size and add them to the free_list
// ba_divide_memory_into_chunks - slice the region [ptr, ptr + size) into
// chunks of metadata.chunk_size bytes and link them into the (currently
// empty) free list. Trailing bytes smaller than one chunk are left unused.
static void ba_divide_memory_into_chunks(umf_ba_pool_t *pool, void *ptr,
                                         size_t size) {
    // the region has to be writable while the list is being built
    utils_annotate_memory_undefined(ptr, size);

    assert(pool->metadata.free_list == NULL);
    assert(size > pool->metadata.chunk_size);

    const size_t chunk_size = pool->metadata.chunk_size;
    char *cursor = ptr;
    umf_ba_chunk_t *chunk = (umf_ba_chunk_t *)cursor;
    umf_ba_chunk_t *prev = chunk;

    for (size_t remaining = size; remaining >= chunk_size;
         remaining -= chunk_size) {
        chunk = (umf_ba_chunk_t *)cursor;
        prev->next = chunk;
        cursor += chunk_size;
        prev = chunk;
#ifndef NDEBUG
        pool->metadata.n_chunks++;
#endif /* NDEBUG */
    }

    chunk->next = NULL;             // terminate the list
    pool->metadata.free_list = ptr; // first chunk becomes the list head

    // free chunks must not be touched until they are handed out
    utils_annotate_memory_inaccessible(ptr, size);
}
// ba_os_alloc_annotated - allocate a pool from the OS and immediately
// poison it for the sanitizers; returns NULL on allocation failure.
static void *ba_os_alloc_annotated(size_t pool_size) {
    void *mem = ba_os_alloc(pool_size);
    if (mem == NULL) {
        return NULL;
    }

    utils_annotate_memory_inaccessible(mem, pool_size);
    return mem;
}
// umf_ba_create - create a base allocator that hands out fixed-size chunks
// of 'size' bytes (rounded up to MEMORY_ALIGNMENT).
// Returns NULL when the size computation overflows, the OS allocation
// fails or the mutex cannot be initialized.
// The returned pool must be released with umf_ba_destroy().
umf_ba_pool_t *umf_ba_create(size_t size) {
    // ALIGN_UP_SAFE() returns 0 on arithmetic overflow
    size_t chunk_size = ALIGN_UP_SAFE(size, MEMORY_ALIGNMENT);
    if (chunk_size == 0) {
        return NULL;
    }

    size_t mutex_size = ALIGN_UP(utils_mutex_get_size(), MEMORY_ALIGNMENT);
    size_t metadata_size = sizeof(struct umf_ba_main_pool_meta_t);
    // room for the next_pool pointer, the metadata and at least
    // MINIMUM_CHUNK_COUNT chunks
    size_t pool_size = sizeof(void *) + metadata_size + mutex_size +
                       (MINIMUM_CHUNK_COUNT * chunk_size);
    if (pool_size < MINIMUM_POOL_SIZE) {
        pool_size = MINIMUM_POOL_SIZE;
    }

    // every pool is a whole number of OS pages
    pool_size = ALIGN_UP_SAFE(pool_size, ba_os_get_page_size());
    if (pool_size == 0) {
        return NULL;
    }

    umf_ba_pool_t *pool = (umf_ba_pool_t *)ba_os_alloc_annotated(pool_size);
    if (!pool) {
        return NULL;
    }

    // annotate metadata region as accessible
    utils_annotate_memory_undefined(pool, offsetof(umf_ba_pool_t, data));

    pool->metadata.pool_size = pool_size;
    pool->metadata.chunk_size = chunk_size;
    pool->next_pool = NULL; // this is the only pool now
    pool->metadata.n_allocs = 0;
#ifndef NDEBUG
    pool->metadata.n_pools = 1;
    pool->metadata.n_chunks = 0;
#endif /* NDEBUG */

    utils_annotate_memory_defined(pool, offsetof(umf_ba_pool_t, data));

    // the chunk area starts right after the metadata, aligned up to
    // MEMORY_ALIGNMENT (size_left shrinks by the alignment padding)
    char *data_ptr = (char *)&pool->data;
    size_t size_left = pool_size - offsetof(umf_ba_pool_t, data);
    utils_align_ptr_up_size_down((void **)&data_ptr, &size_left,
                                 MEMORY_ALIGNMENT);

    // init free_lock
    utils_mutex_t *mutex = utils_mutex_init(&pool->metadata.free_lock);
    if (!mutex) {
        ba_os_free(pool, pool_size);
        return NULL;
    }

    pool->metadata.free_list = NULL;
    ba_divide_memory_into_chunks(pool, data_ptr, size_left);

    return pool;
}
// umf_ba_alloc - allocate one chunk of metadata.chunk_size bytes.
// If the free list is empty, a new pool of metadata.pool_size bytes is
// allocated from the OS, linked into the pool list and divided into chunks.
// Serialized on metadata.free_lock. Returns NULL when the OS allocation
// fails (or, defensively, if the free list is unexpectedly empty).
void *umf_ba_alloc(umf_ba_pool_t *pool) {
    utils_mutex_lock(&pool->metadata.free_lock);
    if (pool->metadata.free_list == NULL) {
        umf_ba_next_pool_t *new_pool =
            (umf_ba_next_pool_t *)ba_os_alloc_annotated(
                pool->metadata.pool_size);
        if (!new_pool) {
            utils_mutex_unlock(&pool->metadata.free_lock);
            return NULL;
        }

        // annotate metadata region as accessible
        utils_annotate_memory_undefined(new_pool, sizeof(umf_ba_next_pool_t));

        // add the new pool to the list of pools
        new_pool->next_pool = pool->next_pool;
        pool->next_pool = new_pool;

#ifndef NDEBUG
        pool->metadata.n_pools++;
#endif /* NDEBUG */

        char *data_ptr = (char *)&new_pool->data;
        size_t size_left =
            pool->metadata.pool_size - offsetof(umf_ba_next_pool_t, data);

        utils_align_ptr_up_size_down((void **)&data_ptr, &size_left,
                                     MEMORY_ALIGNMENT);
        ba_divide_memory_into_chunks(pool, data_ptr, size_left);
    }

    umf_ba_chunk_t *chunk = pool->metadata.free_list;

    // Defensive check: the free list must be non-empty here (either it was
    // non-empty on entry or a freshly divided pool just refilled it).
    // BUGFIX: this check must come BEFORE the annotation below - previously
    // the annotation was applied first, which on an empty list would pass a
    // NULL pointer to the sanitizer/valgrind annotation.
    if (chunk == NULL) {
        LOG_ERR("base_alloc: Free list should not be empty before new alloc");
        utils_mutex_unlock(&pool->metadata.free_lock);
        return NULL;
    }

    // mark the memory defined to read the next ptr, after this is done
    // we'll mark the memory as undefined
    utils_annotate_memory_defined(chunk, sizeof(*chunk));

    pool->metadata.free_list = pool->metadata.free_list->next;
    pool->metadata.n_allocs++;
#ifndef NDEBUG
    ba_debug_checks(pool);
#endif /* NDEBUG */

    VALGRIND_DO_MALLOCLIKE_BLOCK(chunk, pool->metadata.chunk_size, 0, 0);
    utils_annotate_memory_undefined(chunk, pool->metadata.chunk_size);

    utils_mutex_unlock(&pool->metadata.free_lock);
    return chunk;
}
#ifndef NDEBUG
// Checks if given pointer belongs to the data area of the main pool or of
// any of the "next" pools; returns 1 if so, 0 otherwise. Should be called
// under the lock.
static int pool_contains_pointer(umf_ba_pool_t *pool, void *ptr) {
    char *p = (char *)ptr;

    // the main pool
    if (p >= pool->data && p < (char *)pool + pool->metadata.pool_size) {
        return 1;
    }

    // every pool allocated later
    for (umf_ba_next_pool_t *it = pool->next_pool; it != NULL;
         it = it->next_pool) {
        if (p >= it->data && p < (char *)it + pool->metadata.pool_size) {
            return 1;
        }
    }

    return 0;
}
#endif
// umf_ba_free - return a chunk previously obtained from umf_ba_alloc()
// to the pool's free list. Freeing NULL is a no-op. Serialized on
// metadata.free_lock.
void umf_ba_free(umf_ba_pool_t *pool, void *ptr) {
    if (!ptr) {
        return;
    }

    umf_ba_chunk_t *freed = (umf_ba_chunk_t *)ptr;

    utils_mutex_lock(&pool->metadata.free_lock);
    assert(pool_contains_pointer(pool, ptr));

    // push the chunk onto the front of the free list
    freed->next = pool->metadata.free_list;
    pool->metadata.free_list = freed;
    pool->metadata.n_allocs--;
#ifndef NDEBUG
    ba_debug_checks(pool);
#endif /* NDEBUG */
    VALGRIND_DO_FREELIKE_BLOCK(freed, 0);
    utils_annotate_memory_inaccessible(freed, pool->metadata.chunk_size);
    utils_mutex_unlock(&pool->metadata.free_lock);
}
// umf_ba_destroy - release the base allocator and all OS memory it owns.
// Debug builds report outstanding allocations (and assert on them in
// developer mode).
void umf_ba_destroy(umf_ba_pool_t *pool) {
    // Do not destroy if we are running in the proxy library,
    // because it may need those resources till
    // the very end of exiting the application.
    if (pool->metadata.n_allocs && utils_is_running_in_proxy_lib()) {
        return;
    }

#ifndef NDEBUG
    ba_debug_checks(pool);
    if (pool->metadata.n_allocs) {
#ifdef UMF_DEVELOPER_MODE
        LOG_FATAL("number of base allocator memory leaks: %zu",
                  pool->metadata.n_allocs);
        assert(pool->metadata.n_allocs == 0 &&
               "memory leaks in base allocator occurred");
#else
        LOG_ERR("number of base allocator memory leaks: %zu",
                pool->metadata.n_allocs);
#endif
    }
#endif /* NDEBUG */

    const size_t pool_size = pool->metadata.pool_size;

    // free every additional pool first...
    umf_ba_next_pool_t *it = pool->next_pool;
    while (it) {
        umf_ba_next_pool_t *victim = it;
        it = it->next_pool;
        ba_os_free(victim, pool_size);
    }

    // ...then the mutex and the main pool itself
    utils_mutex_destroy_not_free(&pool->metadata.free_lock);
    ba_os_free(pool, pool_size);
}