-
Notifications
You must be signed in to change notification settings - Fork 44
Expand file tree
/
Copy pathbase_alloc_linear.c
More file actions
314 lines (272 loc) · 10.9 KB
/
base_alloc_linear.c
File metadata and controls
314 lines (272 loc) · 10.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
/*
* Copyright (C) 2024-2025 Intel Corporation
*
* Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include <assert.h>
#include <stdint.h>
#include "base_alloc_internal.h"
#include "base_alloc_linear.h"
#include "utils_common.h"
#include "utils_concurrency.h"
#include "utils_log.h"
#include "utils_sanitizers.h"
#ifndef NDEBUG
#define _DEBUG_EXECUTE(expression) DO_WHILE_EXPRS(expression)
#else
#define _DEBUG_EXECUTE(expression) DO_WHILE_EMPTY
#endif /* NDEBUG */
// minimum size of a single pool of the linear base allocator
#define MINIMUM_LINEAR_POOL_SIZE (ba_os_get_page_size())
// alignment of the linear base allocator
#define MEMORY_ALIGNMENT (sizeof(uintptr_t))
typedef struct umf_ba_next_linear_pool_t umf_ba_next_linear_pool_t;
// Metadata of the linear base allocator. It is set and used only in the
// main (the first) pool; the "next" pools keep only their own size and
// allocation counter.
typedef struct umf_ba_main_linear_pool_meta_t {
    size_t pool_size; // size of this pool (argument of ba_os_alloc() call)
    utils_mutex_t lock;   // protects the metadata and the list of pools
    char *data_ptr;       // bump pointer: next free byte in the active pool
    size_t size_left;     // bytes left after data_ptr in the active pool
    size_t pool_n_allocs; // number of allocations in this pool
#ifndef NDEBUG
    size_t n_pools;         // number of pools in the list (main + "next" ones)
    size_t global_n_allocs; // global number of allocations in all pools
#endif /* NDEBUG */
} umf_ba_main_linear_pool_meta_t;
// the main pool of the linear base allocator (there is only one such pool)
struct umf_ba_linear_pool {
    // address of the beginning of the next pool (a list of allocated pools
    // to be freed in umf_ba_linear_destroy())
    umf_ba_next_linear_pool_t *next_pool;
    // metadata is set and used only in the main (the first) pool
    umf_ba_main_linear_pool_meta_t metadata;
    // data area of the main pool (the first one) starts here
    // (C99 flexible array member)
    char data[];
};
// the "next" pools of the linear base allocator (pools allocated later,
// when we run out of the memory of the main pool)
struct umf_ba_next_linear_pool_t {
    // address of the beginning of the next pool (a list of allocated pools
    // to be freed in umf_ba_linear_destroy())
    umf_ba_next_linear_pool_t *next_pool;
    size_t pool_size; // size of this pool (argument of ba_os_alloc() call)
    size_t pool_n_allocs; // number of allocations in this pool
    // data area of all pools except of the main (the first one) starts here
    // (C99 flexible array member)
    char data[];
};
#ifndef NDEBUG
// Debug-only sanity check: walk the list of "next" pools and verify that the
// number of pools actually linked matches the counter kept in the metadata.
static void ba_debug_checks(umf_ba_linear_pool_t *pool) {
    size_t counted = 1; // start at 1 to account for the main pool itself
    for (umf_ba_next_linear_pool_t *it = pool->next_pool; it != NULL;
         it = it->next_pool) {
        counted++;
    }
    assert(counted == pool->metadata.n_pools);
}
#endif /* NDEBUG */
// umf_ba_linear_create() creates the main pool of a linear base allocator
// with at least `pool_size` bytes of data area. The total size (data area
// plus the pool header) is raised to MINIMUM_LINEAR_POOL_SIZE if needed and
// rounded up to a whole number of OS pages.
// Returns NULL on size overflow, OS allocation failure or mutex
// initialization failure.
umf_ba_linear_pool_t *umf_ba_linear_create(size_t pool_size) {
    // make room for the pool header (the next_pool pointer and the metadata)
    // that precedes the data area
    size_t header_size = sizeof(umf_ba_next_linear_pool_t *) +
                         sizeof(umf_ba_main_linear_pool_meta_t);
    // fix: guard the addition itself - for a huge pool_size the unchecked
    // `pool_size += header_size` wrapped around silently (unsigned wrap),
    // which could produce a pool far smaller than requested
    if (pool_size > SIZE_MAX - header_size) {
        LOG_ERR("pool_size overflow");
        return NULL;
    }
    pool_size += header_size;
    if (pool_size < MINIMUM_LINEAR_POOL_SIZE) {
        pool_size = MINIMUM_LINEAR_POOL_SIZE;
    }
    // round up to a whole number of pages;
    // ALIGN_UP_SAFE() returns 0 when the rounding would overflow
    pool_size = ALIGN_UP_SAFE(pool_size, ba_os_get_page_size());
    if (pool_size == 0) {
        LOG_ERR("pool_size page alignment overflow");
        return NULL;
    }
    umf_ba_linear_pool_t *pool = (umf_ba_linear_pool_t *)ba_os_alloc(pool_size);
    if (!pool) {
        return NULL;
    }
    // align the start of the data area up and shrink size_left accordingly
    void *data_ptr = &pool->data;
    size_t size_left = pool_size - offsetof(umf_ba_linear_pool_t, data);
    utils_align_ptr_up_size_down(&data_ptr, &size_left, MEMORY_ALIGNMENT);
    pool->metadata.pool_size = pool_size;
    pool->metadata.data_ptr = data_ptr;
    pool->metadata.size_left = size_left;
    pool->next_pool = NULL; // this is the only pool now
    pool->metadata.pool_n_allocs = 0;
    _DEBUG_EXECUTE(pool->metadata.n_pools = 1);
    _DEBUG_EXECUTE(pool->metadata.global_n_allocs = 0);
    // init lock
    utils_mutex_t *lock = utils_mutex_init(&pool->metadata.lock);
    if (!lock) {
        ba_os_free(pool, pool_size);
        return NULL;
    }
    return pool;
}
// umf_ba_linear_alloc() allocates `size` bytes from the linear pool.
// Allocations are bumped off the active pool; when the active pool runs out
// of space, a new "next" pool is allocated from the OS, pushed to the head
// of the pool list and becomes the active pool (the unused tail of the
// previous active pool is abandoned - a linear-allocator trade-off).
// Returns NULL when size == 0, on alignment overflow, or on OS allocation
// failure.
void *umf_ba_linear_alloc(umf_ba_linear_pool_t *pool, size_t size) {
    if (size == 0) {
        return NULL;
    }
    // round the request up so data_ptr stays MEMORY_ALIGNMENT-aligned
    size_t aligned_size = ALIGN_UP_SAFE(size, MEMORY_ALIGNMENT);
    if (aligned_size == 0) {
        // ALIGN_UP_SAFE() returns 0 when rounding up would overflow size_t
        LOG_ERR("size alignment overflow");
        return NULL;
    }
    utils_mutex_lock(&pool->metadata.lock);
    if (pool->metadata.size_left < aligned_size) {
        // the active pool cannot satisfy the request - allocate a new pool
        size_t pool_size = MINIMUM_LINEAR_POOL_SIZE;
        size_t usable_size =
            pool_size - offsetof(umf_ba_next_linear_pool_t, data);
        if (usable_size < aligned_size) {
            // grow the new pool so its data area fits the request, then
            // round the total up to a whole number of pages
            pool_size += aligned_size - usable_size;
            pool_size = ALIGN_UP_SAFE(pool_size, ba_os_get_page_size());
            if (pool_size == 0) {
                utils_mutex_unlock(&pool->metadata.lock);
                LOG_ERR("pool_size page alignment overflow");
                return NULL;
            }
        }
        assert(pool_size - offsetof(umf_ba_next_linear_pool_t, data) >=
               aligned_size);
        umf_ba_next_linear_pool_t *new_pool =
            (umf_ba_next_linear_pool_t *)ba_os_alloc(pool_size);
        if (!new_pool) {
            utils_mutex_unlock(&pool->metadata.lock);
            return NULL;
        }
        new_pool->pool_size = pool_size;
        new_pool->pool_n_allocs = 0;
        // align the new pool's data pointer up (shrinking size_left)
        void *data_ptr = &new_pool->data;
        size_t size_left =
            new_pool->pool_size - offsetof(umf_ba_next_linear_pool_t, data);
        utils_align_ptr_up_size_down(&data_ptr, &size_left, MEMORY_ALIGNMENT);
        // the new pool becomes the active pool
        pool->metadata.data_ptr = data_ptr;
        pool->metadata.size_left = size_left;
        // add the new pool to the head of the list of pools
        new_pool->next_pool = pool->next_pool;
        pool->next_pool = new_pool;
        _DEBUG_EXECUTE(pool->metadata.n_pools++);
    }
    assert(pool->metadata.size_left >= aligned_size);
    // bump-allocate from the active pool
    void *ptr = pool->metadata.data_ptr;
    pool->metadata.data_ptr += aligned_size;
    pool->metadata.size_left -= aligned_size;
    // the head of the list (pool->next_pool), if present, is the active
    // pool; otherwise the allocation came from the main pool itself
    if (pool->next_pool) {
        pool->next_pool->pool_n_allocs++;
    } else {
        pool->metadata.pool_n_allocs++;
    }
    _DEBUG_EXECUTE(pool->metadata.global_n_allocs++);
    _DEBUG_EXECUTE(ba_debug_checks(pool));
    VALGRIND_DO_MALLOCLIKE_BLOCK(ptr, aligned_size, 0, 0);
    utils_mutex_unlock(&pool->metadata.lock);
    return ptr;
}
// Check whether ptr lies inside the given pool: at or after data_begin
// (the start of the pool's data area) and before the pool's end
// (pool + pool_size). Returns a non-zero value when it does, 0 otherwise.
static inline int pool_contains_ptr(void *pool, size_t pool_size,
                                    void *data_begin, void *ptr) {
    char *candidate = (char *)ptr;
    char *lower = (char *)data_begin;
    char *upper = (char *)pool + pool_size;
    return (candidate >= lower) && (candidate < upper);
}
// umf_ba_linear_free() really frees memory only if all allocations from an inactive pool were freed
// It returns:
//  0 - ptr belonged to the pool and was freed
// -1 - ptr doesn't belong to the pool and wasn't freed
int umf_ba_linear_free(umf_ba_linear_pool_t *pool, void *ptr) {
    utils_mutex_lock(&pool->metadata.lock);
    _DEBUG_EXECUTE(ba_debug_checks(pool));
    // case 1: ptr lies in the main (the first) pool
    if (pool_contains_ptr(pool, pool->metadata.pool_size, pool->data, ptr)) {
        pool->metadata.pool_n_allocs--;
        _DEBUG_EXECUTE(pool->metadata.global_n_allocs--);
        VALGRIND_DO_FREELIKE_BLOCK(ptr, 0);
        size_t page_size = ba_os_get_page_size();
        // the main pool's memory can be released only when all of its
        // allocations were freed AND it is no longer the active pool
        // (pool->next_pool != NULL) AND it wasn't already shrunk
        if ((pool->metadata.pool_n_allocs == 0) && pool->next_pool &&
            (pool->metadata.pool_size > page_size)) {
            // we can free the first (main) pool except of the first page containing the metadata
            // NOTE(review): assumes the header (next_pool + metadata) fits
            // within the first page - confirm against MINIMUM_LINEAR_POOL_SIZE
            void *pool_ptr = (char *)pool + page_size;
            size_t size = pool->metadata.pool_size - page_size;
            ba_os_free(pool_ptr, size);
            // update pool_size
            pool->metadata.pool_size = page_size;
        }
        _DEBUG_EXECUTE(ba_debug_checks(pool));
        utils_mutex_unlock(&pool->metadata.lock);
        return 0;
    }
    // case 2: search the list of "next" pools for the one containing ptr
    umf_ba_next_linear_pool_t *next_pool = pool->next_pool;
    umf_ba_next_linear_pool_t *prev_pool = NULL;
    while (next_pool) {
        if (pool_contains_ptr(next_pool, next_pool->pool_size, next_pool->data,
                              ptr)) {
            _DEBUG_EXECUTE(pool->metadata.global_n_allocs--);
            next_pool->pool_n_allocs--;
            // pool->next_pool is the active pool - we cannot free it
            if ((next_pool->pool_n_allocs == 0) &&
                next_pool != pool->next_pool) {
                assert(prev_pool); // it cannot be the active pool
                assert(prev_pool->next_pool == next_pool);
                // unlink the now-empty pool and return it to the OS
                prev_pool->next_pool = next_pool->next_pool;
                _DEBUG_EXECUTE(pool->metadata.n_pools--);
                void *next_pool_ptr = next_pool;
                size_t size = next_pool->pool_size;
                ba_os_free(next_pool_ptr, size);
            }
            _DEBUG_EXECUTE(ba_debug_checks(pool));
            utils_mutex_unlock(&pool->metadata.lock);
            VALGRIND_DO_FREELIKE_BLOCK(ptr, 0);
            return 0;
        }
        prev_pool = next_pool;
        next_pool = next_pool->next_pool;
    }
    utils_mutex_unlock(&pool->metadata.lock);
    // ptr doesn't belong to the pool and wasn't freed
    return -1;
}
// umf_ba_linear_destroy() returns all pools to the OS (the "next" pools
// first, then the main pool) and destroys the metadata lock.
void umf_ba_linear_destroy(umf_ba_linear_pool_t *pool) {
    // Do not destroy if we are running in the proxy library,
    // because it may need those resources till
    // the very end of exiting the application.
    if (utils_is_running_in_proxy_lib()) {
        return;
    }
#ifndef NDEBUG
    _DEBUG_EXECUTE(ba_debug_checks(pool));
    if (pool->metadata.global_n_allocs) {
        LOG_ERR("global_n_allocs = %zu", pool->metadata.global_n_allocs);
    }
#endif /* NDEBUG */
    // free every "next" pool; read the successor before freeing the node
    umf_ba_next_linear_pool_t *it = pool->next_pool;
    while (it != NULL) {
        umf_ba_next_linear_pool_t *successor = it->next_pool;
        ba_os_free(it, it->pool_size);
        it = successor;
    }
    utils_mutex_destroy_not_free(&pool->metadata.lock);
    ba_os_free(pool, pool->metadata.pool_size);
}
// umf_ba_linear_pool_contains_pointer() returns:
// - 0 if ptr does not belong to the pool or
// - size (> 0) of the memory region from ptr
//   to the end of the pool if ptr belongs to the pool
size_t umf_ba_linear_pool_contains_pointer(umf_ba_linear_pool_t *pool,
                                           void *ptr) {
    char *cptr = (char *)ptr;
    size_t region_size = 0;

    utils_mutex_lock(&pool->metadata.lock);

    // first check the main pool, then walk the list of "next" pools
    char *main_end = (char *)pool + pool->metadata.pool_size;
    if (cptr >= pool->data && cptr < main_end) {
        region_size = (size_t)(main_end - cptr);
    } else {
        for (umf_ba_next_linear_pool_t *it = pool->next_pool; it != NULL;
             it = it->next_pool) {
            char *pool_end = (char *)it + it->pool_size;
            if (cptr >= it->data && cptr < pool_end) {
                region_size = (size_t)(pool_end - cptr);
                break;
            }
        }
    }

    utils_mutex_unlock(&pool->metadata.lock);
    return region_size;
}