Graphviz 13.1.3~dev.20250814.1035
Loading...
Searching...
No Matches
list.h
Go to the documentation of this file.
1
3#pragma once
4
5#include <assert.h>
6#include <errno.h>
7#include <stdbool.h>
8#include <stdint.h>
9#include <stdio.h>
10#include <stdlib.h>
11#include <string.h>
12#include <util/alloc.h>
13#include <util/asan.h>
14#include <util/exit.h>
15#include <util/unused.h>
16
/// Define a list type `name##_t` of elements of `type`, using a destructor
/// that does nothing. Suitable for element types that own no resources.
#define DEFINE_LIST(name, type) DEFINE_LIST_WITH_DTOR(name, type, name##_noop_)
23
/// Define a list type `name##_t` of elements of `type`. `dtor` is run on an
/// element whenever it is overwritten (`name##_set`), removed
/// (`name##_remove`), or the list is cleared (`name##_clear`).
///
/// Storage is a circular buffer: elements occupy logical indices
/// [head, head + size) taken modulo capacity, so the contents may wrap
/// around the end of the allocation.
#define DEFINE_LIST_WITH_DTOR(name, type, dtor)                                \
                                                                               \
  /* a list of `type`, backed by a (possibly wrapping) circular buffer */      \
  typedef struct {                                                             \
    type *base; /* start of the allocation for backing memory */               \
    /* (base == NULL && capacity == 0) || (base != NULL && capacity > 0) */    \
    size_t head; /* index of the first element */                              \
    /* (capacity == 0 && head == 0) || (capacity > 0 && head < capacity) */    \
    size_t size; /* number of elements in the list */                          \
    /* size <= capacity */                                                     \
    size_t capacity; /* available storage slots */                             \
  } name##_t;                                                                  \
                                                                               \
  /* default "do nothing" destructor; trailing underscore marks it internal */ \
  static inline UNUSED void name##_noop_(type item) { (void)item; }            \
                                                                               \
  /* get the number of elements currently stored in `list` */                  \
  static inline size_t name##_size(const name##_t *list) {                     \
    assert(list != NULL);                                                      \
    return list->size;                                                         \
  }                                                                            \
                                                                               \
  /* does `list` contain no elements? */                                       \
  static inline UNUSED bool name##_is_empty(const name##_t *list) {            \
    assert(list != NULL);                                                      \
    return name##_size(list) == 0;                                             \
  }                                                                            \
62 static inline int name##_try_append(name##_t *list, type item) { \
63 assert(list != NULL); \
64 \
65 /* do we need to expand the backing storage? */ \
66 if (list->size == list->capacity) { \
67 const size_t c = list->capacity == 0 ? 1 : (list->capacity * 2); \
68 \
69 /* will the calculation of the new size overflow? */ \
70 if (SIZE_MAX / c < sizeof(type)) { \
71 return ERANGE; \
72 } \
73 \
74 type *base = (type *)realloc(list->base, c * sizeof(type)); \
75 if (base == NULL) { \
76 return ENOMEM; \
77 } \
78 \
79 /* zero the new memory */ \
80 memset(&base[list->capacity], 0, (c - list->capacity) * sizeof(type)); \
81 \
82 /* poison the new (conceptually unallocated) memory */ \
83 ASAN_POISON(&base[list->capacity], (c - list->capacity) * sizeof(type)); \
84 \
85 /* Do we need to shuffle the prefix upwards? E.g. */ \
86 /* */ \
87 /* ┌───┬───┬───┬───┐ */ \
88 /* old: │ 3 │ 4 │ 1 │ 2 │ */ \
89 /* └───┴───┴─┼─┴─┼─┘ */ \
90 /* │ └───────────────┐ */ \
91 /* └───────────────┐ │ */ \
92 /* ▼ ▼ */ \
93 /* ┌───┬───┬───┬───┬───┬───┬───┬───┐ */ \
94 /* new: │ 3 │ 4 │ │ │ │ │ 1 │ 2 │ */ \
95 /* └───┴───┴───┴───┴───┴───┴───┴───┘ */ \
96 /* a b c d e f g h */ \
97 if (list->head + list->size > list->capacity) { \
98 const size_t prefix = list->capacity - list->head; \
99 const size_t new_head = c - prefix; \
100 /* unpoison target range, slots [g, h] in example */ \
101 ASAN_UNPOISON(&base[new_head], prefix * sizeof(type)); \
102 memmove(&base[new_head], &base[list->head], prefix * sizeof(type)); \
103 /* (re-)poison new gap, slots [c, f] in example */ \
104 ASAN_POISON(&base[list->size - prefix], \
105 (list->capacity - list->size) * sizeof(type)); \
106 list->head = new_head; \
107 } \
108 \
109 list->base = base; \
110 list->capacity = c; \
111 } \
112 \
113 const size_t new_slot = (list->head + list->size) % list->capacity; \
114 ASAN_UNPOISON(&list->base[new_slot], sizeof(type)); \
115 list->base[new_slot] = item; \
116 ++list->size; \
117 \
118 return 0; \
119 } \
120 \
121 static inline void name##_append(name##_t *list, type item) { \
122 int rc = name##_try_append(list, item); \
123 if (rc != 0) { \
124 fprintf(stderr, "realloc failed: %s\n", strerror(rc)); \
125 graphviz_exit(EXIT_FAILURE); \
126 } \
127 } \
128 \
129 \
130 static inline UNUSED void name##_prepend(name##_t *list, type item) { \
131 assert(list != NULL); \
132 \
133 /* do we need to expand the backing storage? */ \
134 if (list->size == list->capacity) { \
135 const size_t c = list->capacity == 0 ? 1 : (list->capacity * 2); \
136 \
137 /* will the calculation of the new size overflow? */ \
138 if (SIZE_MAX / c < sizeof(type)) { \
139 fprintf(stderr, "integer overflow during list prepend\n"); \
140 graphviz_exit(EXIT_FAILURE); \
141 } \
142 \
143 type *base = \
144 (type *)gv_recalloc(list->base, list->capacity, c, sizeof(type)); \
145 \
146 /* zero the new memory */ \
147 memset(&base[list->capacity], 0, (c - list->capacity) * sizeof(type)); \
148 \
149 /* poison the new (conceptually unallocated) memory */ \
150 ASAN_POISON(&base[list->capacity], (c - list->capacity) * sizeof(type)); \
151 \
152 /* Do we need to shuffle the prefix upwards? E.g. */ \
153 /* */ \
154 /* ┌───┬───┬───┬───┐ */ \
155 /* old: │ 3 │ 4 │ 1 │ 2 │ */ \
156 /* └───┴───┴─┼─┴─┼─┘ */ \
157 /* │ └───────────────┐ */ \
158 /* └───────────────┐ │ */ \
159 /* ▼ ▼ */ \
160 /* ┌───┬───┬───┬───┬───┬───┬───┬───┐ */ \
161 /* new: │ 3 │ 4 │ │ │ │ │ 1 │ 2 │ */ \
162 /* └───┴───┴───┴───┴───┴───┴───┴───┘ */ \
163 /* a b c d e f g h */ \
164 if (list->head + list->size > list->capacity) { \
165 const size_t prefix = list->capacity - list->head; \
166 const size_t new_head = c - prefix; \
167 /* unpoison target range, slots [g, h] in example */ \
168 ASAN_UNPOISON(&base[new_head], prefix * sizeof(type)); \
169 memmove(&base[new_head], &base[list->head], prefix * sizeof(type)); \
170 /* (re-)poison new gap, slots [c, f] in example */ \
171 ASAN_POISON(&base[list->size - prefix], \
172 (list->capacity - list->size) * sizeof(type)); \
173 list->head = new_head; \
174 } \
175 \
176 list->base = base; \
177 list->capacity = c; \
178 } \
179 \
180 assert(list->size < list->capacity); \
181 const size_t new_slot = \
182 (list->head + (list->capacity - 1)) % list->capacity; \
183 ASAN_UNPOISON(&list->base[new_slot], sizeof(type)); \
184 list->base[new_slot] = item; \
185 list->head = (list->head + (list->capacity - 1)) % list->capacity; \
186 ++list->size; \
187 } \
188 \
189 \
  /* Get (by value) the element at logical position `index`. */                \
  /* `index` must be < size. */                                                \
  static inline type name##_get(const name##_t *list, size_t index) {          \
    assert(list != NULL);                                                      \
    assert(index < list->size && "index out of bounds");                       \
    /* translate the logical index into the circular buffer */                 \
    return list->base[(list->head + index) % list->capacity];                  \
  }                                                                            \
                                                                               \
  /* Get a pointer to the element at logical position `index`. The pointer */  \
  /* is invalidated by any operation that reallocates or reshuffles the */     \
  /* backing storage. */                                                       \
  static inline type *name##_at(name##_t *list, size_t index) {                \
    assert(list != NULL);                                                      \
    assert(index < list->size && "index out of bounds");                       \
    return &list->base[(list->head + index) % list->capacity];                 \
  }                                                                            \
                                                                               \
  /* get a pointer to the first element; the list must be non-empty */         \
  static inline UNUSED type *name##_front(name##_t *list) {                    \
    assert(list != NULL);                                                      \
    assert(!name##_is_empty(list));                                            \
    return name##_at(list, 0);                                                 \
  }                                                                            \
                                                                               \
  /* get a pointer to the last element; the list must be non-empty */          \
  static inline UNUSED type *name##_back(name##_t *list) {                     \
    assert(list != NULL);                                                      \
    assert(!name##_is_empty(list));                                            \
    return name##_at(list, name##_size(list) - 1);                             \
  }                                                                            \
                                                                               \
  /* Replace the element at `index` with `item`, first running `dtor` on */    \
  /* the value previously stored there. */                                     \
  static inline void name##_set(name##_t *list, size_t index, type item) {     \
    assert(list != NULL);                                                      \
    assert(index < name##_size(list) && "index out of bounds");                \
    type *target = name##_at(list, index);                                     \
    dtor(*target);                                                             \
    *target = item;                                                            \
  }                                                                            \
                                                                               \
  /* Remove the first element bytewise-equal to `item`, if any, running */     \
  /* `dtor` on it. No-op if no match is found. */                              \
  /* NOTE(review): equality is memcmp over the whole `type`; for structs */    \
  /* with padding bytes or for floating-point members this may miss */         \
  /* logically-equal values -- confirm callers only use trivially */           \
  /* comparable element types. */                                              \
  static inline UNUSED void name##_remove(name##_t *list, const type item) {   \
    assert(list != NULL);                                                      \
                                                                               \
    for (size_t i = 0; i < list->size; ++i) {                                  \
      /* is this the element we are looking for? */                            \
      type *candidate = name##_at(list, i);                                    \
      if (memcmp(candidate, &item, sizeof(type)) == 0) {                       \
                                                                               \
        /* destroy the element we are about to remove */                       \
        dtor(*candidate);                                                      \
                                                                               \
        /* shrink the list, shifting each later element down one slot */       \
        for (size_t j = i + 1; j < list->size; ++j) {                          \
          type *replacement = name##_at(list, j);                              \
          *candidate = *replacement;                                           \
          candidate = replacement;                                             \
        }                                                                      \
        /* the last slot is now conceptually unallocated again */              \
        ASAN_POISON(name##_at(list, list->size - 1), sizeof(type));            \
        --list->size;                                                          \
        return;                                                                \
      }                                                                        \
    }                                                                          \
  }                                                                            \
                                                                               \
  /* Remove all elements, running `dtor` on each. Capacity (and the */         \
  /* backing allocation) is retained for reuse. */                             \
  static inline void name##_clear(name##_t *list) {                            \
    assert(list != NULL);                                                      \
                                                                               \
    for (size_t i = 0; i < list->size; ++i) {                                  \
      dtor(name##_get(list, i));                                               \
      ASAN_POISON(name##_at(list, i), sizeof(type));                           \
    }                                                                          \
                                                                               \
    list->size = 0;                                                            \
                                                                               \
    /* opportunistically re-sync the list (an empty list never wraps) */       \
    list->head = 0;                                                            \
  }                                                                            \
                                                                               \
296 static inline UNUSED void name##_reserve(name##_t *list, size_t capacity) { \
297 assert(list != NULL); \
298 \
299 /* if we can already fit enough items, nothing to do */ \
300 if (list->capacity >= capacity) { \
301 return; \
302 } \
303 \
304 list->base = (type *)gv_recalloc(list->base, list->capacity, capacity, \
305 sizeof(type)); \
306 \
307 /* Do we need to shuffle the prefix upwards? E.g. */ \
308 /* */ \
309 /* ┌───┬───┬───┬───┐ */ \
310 /* old: │ 3 │ 4 │ 1 │ 2 │ */ \
311 /* └───┴───┴─┼─┴─┼─┘ */ \
312 /* │ └───────────────┐ */ \
313 /* └───────────────┐ │ */ \
314 /* ▼ ▼ */ \
315 /* ┌───┬───┬───┬───┬───┬───┬───┬───┐ */ \
316 /* new: │ 3 │ 4 │ │ │ │ │ 1 │ 2 │ */ \
317 /* └───┴───┴───┴───┴───┴───┴───┴───┘ */ \
318 /* a b c d e f g h */ \
319 if (list->head + list->size > list->capacity) { \
320 const size_t prefix = list->capacity - list->head; \
321 const size_t new_head = capacity - prefix; \
322 /* unpoison target range, slots [g, h] in example */ \
323 ASAN_UNPOISON(&list->base[new_head], prefix * sizeof(type)); \
324 memmove(&list->base[new_head], &list->base[list->head], \
325 prefix * sizeof(type)); \
326 /* (re-)poison new gap, slots [c, f] in example */ \
327 ASAN_POISON(&list->base[list->size - prefix], \
328 (list->capacity - list->size) * sizeof(type)); \
329 list->head = new_head; \
330 } \
331 \
332 list->capacity = capacity; \
333 } \
334 \
335 \
336 static inline UNUSED bool name##_contains( \
337 const name##_t *haystack, const type needle, \
338 bool (*eq)(const type a, const type b)) { \
339 assert(haystack != NULL); \
340 assert(eq != NULL); \
341 \
342 for (size_t i = 0; i < name##_size(haystack); ++i) { \
343 if (eq(name##_get(haystack, i), needle)) { \
344 return true; \
345 } \
346 } \
347 return false; \
348 } \
349 \
350 \
  /* Make an element-wise copy of `source` with the same capacity. The */      \
  /* copy starts unwrapped (head == 0) regardless of `source`'s layout. */     \
  /* NOTE(review): elements are copied by value; with an owning `dtor`, */     \
  /* both lists would destroy the same resources -- verify callers only */     \
  /* copy lists of non-owning elements. */                                     \
  static inline UNUSED name##_t name##_copy(const name##_t *source) {          \
    assert(source != NULL);                                                    \
                                                                               \
    name##_t destination = {(type *)gv_calloc(source->capacity, sizeof(type)), \
                            0, 0, source->capacity};                           \
    for (size_t i = 0; i < source->size; ++i) {                                \
      name##_append(&destination, name##_get(source, i));                      \
    }                                                                          \
    return destination;                                                        \
  }                                                                            \
                                                                               \
376 static inline UNUSED bool name##_is_contiguous(const name##_t *list) { \
377 assert(list != NULL); \
378 return list->head + list->size <= list->capacity; \
379 } \
380 \
381 \
  /* Rotate the backing buffer until head == 0, so the elements occupy */      \
  /* base[0 .. size) contiguously. O(head * capacity) -- acceptable */         \
  /* because callers treat this as a rare operation. */                        \
  static inline void name##_sync(name##_t *list) {                             \
    assert(list != NULL);                                                      \
                                                                               \
    /* Allow unrestricted access. The shuffle below accesses both allocated \
     * and unallocated elements, so just let it read and write everything. \
     */ \
    ASAN_UNPOISON(list->base, list->capacity * sizeof(type));                  \
                                                                               \
    /* Shuffle the list 1-1 until it is aligned. This is not efficient, but */ \
    /* we assume this is a relatively rare operation. */                       \
    while (list->head != 0) {                                                  \
      /* rotate the entire buffer (not just the live elements) left by 1 */    \
      assert(list->capacity > 0);                                              \
      type replacement = list->base[0];                                        \
      /* walk downwards; `i != SIZE_MAX` detects size_t underflow past 0 */    \
      for (size_t i = list->capacity - 1; i != SIZE_MAX; --i) {                \
        type temp = list->base[i];                                             \
        list->base[i] = replacement;                                           \
        replacement = temp;                                                    \
      }                                                                        \
      --list->head;                                                            \
    }                                                                          \
                                                                               \
    /* synchronization should have ensured the list no longer wraps */         \
    assert(name##_is_contiguous(list));                                        \
                                                                               \
    /* re-establish access restrictions on the unused tail slots */            \
    ASAN_POISON(&list->base[list->size],                                       \
                (list->capacity - list->size) * sizeof(type));                 \
  }                                                                            \
                                                                               \
  /* Sort the elements of `list` in place using comparator `cmp`. */           \
  /* NOTE(review): calling qsort through a comparator cast from */             \
  /* `int (*)(const type *, const type *)` to the void* form is technically */ \
  /* undefined behavior, though it works on common ABIs -- confirm this is */  \
  /* an accepted project-wide convention. */                                   \
  static inline UNUSED void name##_sort(                                       \
      name##_t *list, int (*cmp)(const type *a, const type *b)) {              \
    assert(list != NULL);                                                      \
    assert(cmp != NULL);                                                       \
                                                                               \
    /* make the list contiguous so qsort can treat it as a plain array */      \
    name##_sync(list);                                                         \
                                                                               \
    int (*compar)(const void *, const void *) =                                \
        (int (*)(const void *, const void *))cmp;                              \
    if (list->size > 0) {                                                      \
      qsort(list->base, list->size, sizeof(type), compar);                     \
    }                                                                          \
  }                                                                            \
                                                                               \
428 static inline UNUSED void name##_reverse(name##_t *list) { \
429 assert(list != NULL); \
430 \
431 for (size_t i = 0; i < name##_size(list) / 2; ++i) { \
432 type const temp1 = name##_get(list, i); \
433 type const temp2 = name##_get(list, name##_size(list) - i - 1); \
434 name##_set(list, i, temp2); \
435 name##_set(list, name##_size(list) - i - 1, temp1); \
436 } \
437 } \
438 \
439 \
  /* Shrink the backing allocation to exactly `size` slots. Implies a */       \
  /* `name##_sync`, so the surviving elements end up contiguous at */          \
  /* base[0 .. size). */                                                       \
  static inline UNUSED void name##_shrink_to_fit(name##_t *list) {             \
    assert(list != NULL);                                                      \
                                                                               \
    /* unwrap first so truncating the tail cannot discard live elements */     \
    name##_sync(list);                                                         \
                                                                               \
    if (list->capacity > list->size) {                                         \
      list->base = (type *)gv_recalloc(list->base, list->capacity, list->size, \
                                       sizeof(type));                          \
      list->capacity = list->size;                                             \
    }                                                                          \
  }                                                                            \
                                                                               \
  /* Destroy all elements (via `name##_clear`, which runs `dtor`), release */  \
  /* the backing memory, and reset `list` to a valid empty state so it can */  \
  /* be reused. */                                                             \
  static inline UNUSED void name##_free(name##_t *list) {                      \
    assert(list != NULL);                                                      \
    name##_clear(list);                                                        \
    free(list->base);                                                          \
    memset(list, 0, sizeof(*list));                                            \
  }                                                                            \
                                                                               \
  /* alias of `name##_append`, for symmetry with `name##_pop_back` */          \
  static inline UNUSED void name##_push_back(name##_t *list, type value) {     \
    name##_append(list, value);                                                \
  }                                                                            \
                                                                               \
  /* Remove and return the first element. Ownership transfers to the */        \
  /* caller. The list must be non-empty. */                                    \
  static inline UNUSED type name##_pop_front(name##_t *list) {                 \
    assert(list != NULL);                                                      \
    assert(list->size > 0);                                                    \
                                                                               \
    type value = name##_get(list, 0);                                          \
                                                                               \
    /* do not call `dtor` because we are transferring ownership of the removed \
     * element to the caller \
     */ \
    ASAN_POISON(name##_at(list, 0), sizeof(type));                             \
    /* advance head, wrapping around the end of the circular buffer */         \
    list->head = (list->head + 1) % list->capacity;                            \
    --list->size;                                                              \
                                                                               \
    return value;                                                              \
  }                                                                            \
                                                                               \
  /* Remove and return the last element. Ownership transfers to the */         \
  /* caller. The list must be non-empty. */                                    \
  static inline UNUSED type name##_pop_back(name##_t *list) {                  \
    assert(list != NULL);                                                      \
    assert(list->size > 0);                                                    \
                                                                               \
    type value = name##_get(list, list->size - 1);                             \
                                                                               \
    /* do not call `dtor` because we are transferring ownership of the removed \
     * element to the caller \
     */ \
    ASAN_POISON(name##_at(list, list->size - 1), sizeof(type));                \
    --list->size;                                                              \
                                                                               \
    return value;                                                              \
  }                                                                            \
                                                                               \
  /* Transfer ownership of the backing memory to the caller, leaving */        \
  /* `list` reset to a valid empty state. The returned array holds the */      \
  /* first `size` elements contiguously and must eventually be released */     \
  /* with `free`. */                                                           \
  static inline UNUSED type *name##_detach(name##_t *list) {                   \
    assert(list != NULL);                                                      \
    /* unwrap so the elements occupy base[0 .. size) before handing out */     \
    name##_sync(list);                                                         \
    type *data = list->base;                                                   \
    memset(list, 0, sizeof(*list));                                            \
    return data;                                                               \
  }
Memory allocation wrappers that exit on failure.
Macros for interacting with Address Sanitizer.
Abstraction for squashing compiler warnings about unused symbols.