list.c
#ifndef NO_CONFIG // defined by test_list.c
#include "config.h"
#endif

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <util/alloc.h>
#include <util/asan.h>
#include <util/exit.h>
#include <util/gv_math.h>
#include <util/list-private.h>
#include <util/prisize_t.h>

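// The `list_t_` type comes from util/list-private.h, where its fields carry
// the following documented invariants, relied on throughout this file:
//   size <= capacity
//   (base == NULL && capacity == 0) || (base != NULL && capacity > 0)
//   (capacity == 0 && head == 0) || (capacity > 0 && head < capacity)
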
static const void *slot_from_const_list(const list_t_ *list, size_t index,
                                        size_t stride) {
  assert(list != NULL);
  assert(list->base != NULL || index == 0 || stride == 0);

  const char *const base = list->base;
  return base + index * stride;
}

static void *slot_from_list(list_t_ *list, size_t index, size_t stride) {
  assert(list != NULL);
  assert(list->base != NULL || index == 0 || stride == 0);

  char *const base = list->base;
  return base + index * stride;
}

static const void *slot_from_const_base(const void *base, size_t index,
                                        size_t stride) {
  assert(base != NULL || index == 0 || stride == 0);

  const char *const b = base;
  return b + index * stride;
}

static void *slot_from_base(void *base, size_t index, size_t stride) {
  assert(base != NULL || index == 0 || stride == 0);

  char *const b = base;
  return b + index * stride;
}

#define INDEX_TO(origin, index, stride)                                        \
  (_Generic((origin),                                                          \
       const list_t_ *: slot_from_const_list,                                  \
       list_t_ *: slot_from_list,                                              \
       const void *: slot_from_const_base,                                     \
       void *: slot_from_base)((origin), (index), (stride)))

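// Illustrative use of the dispatcher above: with `list_t_ *l`,
// `INDEX_TO(l, 3, sizeof(int))` selects slot_from_list and yields a mutable
// `void *` to the fourth physical slot, while a `const list_t_ *` origin
// selects slot_from_const_list and yields `const void *`. The raw-pointer
// variants exist for code such as try_reserve below, which indexes a freshly
// reallocated buffer before installing it as `list->base`.
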
size_t gv_list_append_slot_(list_t_ *list, size_t item_size) {
  assert(list != NULL);

  // do we need to expand the backing storage?
  if (list->size == list->capacity) {
    const size_t c = list->capacity == 0 ? 1 : (list->capacity * 2);
    gv_list_reserve_(list, c, item_size);
  }

  assert(list->capacity > 0);
  assert(list->size < list->capacity);

  // append the new slot
  const size_t new_slot = (list->head + list->size) % list->capacity;
  void *const slot = INDEX_TO(list, new_slot, item_size);
  ASAN_UNPOISON(slot, item_size);
  ++list->size;

  return new_slot;
}

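// Worked example for the modular arithmetic above: with head == 2, size == 3,
// capacity == 4, the new slot is (2 + 3) % 4 == 1, i.e. the append wraps
// around to the physical start of the backing buffer.
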
size_t gv_list_prepend_slot_(list_t_ *list, size_t item_size) {
  assert(list != NULL);

  // do we need to expand the backing storage?
  if (list->size == list->capacity) {
    const size_t c = list->capacity == 0 ? 1 : (list->capacity * 2);
    gv_list_reserve_(list, c, item_size);
  }

  assert(list->capacity > 0);
  assert(list->size < list->capacity);

  // prepend the new slot
  list->head = (list->head + (list->capacity - 1)) % list->capacity;
  void *const slot = INDEX_TO(list, list->head, item_size);
  ASAN_UNPOISON(slot, item_size);
  ++list->size;

  return list->head;
}

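// Adding capacity - 1 is the unsigned-safe way of stepping head backwards by
// one: e.g. head == 0, capacity == 4 gives (0 + 3) % 4 == 3, whereas a naive
// `head - 1` would underflow to SIZE_MAX.
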
static int try_reserve(list_t_ *list, size_t capacity, size_t item_size) {
  assert(list != NULL);

  // if we can already fit enough items, nothing to do
  if (list->capacity >= capacity) {
    return 0;
  }

  // will the arithmetic below overflow?
  assert(capacity > 0);
  if (SIZE_MAX / capacity < item_size) {
    return EOVERFLOW;
  }

  void *const base = realloc(list->base, capacity * item_size);
  if (base == NULL) {
    return ENOMEM;
  }

  // zero the new memory
  {
    void *const new = INDEX_TO(base, list->capacity, item_size);
    const size_t new_bytes = (capacity - list->capacity) * item_size;
    memset(new, 0, new_bytes);

    // poison the new (conceptually unallocated) memory
    ASAN_POISON(new, new_bytes);
  }

  // Do we need to shuffle the prefix upwards? E.g.
  //
  //        ┌───┬───┬───┬───┐
  //   old: │ 3 │ 4 │ 1 │ 2 │
  //        └───┴───┴─┼─┴─┼─┘
  //                  │   └───────────────┐
  //                  └───────────────┐   │
  //                                  ▼   ▼
  //        ┌───┬───┬───┬───┬───┬───┬───┬───┐
  //   new: │ 3 │ 4 │   │   │   │   │ 1 │ 2 │
  //        └───┴───┴───┴───┴───┴───┴───┴───┘
  //          a   b   c   d   e   f   g   h
  if (list->head + list->size > list->capacity) {
    const size_t prefix = list->capacity - list->head;
    const size_t new_head = capacity - prefix;
    // unpoison target range, slots [g, h] in example
    void *const target = INDEX_TO(base, new_head, item_size);
    ASAN_UNPOISON(target, prefix * item_size);
    const void *const src = INDEX_TO(base, list->head, item_size);
    memmove(target, src, prefix * item_size);
    // (re-)poison new gap, slots [c, f] in example; the gap runs up to the
    // new head, so its extent is measured against the *new* capacity
    void *const gap_begin = INDEX_TO(base, list->size - prefix, item_size);
    ASAN_POISON(gap_begin, (capacity - list->size) * item_size);
    list->head = new_head;
  }

  list->base = base;
  list->capacity = capacity;
  return 0;
}

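// Plugging the diagram above into the shuffle arithmetic: head == 2,
// size == 4, old capacity == 4, new capacity == 8. The list wraps because
// 2 + 4 > 4, so prefix == 4 - 2 == 2 elements {1, 2} and
// new_head == 8 - 2 == 6 (slot g). The poisoned gap is the
// (8 - 4) == 4 slots [c, f], and the wrapped elements {3, 4} stay put at
// slots a and b.
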
bool gv_list_try_append_(list_t_ *list, const void *item, size_t item_size) {
  assert(list != NULL);
  assert(item != NULL);

  // do we need to expand the backing storage?
  if (list->size == list->capacity) {
    do {
      // can we attempt doubling without integer overflow?
      if (SIZE_MAX / 2 >= list->capacity) {
        const size_t c = list->capacity == 0 ? 1 : (list->capacity * 2);
        if (try_reserve(list, c, item_size) == 0) {
          // success
          break;
        }
      }

      // try a more conservative expansion
      if (SIZE_MAX - 1 >= list->capacity) {
        if (try_reserve(list, list->capacity + 1, item_size) == 0) {
          // success
          break;
        }
      }

      // failed to expand the list
      return false;
    } while (0);
  }

  assert(list->size < list->capacity);

  // we can now append, knowing it will not require backing storage expansion
  const size_t new_slot = (list->head + list->size) % list->capacity;
  void *const slot = INDEX_TO(list, new_slot, item_size);
  ASAN_UNPOISON(slot, item_size);
  memcpy(slot, item, item_size);
  ++list->size;

  return true;
}

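// The growth strategy above tries geometric doubling first (capacity
// 0 → 1 → 2 → 4 → …) for amortized O(1) appends, and only falls back to
// capacity + 1 when doubling would overflow size_t or the larger allocation
// fails. E.g. a failed 1024 → 2048 reallocation is retried as 1024 → 1025
// before giving up and returning false.
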
size_t gv_list_get_(const list_t_ list, size_t index) {
  assert(index < list.size && "index out of bounds");
  return (list.head + index) % list.capacity;
}

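// E.g. with head == 3 and capacity == 4, logical indices 0, 1, 2 map to
// physical slots 3, 0, 1; this is the circular-buffer translation used by
// most of the operations below.
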
size_t gv_list_find_(const list_t_ list, const void *needle, size_t item_size) {
  for (size_t i = 0; i < list.size; ++i) {
    const size_t slot = gv_list_get_(list, i);
    const void *candidate = INDEX_TO(&list, slot, item_size);
    if (memcmp(needle, candidate, item_size) == 0) {
      return i;
    }
  }

  return SIZE_MAX;
}

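// Matching is bytewise, so for element types containing padding bytes the
// needle only matches if the padding happens to agree too; e.g. a list of
// ints is searched reliably, while a struct with internal padding may not be.
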
void gv_list_remove_(list_t_ *list, size_t index, size_t item_size) {
  assert(list != NULL);
  assert(index < list->size);

  // shrink the list
  for (size_t i = index + 1; i < list->size; ++i) {
    const size_t dst_slot = gv_list_get_(*list, i - 1);
    void *const dst = INDEX_TO(list, dst_slot, item_size);
    const size_t src_slot = gv_list_get_(*list, i);
    const void *const src = INDEX_TO(list, src_slot, item_size);
    memcpy(dst, src, item_size);
  }
  const size_t truncated_slot = gv_list_get_(*list, list->size - 1);
  void *truncated = INDEX_TO(list, truncated_slot, item_size);
  ASAN_POISON(truncated, item_size);
  --list->size;
}

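// Removal is O(n) in the number of trailing elements: e.g. removing index 1
// from the logical list {a, b, c, d} copies c over b and d over c, poisons
// the now-unused last slot, and shrinks size from 4 to 3.
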
void gv_list_clear_(list_t_ *list, size_t item_size) {
  assert(list != NULL);

  for (size_t i = 0; i < list->size; ++i) {
    const size_t slot = gv_list_get_(*list, i);
    void *const to_poison = INDEX_TO(list, slot, item_size);
    ASAN_POISON(to_poison, item_size);
  }

  list->size = 0;

  // opportunistically re-sync the list
  list->head = 0;
}

void gv_list_reserve_(list_t_ *list, size_t capacity, size_t item_size) {
  const int err = try_reserve(list, capacity, item_size);
  if (err != 0) {
    fprintf(stderr,
            "failed to reserve %" PRISIZE_T " elements of size %" PRISIZE_T
            " bytes: %s\n",
            capacity, item_size, strerror(err));
    graphviz_exit(EXIT_FAILURE);
  }
}

bool gv_list_contains_(const list_t_ list, const void *needle,
                       size_t item_size) {
  return gv_list_find_(list, needle, item_size) != SIZE_MAX;
}

list_t_ gv_list_copy_(const list_t_ list, size_t item_size) {
  list_t_ ret = {.base = gv_calloc(list.capacity, item_size),
                 .capacity = list.capacity};

  // opportunistically create the new list synced
  for (size_t i = 0; i < list.size; ++i) {
    const size_t slot = gv_list_get_(list, i);
    const void *const src = INDEX_TO(&list, slot, item_size);
    void *const dst = INDEX_TO(&ret, ret.size, item_size);
    assert(ret.size < ret.capacity);
    memcpy(dst, src, item_size);
    ++ret.size;
  }

  // mark the remainder of the allocated space as inaccessible
  void *const to_poison = INDEX_TO(&ret, ret.size, item_size);
  const size_t to_poison_len = (ret.capacity - ret.size) * item_size;
  ASAN_POISON(to_poison, to_poison_len);

  return ret;
}

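// Because elements are copied in logical order starting at physical slot 0,
// the copy is always contiguous (head == 0) even if the source wrapped: the
// layout |3|4|1|2| with head == 2 from the try_reserve diagram copies to a
// flat |1|2|3|4|.
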
bool gv_list_is_contiguous_(const list_t_ list) {
  return list.head + list.size <= list.capacity;
}

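// E.g. head == 2, size == 2, capacity == 4 is contiguous (2 + 2 <= 4), while
// head == 3, size == 2, capacity == 4 is not, because the second element
// lives at physical slot 0.
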
void gv_list_sync_(list_t_ *list, size_t item_size) {
  assert(list != NULL);

  // Allow unrestricted access. The shuffle below accesses both allocated
  // and unallocated elements, so just let it read and write everything.
  ASAN_UNPOISON(list->base, list->capacity * item_size);

  // Shuffle the list 1-1 until it is aligned. This is not efficient, but
  // we assume this is a relatively rare operation.
  while (list->head != 0) {
    // rotate the list leftwards by 1
    assert(list->capacity > 0);
    // shuffle byte-by-byte to avoid dynamic allocation
    for (size_t i = 0; i < item_size; ++i) {
      uint8_t lowest;
      memcpy(&lowest, list->base, sizeof(lowest));
      const size_t remainder = list->capacity * item_size - sizeof(lowest);
      memmove(list->base, (char *)list->base + sizeof(lowest), remainder);
      memcpy((char *)list->base + remainder, &lowest, sizeof(lowest));
    }
    --list->head;
  }

  /* synchronization should have ensured the list no longer wraps */
  assert(gv_list_is_contiguous_(*list));

  /* re-establish access restrictions */
  void *end = INDEX_TO(list, list->size, item_size);
  ASAN_POISON(end, (list->capacity - list->size) * item_size);
}

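// Each single-element rotation above memmoves the whole buffer once per byte
// of item_size, so realignment costs O(head × capacity × item_size²) bytes
// moved. E.g. realigning |3|4|1|2| with head == 2 performs two one-element
// rotations, producing |4|1|2|3| and then the contiguous |1|2|3|4|.
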
void gv_list_sort_(list_t_ *list, int (*cmp)(const void *, const void *),
                   size_t item_size) {
  assert(list != NULL);
  assert(cmp != NULL);

  gv_list_sync_(list, item_size);

  if (list->size) {
    qsort(list->base, list->size, item_size, cmp);
  }
}

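// A hypothetical comparator for a list of ints, in the usual qsort shape:
//
//   static int cmp_int(const void *a, const void *b) {
//     const int x = *(const int *)a;
//     const int y = *(const int *)b;
//     return (x > y) - (x < y);
//   }
//
// Sorting syncs the list first because qsort assumes one contiguous array.
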
static void exchange(void *a, void *b, size_t size) {
  assert(a != NULL);
  assert(b != NULL);

  // do a byte-by-byte swap of the two objects
  char *x = a;
  char *y = b;
  for (size_t i = 0; i < size; ++i) {
    SWAP(&x[i], &y[i]);
  }
}

void gv_list_reverse_(list_t_ *list, size_t item_size) {
  assert(list != NULL);

  // move from the outside inwards, swapping elements
  for (size_t i = 0; i < list->size / 2; ++i) {
    const size_t a = gv_list_get_(*list, i);
    const size_t b = gv_list_get_(*list, list->size - i - 1);
    void *const x = INDEX_TO(list, a, item_size);
    void *const y = INDEX_TO(list, b, item_size);
    exchange(x, y, item_size);
  }
}

void gv_list_shrink_to_fit_(list_t_ *list, size_t item_size) {
  assert(list != NULL);

  gv_list_sync_(list, item_size);

  if (list->capacity > list->size) {
    list->base = gv_recalloc(list->base, list->capacity, list->size, item_size);
    list->capacity = list->size;
  }
}

void gv_list_free_(list_t_ *list) {
  assert(list != NULL);
  free(list->base);
  *list = (list_t_){0};
}

void gv_list_pop_front_(list_t_ *list, void *into, size_t item_size) {
  assert(list != NULL);
  assert(list->size > 0);
  assert(into != NULL);

  // find and pop the first slot
  const size_t slot = gv_list_get_(*list, 0);
  void *const to_pop = INDEX_TO(list, slot, item_size);
  memcpy(into, to_pop, item_size);
  ASAN_POISON(to_pop, item_size);
  list->head = (list->head + 1) % list->capacity;
  --list->size;
}

void gv_list_pop_back_(list_t_ *list, void *into, size_t item_size) {
  assert(list != NULL);
  assert(list->size > 0);
  assert(into != NULL);

  // find and pop the last slot
  const size_t slot = gv_list_get_(*list, list->size - 1);
  void *const to_pop = INDEX_TO(list, slot, item_size);
  memcpy(into, to_pop, item_size);
  ASAN_POISON(to_pop, item_size);
  --list->size;
}

void gv_list_detach_(list_t_ *list, void *datap, size_t *sizep,
                     size_t item_size) {
  assert(list != NULL);
  assert(datap != NULL);

  gv_list_sync_(list, item_size);
  memcpy(datap, &list->base, sizeof(void *));
  if (sizep != NULL) {
    *sizep = list->size;
  }

  *list = (list_t_){0};
}
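
// Detaching transfers ownership: the synced, contiguous heap buffer is handed
// to the caller, who must eventually free() it (a NULL buffer from an empty
// list is fine to free), and the list is reset to the empty state for reuse.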