Graphviz 14.0.2~dev.20251009.1020
Loading...
Searching...
No Matches
list.c
Go to the documentation of this file.
1#include <assert.h>
2#include <errno.h>
3#include <stdbool.h>
4#include <stdint.h>
5#include <stdio.h>
6#include <stdlib.h>
7#include <string.h>
8#include <util/alloc.h>
9#include <util/asan.h>
10#include <util/exit.h>
11#include <util/gv_math.h>
12#include <util/list-private.h>
13#include <util/prisize_t.h>
14
15static const void *slot_from_const_list(const list_t_ *list, size_t index,
16 size_t stride) {
17 assert(list != NULL);
18 assert(list->base != NULL || index == 0 || stride == 0);
19
20 const char *const base = list->base;
21 return base + index * stride;
22}
23
24static void *slot_from_list(list_t_ *list, size_t index, size_t stride) {
25 assert(list != NULL);
26 assert(list->base != NULL || index == 0 || stride == 0);
27
28 char *const base = list->base;
29 return base + index * stride;
30}
31
/// compute the address of slot `index` within a raw backing buffer
///
/// @param base Start of the buffer being indexed
/// @param index Slot number to locate
/// @param stride Size in bytes of one element
/// @return Read-only pointer to the requested slot
static const void *slot_from_const_base(const void *base, size_t index,
                                        size_t stride) {
  assert(base != NULL || index == 0 || stride == 0);

  const char *const start = base;
  return &start[index * stride];
}
39
/// compute the address of slot `index` within a raw backing buffer
/// (mutable variant of `slot_from_const_base`)
///
/// @param base Start of the buffer being indexed
/// @param index Slot number to locate
/// @param stride Size in bytes of one element
/// @return Writable pointer to the requested slot
static void *slot_from_base(void *base, size_t index, size_t stride) {
  assert(base != NULL || index == 0 || stride == 0);

  char *const start = base;
  return &start[index * stride];
}
46
/// type-dispatched slot lookup: resolve `origin` (a list pointer or a raw
/// buffer, const or non-const) to the address of slot `index`, assuming
/// elements of `stride` bytes; the const-ness of the returned pointer
/// matches that of `origin`
#define INDEX_TO(origin, index, stride)                                        \
  (_Generic((origin),                                                          \
       const list_t_ *: slot_from_const_list,                                  \
       list_t_ *: slot_from_list,                                              \
       const void *: slot_from_const_base,                                     \
       void *: slot_from_base)((origin), (index), (stride)))
53
54size_t gv_list_append_slot_(list_t_ *list, size_t item_size) {
55 assert(list != NULL);
56
57 // do we need to expand the backing storage?
58 if (list->size == list->capacity) {
59 const size_t c = list->capacity == 0 ? 1 : (list->capacity * 2);
60 gv_list_reserve_(list, c, item_size);
61 }
62
63 assert(list->capacity > 0);
64 assert(list->size < list->capacity);
65
66 // append the new slot
67 const size_t new_slot = (list->head + list->size) % list->capacity;
68 void *const slot = INDEX_TO(list, new_slot, item_size);
69 ASAN_UNPOISON(slot, item_size);
70 ++list->size;
71
72 return new_slot;
73}
74
75size_t gv_list_prepend_slot_(list_t_ *list, size_t item_size) {
76 assert(list != NULL);
77
78 // do we need to expand the backing storage?
79 if (list->size == list->capacity) {
80 const size_t c = list->capacity == 0 ? 1 : (list->capacity * 2);
81 gv_list_reserve_(list, c, item_size);
82 }
83
84 assert(list->capacity > 0);
85 assert(list->size < list->capacity);
86
87 // prepend the new slot
88 list->head = (list->head + (list->capacity - 1)) % list->capacity;
89 void *const slot = INDEX_TO(list, list->head, item_size);
90 ASAN_UNPOISON(slot, item_size);
91 ++list->size;
92
93 return list->head;
94}
95
96static int try_reserve(list_t_ *list, size_t capacity, size_t item_size) {
97 assert(list != NULL);
98
99 // if we can already fit enough items, nothing to do
100 if (list->capacity >= capacity) {
101 return 0;
102 }
103
104 // will the arithmetic below overflow?
105 assert(capacity > 0);
106 if (SIZE_MAX / capacity < item_size) {
107 return EOVERFLOW;
108 }
109
110 void *const base = realloc(list->base, capacity * item_size);
111 if (base == NULL) {
112 return ENOMEM;
113 }
114
115 // zero the new memory
116 {
117 void *const new = INDEX_TO(base, list->capacity, item_size);
118 const size_t new_bytes = (capacity - list->capacity) * item_size;
119 memset(new, 0, new_bytes);
120
121 // poison the new (conceptually unallocated) memory
122 ASAN_POISON(new, new_bytes);
123 }
124
125 // Do we need to shuffle the prefix upwards? E.g.
126 //
127 // ┌───┬───┬───┬───┐
128 // old: │ 3 │ 4 │ 1 │ 2 │
129 // └───┴───┴─┼─┴─┼─┘
130 // │ └───────────────┐
131 // └───────────────┐ │
132 // ▼ ▼
133 // ┌───┬───┬───┬───┬───┬───┬───┬───┐
134 // new: │ 3 │ 4 │ │ │ │ │ 1 │ 2 │
135 // └───┴───┴───┴───┴───┴───┴───┴───┘
136 // a b c d e f g h
137 if (list->head + list->size > list->capacity) {
138 const size_t prefix = list->capacity - list->head;
139 const size_t new_head = capacity - prefix;
140 // unpoison target range, slots [g, h] in example
141 void *const target = INDEX_TO(base, new_head, item_size);
142 ASAN_UNPOISON(target, prefix * item_size);
143 const void *const src = INDEX_TO(base, list->head, item_size);
144 memmove(target, src, prefix * item_size);
145 // (re-)poison new gap, slots [c, f] in example
146 void *const gap_begin = INDEX_TO(base, list->size - prefix, item_size);
147 ASAN_POISON(gap_begin, (list->capacity - list->size) * item_size);
148 list->head = new_head;
149 }
150
151 list->base = base;
152 list->capacity = capacity;
153 return 0;
154}
155
156bool gv_list_try_append_(list_t_ *list, const void *item, size_t item_size) {
157 assert(list != NULL);
158 assert(item != NULL);
159
160 // do we need to expand the backing storage?
161 if (list->size == list->capacity) {
162 do {
163 // can we attempt doubling without integer overflow?
164 if (SIZE_MAX / 2 >= list->capacity) {
165 const size_t c = list->capacity == 0 ? 1 : (list->capacity * 2);
166 if (try_reserve(list, c, item_size) == 0) {
167 // success
168 break;
169 }
170 }
171
172 // try a more conservative expansion
173 if (SIZE_MAX - 1 >= list->capacity) {
174 if (try_reserve(list, list->capacity + 1, item_size) == 0) {
175 // success
176 break;
177 }
178 }
179
180 // failed to expand the list
181 return false;
182 } while (0);
183 }
184
185 assert(list->size < list->capacity);
186
187 // we can now append, knowing it will not require backing storage expansion
188 const size_t new_slot = (list->head + list->size) % list->capacity;
189 void *const slot = INDEX_TO(list, new_slot, item_size);
190 ASAN_UNPOISON(slot, item_size);
191 memcpy(slot, item, item_size);
192 ++list->size;
193
194 return true;
195}
196
197size_t gv_list_get_(const list_t_ list, size_t index) {
198 assert(index < list.size && "index out of bounds");
199 return (list.head + index) % list.capacity;
200}
201
202size_t gv_list_find_(const list_t_ list, const void *needle, size_t item_size) {
203
204 for (size_t i = 0; i < list.size; ++i) {
205 const size_t slot = gv_list_get_(list, i);
206 const void *candidate = INDEX_TO(&list, slot, item_size);
207 if (memcmp(needle, candidate, item_size) == 0) {
208 return i;
209 }
210 }
211
212 return SIZE_MAX;
213}
214
215void gv_list_remove_(list_t_ *list, size_t index, size_t item_size) {
216 assert(list != NULL);
217 assert(index < list->size);
218
219 // shrink the list
220 for (size_t i = index + 1; i < list->size; ++i) {
221 const size_t dst_slot = gv_list_get_(*list, i - 1);
222 void *const dst = INDEX_TO(list, dst_slot, item_size);
223 const size_t src_slot = gv_list_get_(*list, i);
224 const void *const src = INDEX_TO(list, src_slot, item_size);
225 memcpy(dst, src, item_size);
226 }
227 const size_t truncated_slot = gv_list_get_(*list, list->size - 1);
228 void *truncated = INDEX_TO(list, truncated_slot, item_size);
229 ASAN_POISON(truncated, item_size);
230 --list->size;
231}
232
233void gv_list_clear_(list_t_ *list, size_t item_size) {
234 assert(list != NULL);
235
236 for (size_t i = 0; i < list->size; ++i) {
237 const size_t slot = gv_list_get_(*list, i);
238 void *const to_poison = INDEX_TO(list, slot, item_size);
239 ASAN_POISON(to_poison, item_size);
240 }
241
242 list->size = 0;
243
244 // opportunistically re-sync the list
245 list->head = 0;
246}
247
248void gv_list_reserve_(list_t_ *list, size_t capacity, size_t item_size) {
249 const int err = try_reserve(list, capacity, item_size);
250 if (err != 0) {
251 fprintf(stderr,
252 "failed to reserve %" PRISIZE_T " elements of size %" PRISIZE_T
253 " bytes: %s\n",
254 capacity, item_size, strerror(err));
255 graphviz_exit(EXIT_FAILURE);
256 }
257}
258
259bool gv_list_contains_(const list_t_ list, const void *needle,
260 size_t item_size) {
261 return gv_list_find_(list, needle, item_size) != SIZE_MAX;
262}
263
264list_t_ gv_list_copy_(const list_t_ list, size_t item_size) {
265 list_t_ ret = {.base = gv_calloc(list.capacity, item_size),
266 .capacity = list.capacity};
267
268 // opportunistically create the new list synced
269 for (size_t i = 0; i < list.size; ++i) {
270 const size_t slot = gv_list_get_(list, i);
271 const void *const src = INDEX_TO(&list, slot, item_size);
272 void *const dst = INDEX_TO(&ret, ret.size, item_size);
273 assert(ret.size < ret.capacity);
274 memcpy(dst, src, item_size);
275 ++ret.size;
276 }
277
278 // mark the remainder of the allocated space as inaccessible
279 void *const to_poison = INDEX_TO(&ret, ret.size, item_size);
280 const size_t to_poison_len = (ret.capacity - ret.size) * item_size;
281 ASAN_POISON(to_poison, to_poison_len);
282
283 return ret;
284}
285
  // contiguous ⇔ the live elements occupy one run that does not wrap past
  // the end of the backing buffer
  return list.head + list.size <= list.capacity;
}
289
/// rotate `list` in place until its head sits at physical slot 0, so the
/// elements occupy one contiguous, non-wrapping run
///
/// @param list List to realign
/// @param item_size Size in bytes of one element
void gv_list_sync_(list_t_ *list, size_t item_size) {
  assert(list != NULL);

  // Allow unrestricted access. The shuffle below accesses both allocated
  // and unallocated elements, so just let it read and write everything.
  ASAN_UNPOISON(list->base, list->capacity * item_size);

  // Shuffle the list 1-1 until it is aligned. This is not efficient, but
  // we assume this is a relatively rare operation.
  while (list->head != 0) {
    // rotate the list leftwards by 1
    assert(list->capacity > 0);
    // shuffle byte-by-byte to avoid dynamic allocation
    for (size_t i = 0; i < item_size; ++i) {
      // move the buffer's first byte to its end, shifting everything else
      // down; `item_size` repetitions rotate the whole buffer left by
      // exactly one element
      uint8_t lowest;
      memcpy(&lowest, list->base, sizeof(lowest));
      const size_t remainder = list->capacity * item_size - sizeof(lowest);
      memmove(list->base, (char *)list->base + sizeof(lowest), remainder);
      memcpy((char *)list->base + remainder, &lowest, sizeof(lowest));
    }
    --list->head;
  }

  /* synchronization should have ensured the list no longer wraps */
  assert(gv_list_is_contiguous_(*list));

  /* re-establish access restrictions */
  void *end = INDEX_TO(list, list->size, item_size);
  ASAN_POISON(end, (list->capacity - list->size) * item_size);
}
320
321void gv_list_sort_(list_t_ *list, int (*cmp)(const void *, const void *),
322 size_t item_size) {
323 assert(list != NULL);
324 assert(cmp != NULL);
325
326 gv_list_sync_(list, item_size);
327
328 if (list->size) {
329 qsort(list->base, list->size, item_size, cmp);
330 }
331}
332
/// swap the contents of two equally-sized objects
///
/// @param a First object
/// @param b Second object
/// @param size Size in bytes of each object
static void exchange(void *a, void *b, size_t size) {
  assert(a != NULL);
  assert(b != NULL);

  // swap byte-by-byte to avoid any dynamic allocation
  char *const lhs = a;
  char *const rhs = b;
  for (size_t i = 0; i < size; ++i) {
    const char tmp = lhs[i];
    lhs[i] = rhs[i];
    rhs[i] = tmp;
  }
}
344
345void gv_list_reverse_(list_t_ *list, size_t item_size) {
346 assert(list != NULL);
347
348 // move from the outside inwards, swapping elements
349 for (size_t i = 0; i < list->size / 2; ++i) {
350 const size_t a = gv_list_get_(*list, i);
351 const size_t b = gv_list_get_(*list, list->size - i - 1);
352 void *const x = INDEX_TO(list, a, item_size);
353 void *const y = INDEX_TO(list, b, item_size);
354 exchange(x, y, item_size);
355 }
356}
357
358void gv_list_shrink_to_fit_(list_t_ *list, size_t item_size) {
359 assert(list != NULL);
360
361 gv_list_sync_(list, item_size);
362
363 if (list->capacity > list->size) {
364 list->base = gv_recalloc(list->base, list->capacity, list->size, item_size);
365 list->capacity = list->size;
366 }
367}
368
  assert(list != NULL);
  // free(NULL) is a no-op, so an empty list is fine here
  free(list->base);
  // reset to an empty state that is safe to reuse
  *list = (list_t_){0};
}
374
375void gv_list_pop_front_(list_t_ *list, void *into, size_t item_size) {
376 assert(list != NULL);
377 assert(list->size > 0);
378 assert(into != NULL);
379
380 // find and pop the first slot
381 const size_t slot = gv_list_get_(*list, 0);
382 void *const to_pop = INDEX_TO(list, slot, item_size);
383 memcpy(into, to_pop, item_size);
384 ASAN_POISON(to_pop, item_size);
385 list->head = (list->head + 1) % list->capacity;
386 --list->size;
387}
388
389void gv_list_pop_back_(list_t_ *list, void *into, size_t item_size) {
390 assert(list != NULL);
391 assert(list->size > 0);
392 assert(into != NULL);
393
394 // find and pop last slot
395 const size_t slot = gv_list_get_(*list, list->size - 1);
396 void *const to_pop = INDEX_TO(list, slot, item_size);
397 memcpy(into, to_pop, item_size);
398 ASAN_POISON(to_pop, item_size);
399 --list->size;
400}
401
402void gv_list_detach_(list_t_ *list, void *datap, size_t *sizep,
403 size_t item_size) {
404 assert(list != NULL);
405 assert(datap != NULL);
406
407 gv_list_sync_(list, item_size);
408 memcpy(datap, &list->base, sizeof(void *));
409 if (sizep != NULL) {
410 *sizep = list->size;
411 }
412
413 *list = (list_t_){0};
414}
Memory allocation wrappers that exit on failure.
static void * gv_recalloc(void *ptr, size_t old_nmemb, size_t new_nmemb, size_t size)
Definition alloc.h:73
static void * gv_calloc(size_t nmemb, size_t size)
Definition alloc.h:26
macros for interacting with Address Sanitizer
#define ASAN_POISON(addr, size)
Definition asan.h:15
#define ASAN_UNPOISON(addr, size)
Definition asan.h:22
static char * err
Definition delaunay.c:546
static NORETURN void graphviz_exit(int status)
Definition exit.h:23
static int cmp(const void *key, const void *candidate)
void free(void *)
require define api prefix
Definition gmlparse.y:17
#define SIZE_MAX
Definition gmlscan.c:347
node NULL
Definition grammar.y:181
Arithmetic helper functions.
#define SWAP(a, b)
Definition gv_math.h:134
#define exchange(h, i, j, index)
Definition dijkstra.c:45
internal implementation details of list.h
list_t_ gv_list_copy_(const list_t_ list, size_t item_size)
Definition list.c:264
bool gv_list_try_append_(list_t_ *list, const void *item, size_t item_size)
Definition list.c:156
static const void * slot_from_const_list(const list_t_ *list, size_t index, size_t stride)
Definition list.c:15
size_t gv_list_prepend_slot_(list_t_ *list, size_t item_size)
Definition list.c:75
void gv_list_pop_front_(list_t_ *list, void *into, size_t item_size)
Definition list.c:375
static const void * slot_from_const_base(const void *base, size_t index, size_t stride)
Definition list.c:32
#define INDEX_TO(origin, index, stride)
Definition list.c:47
bool gv_list_contains_(const list_t_ list, const void *needle, size_t item_size)
Definition list.c:259
size_t gv_list_append_slot_(list_t_ *list, size_t item_size)
Definition list.c:54
void gv_list_reserve_(list_t_ *list, size_t capacity, size_t item_size)
Definition list.c:248
void gv_list_free_(list_t_ *list)
Definition list.c:369
static void * slot_from_base(void *base, size_t index, size_t stride)
Definition list.c:40
void gv_list_clear_(list_t_ *list, size_t item_size)
Definition list.c:233
void gv_list_detach_(list_t_ *list, void *datap, size_t *sizep, size_t item_size)
Definition list.c:402
bool gv_list_is_contiguous_(const list_t_ list)
Definition list.c:286
void gv_list_shrink_to_fit_(list_t_ *list, size_t item_size)
Definition list.c:358
void gv_list_sort_(list_t_ *list, int(*cmp)(const void *, const void *), size_t item_size)
Definition list.c:321
void gv_list_reverse_(list_t_ *list, size_t item_size)
Definition list.c:345
void gv_list_pop_back_(list_t_ *list, void *into, size_t item_size)
Definition list.c:389
size_t gv_list_find_(const list_t_ list, const void *needle, size_t item_size)
Definition list.c:202
void gv_list_sync_(list_t_ *list, size_t item_size)
Definition list.c:290
static int try_reserve(list_t_ *list, size_t capacity, size_t item_size)
Definition list.c:96
static void * slot_from_list(list_t_ *list, size_t index, size_t stride)
Definition list.c:24
void gv_list_remove_(list_t_ *list, size_t index, size_t item_size)
Definition list.c:215
size_t gv_list_get_(const list_t_ list, size_t index)
Definition list.c:197
#define PRISIZE_T
Definition prisize_t.h:25
Definition utils.c:750
size_t size
size <= capacity
size_t capacity
available storage slots
void * base
(base == NULL && capacity == 0) || (base != NULL && capacity > 0)
size_t head
(capacity == 0 && head == 0) || (capacity > 0 && head < capacity)